switchtec.c 40 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632
  1. /*
  2. * Microsemi Switchtec(tm) PCIe Management Driver
  3. * Copyright (c) 2017, Microsemi Corporation
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms and conditions of the GNU General Public License,
  7. * version 2, as published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope it will be useful, but WITHOUT
  10. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  12. * more details.
  13. *
  14. */
  15. #include <linux/switchtec_ioctl.h>
  16. #include <linux/interrupt.h>
  17. #include <linux/module.h>
  18. #include <linux/fs.h>
  19. #include <linux/uaccess.h>
  20. #include <linux/poll.h>
  21. #include <linux/pci.h>
  22. #include <linux/cdev.h>
  23. #include <linux/wait.h>
MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Microsemi Corporation");

/* Number of char-device minors reserved at module load. */
static int max_devices = 16;
module_param(max_devices, int, 0644);
MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");

/* Base dev_t and device class shared by all switchtec instances. */
static dev_t switchtec_devt;
static struct class *switchtec_class;
/* Hands out unique minor numbers for device instances. */
static DEFINE_IDA(switchtec_minor_ida);

#define MICROSEMI_VENDOR_ID		0x11f8
#define MICROSEMI_NTB_CLASSCODE		0x068000
#define MICROSEMI_MGMT_CLASSCODE	0x058000

/* Size of the MRPC input/output data windows (see struct mrpc_regs). */
#define SWITCHTEC_MRPC_PAYLOAD_SIZE	1024
/* Maximum number of per-port-function CSR blocks mapped at mmio_pff_csr. */
#define SWITCHTEC_MAX_PFF_CSR		48

/* Bits in the per-event header registers (hdr fields below). */
#define SWITCHTEC_EVENT_OCCURRED	BIT(0)
#define SWITCHTEC_EVENT_CLEAR		BIT(0)
#define SWITCHTEC_EVENT_EN_LOG		BIT(1)
#define SWITCHTEC_EVENT_EN_CLI		BIT(2)
#define SWITCHTEC_EVENT_EN_IRQ		BIT(3)
#define SWITCHTEC_EVENT_FATAL		BIT(4)
/*
 * Fixed offsets of each register region within the device's Global
 * Address Space (GAS) BAR; the mmio_* pointers in struct switchtec_dev
 * point at these regions.
 */
enum {
	SWITCHTEC_GAS_MRPC_OFFSET	= 0x0000,
	SWITCHTEC_GAS_TOP_CFG_OFFSET	= 0x1000,
	SWITCHTEC_GAS_SW_EVENT_OFFSET	= 0x1800,
	SWITCHTEC_GAS_SYS_INFO_OFFSET	= 0x2000,
	SWITCHTEC_GAS_FLASH_INFO_OFFSET	= 0x2200,
	SWITCHTEC_GAS_PART_CFG_OFFSET	= 0x4000,
	SWITCHTEC_GAS_NTB_OFFSET	= 0x10000,
	SWITCHTEC_GAS_PFF_CSR_OFFSET	= 0x134000,
};
/* MRPC (management RPC) mailbox registers at SWITCHTEC_GAS_MRPC_OFFSET. */
struct mrpc_regs {
	u8 input_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];	/* command payload in */
	u8 output_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];	/* result payload out */
	u32 cmd;		/* writing a command code here starts execution */
	u32 status;		/* see enum mrpc_status */
	u32 ret_value;		/* firmware return code, valid when DONE */
} __packed;
/* Values read back from mrpc_regs.status. */
enum mrpc_status {
	SWITCHTEC_MRPC_STATUS_INPROGRESS = 1,	/* firmware still working */
	SWITCHTEC_MRPC_STATUS_DONE = 2,		/* ret_value/output_data valid */
	SWITCHTEC_MRPC_STATUS_ERROR = 0xFF,
	SWITCHTEC_MRPC_STATUS_INTERRUPTED = 0x100,
};
/*
 * Switch-wide event registers at SWITCHTEC_GAS_SW_EVENT_OFFSET: a
 * header/data pair per global event source, plus summary registers.
 * The *_hdr registers use the SWITCHTEC_EVENT_* bits defined above.
 */
struct sw_event_regs {
	u64 event_report_ctrl;
	u64 reserved1;
	u64 part_event_bitmap;	/* which partitions have pending events */
	u64 reserved2;
	u32 global_summary;
	u32 reserved3[3];
	u32 stack_error_event_hdr;
	u32 stack_error_event_data;
	u32 reserved4[4];
	u32 ppu_error_event_hdr;
	u32 ppu_error_event_data;
	u32 reserved5[4];
	u32 isp_error_event_hdr;
	u32 isp_error_event_data;
	u32 reserved6[4];
	u32 sys_reset_event_hdr;
	u32 reserved7[5];
	u32 fw_exception_hdr;
	u32 reserved8[5];
	u32 fw_nmi_hdr;
	u32 reserved9[5];
	u32 fw_non_fatal_hdr;
	u32 reserved10[5];
	u32 fw_fatal_hdr;
	u32 reserved11[5];
	u32 twi_mrpc_comp_hdr;
	u32 twi_mrpc_comp_data;
	u32 reserved12[4];
	u32 twi_mrpc_comp_async_hdr;
	u32 twi_mrpc_comp_async_data;
	u32 reserved13[4];
	u32 cli_mrpc_comp_hdr;
	u32 cli_mrpc_comp_data;
	u32 reserved14[4];
	u32 cli_mrpc_comp_async_hdr;
	u32 cli_mrpc_comp_async_data;
	u32 reserved15[4];
	u32 gpio_interrupt_hdr;
	u32 gpio_interrupt_data;
	u32 reserved16[4];
} __packed;
/* Values of sys_info_regs.cfg_running / img_running. */
enum {
	SWITCHTEC_CFG0_RUNNING = 0x04,
	SWITCHTEC_CFG1_RUNNING = 0x05,
	SWITCHTEC_IMG0_RUNNING = 0x03,
	SWITCHTEC_IMG1_RUNNING = 0x07,
};
/*
 * Read-only device identification registers at
 * SWITCHTEC_GAS_SYS_INFO_OFFSET; exposed to userspace via sysfs below.
 */
struct sys_info_regs {
	u32 device_id;
	u32 device_version;
	u32 firmware_version;
	u32 reserved1;
	u32 vendor_table_revision;
	u32 table_format_version;
	u32 partition_id;
	u32 cfg_file_fmt_version;
	u16 cfg_running;	/* SWITCHTEC_CFG{0,1}_RUNNING */
	u16 img_running;	/* SWITCHTEC_IMG{0,1}_RUNNING */
	u32 reserved2[57];
	/* Fixed-width, space-padded ASCII fields (not NUL-terminated). */
	char vendor_id[8];
	char product_id[16];
	char product_revision[4];
	char component_vendor[8];
	u16 component_id;
	u8 component_revision;
} __packed;
/*
 * Flash partition map at SWITCHTEC_GAS_FLASH_INFO_OFFSET, backing the
 * flash info ioctls.
 *
 * NOTE(review): unlike the other register structs this one is not
 * declared __packed; all members are naturally aligned u32s so the
 * layout works out, but confirm against the hardware spec.
 */
struct flash_info_regs {
	u32 flash_part_map_upd_idx;

	struct active_partition_info {
		u32 address;
		u32 build_version;
		u32 build_string;
	} active_img;

	struct active_partition_info active_cfg;
	struct active_partition_info inactive_img;
	struct active_partition_info inactive_cfg;

	u32 flash_length;

	struct partition_info {
		u32 address;
		u32 length;
	} cfg0;

	struct partition_info cfg1;
	struct partition_info img0;
	struct partition_info img1;
	struct partition_info nvlog;
	struct partition_info vendor[8];
};
/* NTB information registers at SWITCHTEC_GAS_NTB_OFFSET. */
struct ntb_info_regs {
	u8  partition_count;	/* number of partitions on this switch */
	u8  partition_id;	/* partition this endpoint belongs to */
	u16 reserved1;
	u64 ep_map;
	u16 requester_id;
} __packed;
/*
 * Per-partition configuration and event registers at
 * SWITCHTEC_GAS_PART_CFG_OFFSET (one instance per partition).
 */
struct part_cfg_regs {
	u32 status;
	u32 state;
	u32 port_cnt;
	u32 usp_port_mode;
	u32 usp_pff_inst_id;
	u32 vep_pff_inst_id;
	u32 dsp_pff_inst_id[47];
	u32 reserved1[11];
	u16 vep_vector_number;
	u16 usp_vector_number;
	u32 port_event_bitmap;
	u32 reserved2[3];
	u32 part_event_summary;	/* SWITCHTEC_PART_CFG_EVENT_* bits */
	u32 reserved3[3];
	u32 part_reset_hdr;
	u32 part_reset_data[5];
	u32 mrpc_comp_hdr;
	u32 mrpc_comp_data[5];
	u32 mrpc_comp_async_hdr;
	u32 mrpc_comp_async_data[5];
	u32 dyn_binding_hdr;
	u32 dyn_binding_data[5];
	u32 reserved4[159];
} __packed;
/* Bits in part_cfg_regs.part_event_summary. */
enum {
	SWITCHTEC_PART_CFG_EVENT_RESET = 1 << 0,
	SWITCHTEC_PART_CFG_EVENT_MRPC_CMP = 1 << 1,
	SWITCHTEC_PART_CFG_EVENT_MRPC_ASYNC_CMP = 1 << 2,
	SWITCHTEC_PART_CFG_EVENT_DYN_PART_CMP = 1 << 3,
};
/*
 * Per-port-function CSR block at SWITCHTEC_GAS_PFF_CSR_OFFSET (up to
 * SWITCHTEC_MAX_PFF_CSR instances). A valid instance has vendor_id ==
 * MICROSEMI_VENDOR_ID; the event code probes this to find the end of
 * the populated array.
 */
struct pff_csr_regs {
	u16 vendor_id;
	u16 device_id;
	u32 pci_cfg_header[15];
	u32 pci_cap_region[48];
	u32 pcie_cap_region[448];
	u32 indirect_gas_window[128];
	u32 indirect_gas_window_off;
	u32 reserved[127];
	u32 pff_event_summary;
	u32 reserved2[3];
	u32 aer_in_p2p_hdr;
	u32 aer_in_p2p_data[5];
	u32 aer_in_vep_hdr;
	u32 aer_in_vep_data[5];
	u32 dpc_hdr;
	u32 dpc_data[5];
	u32 cts_hdr;
	u32 cts_data[5];
	u32 reserved3[6];
	u32 hotplug_hdr;
	u32 hotplug_data[5];
	u32 ier_hdr;
	u32 ier_data[5];
	u32 threshold_hdr;
	u32 threshold_data[5];
	u32 power_mgmt_hdr;
	u32 power_mgmt_data[5];
	u32 tlp_throttling_hdr;
	u32 tlp_throttling_data[5];
	u32 force_speed_hdr;
	u32 force_speed_data[5];
	u32 credit_timeout_hdr;
	u32 credit_timeout_data[5];
	u32 link_state_hdr;
	u32 link_state_data[5];
	u32 reserved4[174];
} __packed;
/* Per-device state, one instance per bound PCI function. */
struct switchtec_dev {
	struct pci_dev *pdev;
	struct device dev;
	struct cdev cdev;

	int partition;		/* partition this function belongs to */
	int partition_count;
	int pff_csr_count;	/* populated entries in mmio_pff_csr */
	char pff_local[SWITCHTEC_MAX_PFF_CSR];

	/* BAR mapping plus typed views of each GAS region. */
	void __iomem *mmio;
	struct mrpc_regs __iomem *mmio_mrpc;
	struct sw_event_regs __iomem *mmio_sw_event;
	struct sys_info_regs __iomem *mmio_sys_info;
	struct flash_info_regs __iomem *mmio_flash_info;
	struct ntb_info_regs __iomem *mmio_ntb;
	struct part_cfg_regs __iomem *mmio_part_cfg;
	struct part_cfg_regs __iomem *mmio_part_cfg_all;
	struct pff_csr_regs __iomem *mmio_pff_csr;

	/*
	 * The mrpc mutex must be held when accessing the other
	 * mrpc_ fields, alive flag and stuser->state field
	 */
	struct mutex mrpc_mutex;
	struct list_head mrpc_queue;	/* stusers waiting to run a command */
	int mrpc_busy;			/* a command is in flight */
	struct work_struct mrpc_work;
	struct delayed_work mrpc_timeout;
	bool alive;			/* cleared when the device goes away */

	wait_queue_head_t event_wq;
	atomic_t event_cnt;		/* bumped on every hardware event */
};
/* Map the embedded struct device back to its switchtec_dev container. */
static struct switchtec_dev *to_stdev(struct device *dev)
{
	return container_of(dev, struct switchtec_dev, dev);
}
/* Lifecycle of a user's MRPC command; guarded by mrpc_mutex. */
enum mrpc_state {
	MRPC_IDLE = 0,		/* no command outstanding */
	MRPC_QUEUED,		/* on mrpc_queue, not yet submitted */
	MRPC_RUNNING,		/* submitted to hardware */
	MRPC_DONE,		/* result available for read() */
};
/* Per-open-file state; one MRPC command may be outstanding at a time. */
struct switchtec_user {
	struct switchtec_dev *stdev;

	enum mrpc_state state;		/* guarded by stdev->mrpc_mutex */

	struct completion comp;		/* signalled when the command finishes */
	struct kref kref;
	struct list_head list;		/* entry on stdev->mrpc_queue */

	u32 cmd;			/* command code from write() */
	u32 status;			/* raw mrpc status at completion */
	u32 return_code;
	size_t data_len;		/* bytes of payload from write() */
	size_t read_len;		/* bytes the reader asked for */
	unsigned char data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
	int event_cnt;			/* event counter snapshot for poll() */
};
  286. static struct switchtec_user *stuser_create(struct switchtec_dev *stdev)
  287. {
  288. struct switchtec_user *stuser;
  289. stuser = kzalloc(sizeof(*stuser), GFP_KERNEL);
  290. if (!stuser)
  291. return ERR_PTR(-ENOMEM);
  292. get_device(&stdev->dev);
  293. stuser->stdev = stdev;
  294. kref_init(&stuser->kref);
  295. INIT_LIST_HEAD(&stuser->list);
  296. init_completion(&stuser->comp);
  297. stuser->event_cnt = atomic_read(&stdev->event_cnt);
  298. dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);
  299. return stuser;
  300. }
/* kref release callback: drop the device reference and free the user. */
static void stuser_free(struct kref *kref)
{
	struct switchtec_user *stuser;

	stuser = container_of(kref, struct switchtec_user, kref);

	dev_dbg(&stuser->stdev->dev, "%s: %p\n", __func__, stuser);

	put_device(&stuser->stdev->dev);
	kfree(stuser);
}
/* Drop one reference on a switchtec_user; frees it on the last put. */
static void stuser_put(struct switchtec_user *stuser)
{
	kref_put(&stuser->kref, stuser_free);
}
/* Advance the MRPC state machine for one user, with a debug trace. */
static void stuser_set_state(struct switchtec_user *stuser,
			     enum mrpc_state state)
{
	/* requires the mrpc_mutex to already be held when called */

	const char * const state_names[] = {
		[MRPC_IDLE] = "IDLE",
		[MRPC_QUEUED] = "QUEUED",
		[MRPC_RUNNING] = "RUNNING",
		[MRPC_DONE] = "DONE",
	};

	stuser->state = state;

	dev_dbg(&stuser->stdev->dev, "stuser state %p -> %s",
		stuser, state_names[state]);
}
static void mrpc_complete_cmd(struct switchtec_dev *stdev);

/*
 * Submit the next queued command to the hardware mailbox, if the
 * hardware is free. Writing the cmd register starts execution; a
 * delayed work item polls for completion in case the completion
 * interrupt is missed.
 */
static void mrpc_cmd_submit(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_user *stuser;

	if (stdev->mrpc_busy)
		return;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	stuser_set_state(stuser, MRPC_RUNNING);
	stdev->mrpc_busy = 1;
	/* Payload must be in place before the cmd write triggers execution. */
	memcpy_toio(&stdev->mmio_mrpc->input_data,
		    stuser->data, stuser->data_len);
	iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd);

	/* The command may already have finished (fast commands). */
	stuser->status = ioread32(&stdev->mmio_mrpc->status);
	if (stuser->status != SWITCHTEC_MRPC_STATUS_INPROGRESS)
		mrpc_complete_cmd(stdev);

	schedule_delayed_work(&stdev->mrpc_timeout,
			      msecs_to_jiffies(500));
}
/*
 * Queue a user's command and kick the submission machinery. The extra
 * kref is owned by the queue and dropped in mrpc_complete_cmd().
 */
static int mrpc_queue_cmd(struct switchtec_user *stuser)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_dev *stdev = stuser->stdev;

	kref_get(&stuser->kref);
	stuser->read_len = sizeof(stuser->data);
	stuser_set_state(stuser, MRPC_QUEUED);
	/*
	 * NOTE(review): re-running init_completion() on a completion that a
	 * poll() waiter may already be sleeping on looks racy — confirm
	 * against the locking design.
	 */
	init_completion(&stuser->comp);
	list_add_tail(&stuser->list, &stdev->mrpc_queue);

	mrpc_cmd_submit(stdev);

	return 0;
}
/*
 * Harvest the result of the command at the head of the queue: copy the
 * output payload, wake the waiter, drop the queue's reference and
 * submit the next command if one is pending.
 */
static void mrpc_complete_cmd(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_user *stuser;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	stuser->status = ioread32(&stdev->mmio_mrpc->status);
	if (stuser->status == SWITCHTEC_MRPC_STATUS_INPROGRESS)
		return;	/* hardware not finished; the poll work will retry */

	stuser_set_state(stuser, MRPC_DONE);
	stuser->return_code = 0;

	if (stuser->status != SWITCHTEC_MRPC_STATUS_DONE)
		goto out;

	stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value);
	if (stuser->return_code != 0)
		goto out;

	memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
		      stuser->read_len);

out:
	complete_all(&stuser->comp);
	list_del_init(&stuser->list);
	stuser_put(stuser);	/* drop the reference taken in mrpc_queue_cmd */
	stdev->mrpc_busy = 0;

	mrpc_cmd_submit(stdev);
}
/* Work item run on MRPC-completion events: complete the head command. */
static void mrpc_event_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;

	stdev = container_of(work, struct switchtec_dev, mrpc_work);

	dev_dbg(&stdev->dev, "%s\n", __func__);

	mutex_lock(&stdev->mrpc_mutex);
	/* The event arrived, so the timeout fallback is no longer needed. */
	cancel_delayed_work(&stdev->mrpc_timeout);
	mrpc_complete_cmd(stdev);
	mutex_unlock(&stdev->mrpc_mutex);
}
/*
 * Fallback poll for command completion in case the completion event is
 * never delivered; re-arms itself while the command is still running.
 */
static void mrpc_timeout_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;
	u32 status;

	stdev = container_of(work, struct switchtec_dev, mrpc_timeout.work);

	dev_dbg(&stdev->dev, "%s\n", __func__);

	mutex_lock(&stdev->mrpc_mutex);

	status = ioread32(&stdev->mmio_mrpc->status);
	if (status == SWITCHTEC_MRPC_STATUS_INPROGRESS) {
		schedule_delayed_work(&stdev->mrpc_timeout,
				      msecs_to_jiffies(500));
		goto out;
	}

	mrpc_complete_cmd(stdev);

out:
	mutex_unlock(&stdev->mrpc_mutex);
}
  415. static ssize_t device_version_show(struct device *dev,
  416. struct device_attribute *attr, char *buf)
  417. {
  418. struct switchtec_dev *stdev = to_stdev(dev);
  419. u32 ver;
  420. ver = ioread32(&stdev->mmio_sys_info->device_version);
  421. return sprintf(buf, "%x\n", ver);
  422. }
  423. static DEVICE_ATTR_RO(device_version);
  424. static ssize_t fw_version_show(struct device *dev,
  425. struct device_attribute *attr, char *buf)
  426. {
  427. struct switchtec_dev *stdev = to_stdev(dev);
  428. u32 ver;
  429. ver = ioread32(&stdev->mmio_sys_info->firmware_version);
  430. return sprintf(buf, "%08x\n", ver);
  431. }
  432. static DEVICE_ATTR_RO(fw_version);
  433. static ssize_t io_string_show(char *buf, void __iomem *attr, size_t len)
  434. {
  435. int i;
  436. memcpy_fromio(buf, attr, len);
  437. buf[len] = '\n';
  438. buf[len + 1] = 0;
  439. for (i = len - 1; i > 0; i--) {
  440. if (buf[i] != ' ')
  441. break;
  442. buf[i] = '\n';
  443. buf[i + 1] = 0;
  444. }
  445. return strlen(buf);
  446. }
/*
 * Generate a read-only sysfs attribute exposing one fixed-width string
 * field of the sys_info register region via io_string_show().
 */
#define DEVICE_ATTR_SYS_INFO_STR(field) \
static ssize_t field ## _show(struct device *dev, \
	struct device_attribute *attr, char *buf) \
{ \
	struct switchtec_dev *stdev = to_stdev(dev); \
	return io_string_show(buf, &stdev->mmio_sys_info->field, \
			      sizeof(stdev->mmio_sys_info->field)); \
} \
\
static DEVICE_ATTR_RO(field)

DEVICE_ATTR_SYS_INFO_STR(vendor_id);
DEVICE_ATTR_SYS_INFO_STR(product_id);
DEVICE_ATTR_SYS_INFO_STR(product_revision);
DEVICE_ATTR_SYS_INFO_STR(component_vendor);
  461. static ssize_t component_id_show(struct device *dev,
  462. struct device_attribute *attr, char *buf)
  463. {
  464. struct switchtec_dev *stdev = to_stdev(dev);
  465. int id = ioread16(&stdev->mmio_sys_info->component_id);
  466. return sprintf(buf, "PM%04X\n", id);
  467. }
  468. static DEVICE_ATTR_RO(component_id);
  469. static ssize_t component_revision_show(struct device *dev,
  470. struct device_attribute *attr, char *buf)
  471. {
  472. struct switchtec_dev *stdev = to_stdev(dev);
  473. int rev = ioread8(&stdev->mmio_sys_info->component_revision);
  474. return sprintf(buf, "%d\n", rev);
  475. }
  476. static DEVICE_ATTR_RO(component_revision);
  477. static ssize_t partition_show(struct device *dev,
  478. struct device_attribute *attr, char *buf)
  479. {
  480. struct switchtec_dev *stdev = to_stdev(dev);
  481. return sprintf(buf, "%d\n", stdev->partition);
  482. }
  483. static DEVICE_ATTR_RO(partition);
  484. static ssize_t partition_count_show(struct device *dev,
  485. struct device_attribute *attr, char *buf)
  486. {
  487. struct switchtec_dev *stdev = to_stdev(dev);
  488. return sprintf(buf, "%d\n", stdev->partition_count);
  489. }
  490. static DEVICE_ATTR_RO(partition_count);
/* All sysfs attributes registered with the switchtec device class. */
static struct attribute *switchtec_device_attrs[] = {
	&dev_attr_device_version.attr,
	&dev_attr_fw_version.attr,
	&dev_attr_vendor_id.attr,
	&dev_attr_product_id.attr,
	&dev_attr_product_revision.attr,
	&dev_attr_component_vendor.attr,
	&dev_attr_component_id.attr,
	&dev_attr_component_revision.attr,
	&dev_attr_partition.attr,
	&dev_attr_partition_count.attr,
	NULL,
};

ATTRIBUTE_GROUPS(switchtec_device);
/* char dev open: allocate per-file state and make the fd non-seekable. */
static int switchtec_dev_open(struct inode *inode, struct file *filp)
{
	struct switchtec_dev *stdev;
	struct switchtec_user *stuser;

	stdev = container_of(inode->i_cdev, struct switchtec_dev, cdev);

	stuser = stuser_create(stdev);
	if (IS_ERR(stuser))
		return PTR_ERR(stuser);

	filp->private_data = stuser;
	nonseekable_open(inode, filp);

	dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);

	return 0;
}
/* char dev release: drop the file's reference on its user state. */
static int switchtec_dev_release(struct inode *inode, struct file *filp)
{
	struct switchtec_user *stuser = filp->private_data;

	stuser_put(stuser);

	return 0;
}
/*
 * Take mrpc_mutex and verify the device has not been removed.
 * Returns 0 with the mutex held, or a negative error with it released.
 */
static int lock_mutex_and_test_alive(struct switchtec_dev *stdev)
{
	if (mutex_lock_interruptible(&stdev->mrpc_mutex))
		return -EINTR;

	if (!stdev->alive) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -ENODEV;
	}

	return 0;
}
/*
 * write() submits an MRPC command: the first 4 bytes are the command
 * code, the remainder (up to SWITCHTEC_MRPC_PAYLOAD_SIZE) is the input
 * payload. Only one command may be outstanding per open file; a second
 * write before the result is read back fails with -EBADE.
 */
static ssize_t switchtec_dev_write(struct file *filp, const char __user *data,
				   size_t size, loff_t *off)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;

	if (size < sizeof(stuser->cmd) ||
	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
		return -EINVAL;

	stuser->data_len = size - sizeof(stuser->cmd);

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state != MRPC_IDLE) {
		rc = -EBADE;
		goto out;
	}

	rc = copy_from_user(&stuser->cmd, data, sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	data += sizeof(stuser->cmd);
	rc = copy_from_user(&stuser->data, data, size - sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	rc = mrpc_queue_cmd(stuser);

out:
	mutex_unlock(&stdev->mrpc_mutex);

	if (rc)
		return rc;

	return size;
}
  569. static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
  570. size_t size, loff_t *off)
  571. {
  572. struct switchtec_user *stuser = filp->private_data;
  573. struct switchtec_dev *stdev = stuser->stdev;
  574. int rc;
  575. if (size < sizeof(stuser->cmd) ||
  576. size > sizeof(stuser->cmd) + sizeof(stuser->data))
  577. return -EINVAL;
  578. rc = lock_mutex_and_test_alive(stdev);
  579. if (rc)
  580. return rc;
  581. if (stuser->state == MRPC_IDLE) {
  582. mutex_unlock(&stdev->mrpc_mutex);
  583. return -EBADE;
  584. }
  585. stuser->read_len = size - sizeof(stuser->return_code);
  586. mutex_unlock(&stdev->mrpc_mutex);
  587. if (filp->f_flags & O_NONBLOCK) {
  588. if (!try_wait_for_completion(&stuser->comp))
  589. return -EAGAIN;
  590. } else {
  591. rc = wait_for_completion_interruptible(&stuser->comp);
  592. if (rc < 0)
  593. return rc;
  594. }
  595. rc = lock_mutex_and_test_alive(stdev);
  596. if (rc)
  597. return rc;
  598. if (stuser->state != MRPC_DONE) {
  599. mutex_unlock(&stdev->mrpc_mutex);
  600. return -EBADE;
  601. }
  602. rc = copy_to_user(data, &stuser->return_code,
  603. sizeof(stuser->return_code));
  604. if (rc) {
  605. rc = -EFAULT;
  606. goto out;
  607. }
  608. data += sizeof(stuser->return_code);
  609. rc = copy_to_user(data, &stuser->data,
  610. size - sizeof(stuser->return_code));
  611. if (rc) {
  612. rc = -EFAULT;
  613. goto out;
  614. }
  615. stuser_set_state(stuser, MRPC_IDLE);
  616. out:
  617. mutex_unlock(&stdev->mrpc_mutex);
  618. if (stuser->status == SWITCHTEC_MRPC_STATUS_DONE)
  619. return size;
  620. else if (stuser->status == SWITCHTEC_MRPC_STATUS_INTERRUPTED)
  621. return -ENXIO;
  622. else
  623. return -EBADMSG;
  624. }
/*
 * poll(): POLLIN when the outstanding command has completed, POLLPRI
 * when hardware events have occurred since this file last read the
 * event summary. Reports hangup/error bits if the device has gone away.
 */
static unsigned int switchtec_dev_poll(struct file *filp, poll_table *wait)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int ret = 0;

	/* NOTE(review): waits directly on the completion's internal
	 * wait queue — confirm this is safe against init_completion()
	 * reuse in mrpc_queue_cmd(). */
	poll_wait(filp, &stuser->comp.wait, wait);
	poll_wait(filp, &stdev->event_wq, wait);

	if (lock_mutex_and_test_alive(stdev))
		return POLLIN | POLLRDHUP | POLLOUT | POLLERR | POLLHUP;

	mutex_unlock(&stdev->mrpc_mutex);

	if (try_wait_for_completion(&stuser->comp))
		ret |= POLLIN | POLLRDNORM;

	if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
		ret |= POLLPRI | POLLRDBAND;

	return ret;
}
/* ioctl: report overall flash size and the number of partitions. */
static int ioctl_flash_info(struct switchtec_dev *stdev,
			    struct switchtec_ioctl_flash_info __user *uinfo)
{
	struct switchtec_ioctl_flash_info info = {0};
	struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;

	info.flash_length = ioread32(&fi->flash_length);
	info.num_partitions = SWITCHTEC_IOCTL_NUM_PARTITIONS;

	if (copy_to_user(uinfo, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
/* Copy one flash partition's address/length registers into the ioctl reply. */
static void set_fw_info_part(struct switchtec_ioctl_flash_part_info *info,
			     struct partition_info __iomem *pi)
{
	info->address = ioread32(&pi->address);
	info->length = ioread32(&pi->length);
}
/*
 * ioctl: report address/length of one flash partition, plus flags for
 * whether it is the active (boot-selected) and/or currently running
 * image/config.
 */
static int ioctl_flash_part_info(struct switchtec_dev *stdev,
	struct switchtec_ioctl_flash_part_info __user *uinfo)
{
	struct switchtec_ioctl_flash_part_info info = {0};
	struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
	struct sys_info_regs __iomem *si = stdev->mmio_sys_info;
	u32 active_addr = -1;	/* sentinel: never matches for partitions
				 * without an "active" notion (nvlog/vendor) */

	if (copy_from_user(&info, uinfo, sizeof(info)))
		return -EFAULT;

	switch (info.flash_partition) {
	case SWITCHTEC_IOCTL_PART_CFG0:
		active_addr = ioread32(&fi->active_cfg);
		set_fw_info_part(&info, &fi->cfg0);
		if (ioread16(&si->cfg_running) == SWITCHTEC_CFG0_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_CFG1:
		active_addr = ioread32(&fi->active_cfg);
		set_fw_info_part(&info, &fi->cfg1);
		if (ioread16(&si->cfg_running) == SWITCHTEC_CFG1_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG0:
		active_addr = ioread32(&fi->active_img);
		set_fw_info_part(&info, &fi->img0);
		if (ioread16(&si->img_running) == SWITCHTEC_IMG0_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG1:
		active_addr = ioread32(&fi->active_img);
		set_fw_info_part(&info, &fi->img1);
		if (ioread16(&si->img_running) == SWITCHTEC_IMG1_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_NVLOG:
		set_fw_info_part(&info, &fi->nvlog);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR0:
		set_fw_info_part(&info, &fi->vendor[0]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR1:
		set_fw_info_part(&info, &fi->vendor[1]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR2:
		set_fw_info_part(&info, &fi->vendor[2]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR3:
		set_fw_info_part(&info, &fi->vendor[3]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR4:
		set_fw_info_part(&info, &fi->vendor[4]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR5:
		set_fw_info_part(&info, &fi->vendor[5]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR6:
		set_fw_info_part(&info, &fi->vendor[6]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR7:
		set_fw_info_part(&info, &fi->vendor[7]);
		break;
	default:
		return -EINVAL;
	}

	if (info.address == active_addr)
		info.active |= SWITCHTEC_IOCTL_PART_ACTIVE;

	if (copy_to_user(uinfo, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
  728. static int ioctl_event_summary(struct switchtec_dev *stdev,
  729. struct switchtec_user *stuser,
  730. struct switchtec_ioctl_event_summary __user *usum)
  731. {
  732. struct switchtec_ioctl_event_summary s = {0};
  733. int i;
  734. u32 reg;
  735. s.global = ioread32(&stdev->mmio_sw_event->global_summary);
  736. s.part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap);
  737. s.local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);
  738. for (i = 0; i < stdev->partition_count; i++) {
  739. reg = ioread32(&stdev->mmio_part_cfg_all[i].part_event_summary);
  740. s.part[i] = reg;
  741. }
  742. for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
  743. reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
  744. if (reg != MICROSEMI_VENDOR_ID)
  745. break;
  746. reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary);
  747. s.pff[i] = reg;
  748. }
  749. if (copy_to_user(usum, &s, sizeof(s)))
  750. return -EFAULT;
  751. stuser->event_cnt = atomic_read(&stdev->event_cnt);
  752. return 0;
  753. }
/* Locate a global event header register by byte offset (index unused). */
static u32 __iomem *global_ev_reg(struct switchtec_dev *stdev,
				  size_t offset, int index)
{
	return (void __iomem *)stdev->mmio_sw_event + offset;
}
/* Locate a partition event header register: index selects the partition. */
static u32 __iomem *part_ev_reg(struct switchtec_dev *stdev,
				size_t offset, int index)
{
	return (void __iomem *)&stdev->mmio_part_cfg_all[index] + offset;
}
/* Locate a PFF event header register: index selects the PFF instance. */
static u32 __iomem *pff_ev_reg(struct switchtec_dev *stdev,
			       size_t offset, int index)
{
	return (void __iomem *)&stdev->mmio_pff_csr[index] + offset;
}
/*
 * Table mapping each SWITCHTEC_IOCTL_EVENT_* id to the offset of its
 * event header register and the helper that locates the enclosing
 * register block (global, per-partition or per-PFF).
 */
#define EV_GLB(i, r)[i] = {offsetof(struct sw_event_regs, r), global_ev_reg}
#define EV_PAR(i, r)[i] = {offsetof(struct part_cfg_regs, r), part_ev_reg}
#define EV_PFF(i, r)[i] = {offsetof(struct pff_csr_regs, r), pff_ev_reg}

static const struct event_reg {
	size_t offset;	/* offset of the event header within its block */
	u32 __iomem *(*map_reg)(struct switchtec_dev *stdev,
				size_t offset, int index);
} event_regs[] = {
	EV_GLB(SWITCHTEC_IOCTL_EVENT_STACK_ERROR, stack_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_PPU_ERROR, ppu_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_ISP_ERROR, isp_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_SYS_RESET, sys_reset_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_EXC, fw_exception_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NMI, fw_nmi_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NON_FATAL, fw_non_fatal_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_FATAL, fw_fatal_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP, twi_mrpc_comp_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP_ASYNC,
	       twi_mrpc_comp_async_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP, cli_mrpc_comp_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP_ASYNC,
	       cli_mrpc_comp_async_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_GPIO_INT, gpio_interrupt_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_PART_RESET, part_reset_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP, mrpc_comp_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC, mrpc_comp_async_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_DYN_PART_BIND_COMP, dyn_binding_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_P2P, aer_in_p2p_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_VEP, aer_in_vep_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_DPC, dpc_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_CTS, cts_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_HOTPLUG, hotplug_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_IER, ier_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_THRESH, threshold_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_POWER_MGMT, power_mgmt_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_TLP_THROTTLING, tlp_throttling_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_FORCE_SPEED, force_speed_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT, credit_timeout_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_LINK_STATE, link_state_hdr),
};
  809. static u32 __iomem *event_hdr_addr(struct switchtec_dev *stdev,
  810. int event_id, int index)
  811. {
  812. size_t off;
  813. if (event_id < 0 || event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
  814. return ERR_PTR(-EINVAL);
  815. off = event_regs[event_id].offset;
  816. if (event_regs[event_id].map_reg == part_ev_reg) {
  817. if (index == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
  818. index = stdev->partition;
  819. else if (index < 0 || index >= stdev->partition_count)
  820. return ERR_PTR(-EINVAL);
  821. } else if (event_regs[event_id].map_reg == pff_ev_reg) {
  822. if (index < 0 || index >= stdev->pff_csr_count)
  823. return ERR_PTR(-EINVAL);
  824. }
  825. return event_regs[event_id].map_reg(stdev, off, index);
  826. }
/*
 * Read one event header and its data words, report the occurred bit and
 * hardware count to the caller, apply any requested enable/disable/clear
 * operations, and rewrite ctl->flags to reflect the resulting state.
 */
static int event_ctl(struct switchtec_dev *stdev,
		     struct switchtec_ioctl_event_ctl *ctl)
{
	int i;
	u32 __iomem *reg;
	u32 hdr;

	reg = event_hdr_addr(stdev, ctl->event_id, ctl->index);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	hdr = ioread32(reg);
	/* The event data words immediately follow the header register */
	for (i = 0; i < ARRAY_SIZE(ctl->data); i++)
		ctl->data[i] = ioread32(&reg[i + 1]);

	ctl->occurred = hdr & SWITCHTEC_EVENT_OCCURRED;
	/* Bits [12:5] of the header hold the hardware event counter */
	ctl->count = (hdr >> 5) & 0xFF;

	/* Writing CLEAR back acks the event; mask it out unless requested */
	if (!(ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_CLEAR))
		hdr &= ~SWITCHTEC_EVENT_CLEAR;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL)
		hdr |= SWITCHTEC_EVENT_EN_IRQ;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_POLL)
		hdr &= ~SWITCHTEC_EVENT_EN_IRQ;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG)
		hdr |= SWITCHTEC_EVENT_EN_LOG;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_LOG)
		hdr &= ~SWITCHTEC_EVENT_EN_LOG;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI)
		hdr |= SWITCHTEC_EVENT_EN_CLI;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_CLI)
		hdr &= ~SWITCHTEC_EVENT_EN_CLI;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL)
		hdr |= SWITCHTEC_EVENT_FATAL;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_FATAL)
		hdr &= ~SWITCHTEC_EVENT_FATAL;

	/* Only touch the hardware if some change was actually requested */
	if (ctl->flags)
		iowrite32(hdr, reg);

	/* Report the resulting enable state back through ctl->flags */
	ctl->flags = 0;
	if (hdr & SWITCHTEC_EVENT_EN_IRQ)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL;
	if (hdr & SWITCHTEC_EVENT_EN_LOG)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG;
	if (hdr & SWITCHTEC_EVENT_EN_CLI)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI;
	if (hdr & SWITCHTEC_EVENT_FATAL)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL;

	return 0;
}
  872. static int ioctl_event_ctl(struct switchtec_dev *stdev,
  873. struct switchtec_ioctl_event_ctl __user *uctl)
  874. {
  875. int ret;
  876. int nr_idxs;
  877. struct switchtec_ioctl_event_ctl ctl;
  878. if (copy_from_user(&ctl, uctl, sizeof(ctl)))
  879. return -EFAULT;
  880. if (ctl.event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
  881. return -EINVAL;
  882. if (ctl.flags & SWITCHTEC_IOCTL_EVENT_FLAG_UNUSED)
  883. return -EINVAL;
  884. if (ctl.index == SWITCHTEC_IOCTL_EVENT_IDX_ALL) {
  885. if (event_regs[ctl.event_id].map_reg == global_ev_reg)
  886. nr_idxs = 1;
  887. else if (event_regs[ctl.event_id].map_reg == part_ev_reg)
  888. nr_idxs = stdev->partition_count;
  889. else if (event_regs[ctl.event_id].map_reg == pff_ev_reg)
  890. nr_idxs = stdev->pff_csr_count;
  891. else
  892. return -EINVAL;
  893. for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) {
  894. ret = event_ctl(stdev, &ctl);
  895. if (ret < 0)
  896. return ret;
  897. }
  898. } else {
  899. ret = event_ctl(stdev, &ctl);
  900. if (ret < 0)
  901. return ret;
  902. }
  903. if (copy_to_user(uctl, &ctl, sizeof(ctl)))
  904. return -EFAULT;
  905. return 0;
  906. }
  907. static int ioctl_pff_to_port(struct switchtec_dev *stdev,
  908. struct switchtec_ioctl_pff_port *up)
  909. {
  910. int i, part;
  911. u32 reg;
  912. struct part_cfg_regs *pcfg;
  913. struct switchtec_ioctl_pff_port p;
  914. if (copy_from_user(&p, up, sizeof(p)))
  915. return -EFAULT;
  916. p.port = -1;
  917. for (part = 0; part < stdev->partition_count; part++) {
  918. pcfg = &stdev->mmio_part_cfg_all[part];
  919. p.partition = part;
  920. reg = ioread32(&pcfg->usp_pff_inst_id);
  921. if (reg == p.pff) {
  922. p.port = 0;
  923. break;
  924. }
  925. reg = ioread32(&pcfg->vep_pff_inst_id);
  926. if (reg == p.pff) {
  927. p.port = SWITCHTEC_IOCTL_PFF_VEP;
  928. break;
  929. }
  930. for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
  931. reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
  932. if (reg != p.pff)
  933. continue;
  934. p.port = i + 1;
  935. break;
  936. }
  937. if (p.port != -1)
  938. break;
  939. }
  940. if (copy_to_user(up, &p, sizeof(p)))
  941. return -EFAULT;
  942. return 0;
  943. }
  944. static int ioctl_port_to_pff(struct switchtec_dev *stdev,
  945. struct switchtec_ioctl_pff_port *up)
  946. {
  947. struct switchtec_ioctl_pff_port p;
  948. struct part_cfg_regs *pcfg;
  949. if (copy_from_user(&p, up, sizeof(p)))
  950. return -EFAULT;
  951. if (p.partition == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
  952. pcfg = stdev->mmio_part_cfg;
  953. else if (p.partition < stdev->partition_count)
  954. pcfg = &stdev->mmio_part_cfg_all[p.partition];
  955. else
  956. return -EINVAL;
  957. switch (p.port) {
  958. case 0:
  959. p.pff = ioread32(&pcfg->usp_pff_inst_id);
  960. break;
  961. case SWITCHTEC_IOCTL_PFF_VEP:
  962. p.pff = ioread32(&pcfg->vep_pff_inst_id);
  963. break;
  964. default:
  965. if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
  966. return -EINVAL;
  967. p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
  968. break;
  969. }
  970. if (copy_to_user(up, &p, sizeof(p)))
  971. return -EFAULT;
  972. return 0;
  973. }
  974. static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd,
  975. unsigned long arg)
  976. {
  977. struct switchtec_user *stuser = filp->private_data;
  978. struct switchtec_dev *stdev = stuser->stdev;
  979. int rc;
  980. void __user *argp = (void __user *)arg;
  981. rc = lock_mutex_and_test_alive(stdev);
  982. if (rc)
  983. return rc;
  984. switch (cmd) {
  985. case SWITCHTEC_IOCTL_FLASH_INFO:
  986. rc = ioctl_flash_info(stdev, argp);
  987. break;
  988. case SWITCHTEC_IOCTL_FLASH_PART_INFO:
  989. rc = ioctl_flash_part_info(stdev, argp);
  990. break;
  991. case SWITCHTEC_IOCTL_EVENT_SUMMARY:
  992. rc = ioctl_event_summary(stdev, stuser, argp);
  993. break;
  994. case SWITCHTEC_IOCTL_EVENT_CTL:
  995. rc = ioctl_event_ctl(stdev, argp);
  996. break;
  997. case SWITCHTEC_IOCTL_PFF_TO_PORT:
  998. rc = ioctl_pff_to_port(stdev, argp);
  999. break;
  1000. case SWITCHTEC_IOCTL_PORT_TO_PFF:
  1001. rc = ioctl_port_to_pff(stdev, argp);
  1002. break;
  1003. default:
  1004. rc = -ENOTTY;
  1005. break;
  1006. }
  1007. mutex_unlock(&stdev->mrpc_mutex);
  1008. return rc;
  1009. }
/* File operations for the switchtec management character device */
static const struct file_operations switchtec_fops = {
	.owner = THIS_MODULE,
	.open = switchtec_dev_open,
	.release = switchtec_dev_release,
	.write = switchtec_dev_write,
	.read = switchtec_dev_read,
	.poll = switchtec_dev_poll,
	.unlocked_ioctl = switchtec_dev_ioctl,
	.compat_ioctl = switchtec_dev_ioctl,
};
  1020. static void stdev_release(struct device *dev)
  1021. {
  1022. struct switchtec_dev *stdev = to_stdev(dev);
  1023. kfree(stdev);
  1024. }
/*
 * Tear down a dying device: stop bus mastering, cancel the MRPC
 * timeout work, mark the hardware dead under the mutex, and complete
 * and release every queued MRPC user so nobody waits forever.
 */
static void stdev_kill(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser, *tmpuser;

	pci_clear_master(stdev->pdev);

	cancel_delayed_work_sync(&stdev->mrpc_timeout);

	/* Mark the hardware as unavailable and complete all completions */
	mutex_lock(&stdev->mrpc_mutex);
	stdev->alive = false;

	/* Wake up and kill any users waiting on an MRPC request */
	list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
		complete_all(&stuser->comp);
		list_del_init(&stuser->list);
		stuser_put(stuser);
	}

	mutex_unlock(&stdev->mrpc_mutex);

	/* Wake up any users waiting on event_wq */
	wake_up_interruptible(&stdev->event_wq);
}
/*
 * Allocate and initialize a switchtec_dev: MRPC queue/work state,
 * event wait queue, the embedded struct device and cdev, and a minor
 * number from the ida.  The structure is freed by stdev_release()
 * when the last device reference is dropped.
 */
static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev;
	int minor;
	struct device *dev;
	struct cdev *cdev;
	int rc;

	stdev = kzalloc_node(sizeof(*stdev), GFP_KERNEL,
			     dev_to_node(&pdev->dev));
	if (!stdev)
		return ERR_PTR(-ENOMEM);

	stdev->alive = true;
	stdev->pdev = pdev;
	INIT_LIST_HEAD(&stdev->mrpc_queue);
	mutex_init(&stdev->mrpc_mutex);
	stdev->mrpc_busy = 0;
	INIT_WORK(&stdev->mrpc_work, mrpc_event_work);
	INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work);
	init_waitqueue_head(&stdev->event_wq);
	atomic_set(&stdev->event_cnt, 0);

	dev = &stdev->dev;
	device_initialize(dev);
	dev->class = switchtec_class;
	dev->parent = &pdev->dev;
	dev->groups = switchtec_device_groups;
	dev->release = stdev_release;

	minor = ida_simple_get(&switchtec_minor_ida, 0, 0,
			       GFP_KERNEL);
	if (minor < 0) {
		rc = minor;
		goto err_put;
	}

	dev->devt = MKDEV(MAJOR(switchtec_devt), minor);
	dev_set_name(dev, "switchtec%d", minor);

	cdev = &stdev->cdev;
	cdev_init(cdev, &switchtec_fops);
	cdev->owner = THIS_MODULE;

	return stdev;

err_put:
	/* Drops the device_initialize() reference; frees via stdev_release() */
	put_device(&stdev->dev);
	return ERR_PTR(rc);
}
  1085. static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
  1086. {
  1087. size_t off = event_regs[eid].offset;
  1088. u32 __iomem *hdr_reg;
  1089. u32 hdr;
  1090. hdr_reg = event_regs[eid].map_reg(stdev, off, idx);
  1091. hdr = ioread32(hdr_reg);
  1092. if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
  1093. return 0;
  1094. dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
  1095. hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
  1096. iowrite32(hdr, hdr_reg);
  1097. return 1;
  1098. }
  1099. static int mask_all_events(struct switchtec_dev *stdev, int eid)
  1100. {
  1101. int idx;
  1102. int count = 0;
  1103. if (event_regs[eid].map_reg == part_ev_reg) {
  1104. for (idx = 0; idx < stdev->partition_count; idx++)
  1105. count += mask_event(stdev, eid, idx);
  1106. } else if (event_regs[eid].map_reg == pff_ev_reg) {
  1107. for (idx = 0; idx < stdev->pff_csr_count; idx++) {
  1108. if (!stdev->pff_local[idx])
  1109. continue;
  1110. count += mask_event(stdev, eid, idx);
  1111. }
  1112. } else {
  1113. count += mask_event(stdev, eid, 0);
  1114. }
  1115. return count;
  1116. }
/*
 * Shared event interrupt handler: ack an MRPC completion (scheduling
 * the completion work), then mask any other pending events and wake
 * pollers if anything fired.
 */
static irqreturn_t switchtec_event_isr(int irq, void *dev)
{
	struct switchtec_dev *stdev = dev;
	u32 reg;
	irqreturn_t ret = IRQ_NONE;
	int eid, event_count = 0;

	reg = ioread32(&stdev->mmio_part_cfg->mrpc_comp_hdr);
	if (reg & SWITCHTEC_EVENT_OCCURRED) {
		dev_dbg(&stdev->dev, "%s: mrpc comp\n", __func__);
		ret = IRQ_HANDLED;
		schedule_work(&stdev->mrpc_work);
		/* Write the header back to ack the completion event */
		iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
	}

	for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++)
		event_count += mask_all_events(stdev, eid);

	if (event_count) {
		atomic_inc(&stdev->event_cnt);
		wake_up_interruptible(&stdev->event_wq);
		dev_dbg(&stdev->dev, "%s: %d events\n", __func__,
			event_count);
		return IRQ_HANDLED;
	}

	return ret;
}
  1141. static int switchtec_init_isr(struct switchtec_dev *stdev)
  1142. {
  1143. int nvecs;
  1144. int event_irq;
  1145. nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, 4,
  1146. PCI_IRQ_MSIX | PCI_IRQ_MSI);
  1147. if (nvecs < 0)
  1148. return nvecs;
  1149. event_irq = ioread32(&stdev->mmio_part_cfg->vep_vector_number);
  1150. if (event_irq < 0 || event_irq >= nvecs)
  1151. return -EFAULT;
  1152. event_irq = pci_irq_vector(stdev->pdev, event_irq);
  1153. if (event_irq < 0)
  1154. return event_irq;
  1155. return devm_request_irq(&stdev->pdev->dev, event_irq,
  1156. switchtec_event_isr, 0,
  1157. KBUILD_MODNAME, stdev);
  1158. }
  1159. static void init_pff(struct switchtec_dev *stdev)
  1160. {
  1161. int i;
  1162. u32 reg;
  1163. struct part_cfg_regs *pcfg = stdev->mmio_part_cfg;
  1164. for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
  1165. reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
  1166. if (reg != MICROSEMI_VENDOR_ID)
  1167. break;
  1168. }
  1169. stdev->pff_csr_count = i;
  1170. reg = ioread32(&pcfg->usp_pff_inst_id);
  1171. if (reg < SWITCHTEC_MAX_PFF_CSR)
  1172. stdev->pff_local[reg] = 1;
  1173. reg = ioread32(&pcfg->vep_pff_inst_id);
  1174. if (reg < SWITCHTEC_MAX_PFF_CSR)
  1175. stdev->pff_local[reg] = 1;
  1176. for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
  1177. reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
  1178. if (reg < SWITCHTEC_MAX_PFF_CSR)
  1179. stdev->pff_local[reg] = 1;
  1180. }
  1181. }
/*
 * Enable the PCI device, map BAR 0 and carve it into the register
 * regions (MRPC, software event, system info, flash info, NTB,
 * partition config and PFF CSR) used by the rest of the driver.
 */
static int switchtec_init_pci(struct switchtec_dev *stdev,
			      struct pci_dev *pdev)
{
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 0x1, KBUILD_MODNAME);
	if (rc)
		return rc;

	pci_set_master(pdev);

	stdev->mmio = pcim_iomap_table(pdev)[0];
	stdev->mmio_mrpc = stdev->mmio + SWITCHTEC_GAS_MRPC_OFFSET;
	stdev->mmio_sw_event = stdev->mmio + SWITCHTEC_GAS_SW_EVENT_OFFSET;
	stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
	stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
	stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;
	stdev->partition = ioread8(&stdev->mmio_sys_info->partition_id);
	stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
	stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
	stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
	stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET;

	/* Guard against the hardware reporting 0 partitions; treat as 1 */
	if (stdev->partition_count < 1)
		stdev->partition_count = 1;

	init_pff(stdev);

	pci_set_drvdata(pdev, stdev);

	return 0;
}
/*
 * Probe: create the device state, map and initialize the hardware,
 * hook the event interrupt, enable MRPC completion interrupts and
 * expose the management character device.
 */
static int switchtec_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct switchtec_dev *stdev;
	int rc;

	stdev = stdev_create(pdev);
	if (IS_ERR(stdev))
		return PTR_ERR(stdev);

	rc = switchtec_init_pci(stdev, pdev);
	if (rc)
		goto err_put;

	rc = switchtec_init_isr(stdev);
	if (rc) {
		dev_err(&stdev->dev, "failed to init isr.\n");
		goto err_put;
	}

	/* Clear any stale MRPC completion and enable its interrupt */
	iowrite32(SWITCHTEC_EVENT_CLEAR |
		  SWITCHTEC_EVENT_EN_IRQ,
		  &stdev->mmio_part_cfg->mrpc_comp_hdr);

	rc = cdev_device_add(&stdev->cdev, &stdev->dev);
	if (rc)
		goto err_devadd;

	dev_info(&stdev->dev, "Management device registered.\n");

	return 0;

err_devadd:
	stdev_kill(stdev);
err_put:
	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	put_device(&stdev->dev);
	return rc;
}
/*
 * Remove: unregister the character device, release the minor number,
 * kill any outstanding users and drop the final device reference.
 */
static void switchtec_pci_remove(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev = pci_get_drvdata(pdev);

	pci_set_drvdata(pdev, NULL);

	cdev_device_del(&stdev->cdev, &stdev->dev);
	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	dev_info(&stdev->dev, "unregistered.\n");

	stdev_kill(stdev);
	put_device(&stdev->dev);
}
/*
 * Each Switchtec part is matched twice: once with the management class
 * code and once with the NTB class code.
 */
#define SWITCHTEC_PCI_DEVICE(device_id) \
	{ \
		.vendor     = MICROSEMI_VENDOR_ID, \
		.device     = device_id, \
		.subvendor  = PCI_ANY_ID, \
		.subdevice  = PCI_ANY_ID, \
		.class      = MICROSEMI_MGMT_CLASSCODE, \
		.class_mask = 0xFFFFFFFF, \
	}, \
	{ \
		.vendor     = MICROSEMI_VENDOR_ID, \
		.device     = device_id, \
		.subvendor  = PCI_ANY_ID, \
		.subdevice  = PCI_ANY_ID, \
		.class      = MICROSEMI_NTB_CLASSCODE, \
		.class_mask = 0xFFFFFFFF, \
	}

static const struct pci_device_id switchtec_pci_tbl[] = {
	SWITCHTEC_PCI_DEVICE(0x8531),  //PFX 24xG3
	SWITCHTEC_PCI_DEVICE(0x8532),  //PFX 32xG3
	SWITCHTEC_PCI_DEVICE(0x8533),  //PFX 48xG3
	SWITCHTEC_PCI_DEVICE(0x8534),  //PFX 64xG3
	SWITCHTEC_PCI_DEVICE(0x8535),  //PFX 80xG3
	SWITCHTEC_PCI_DEVICE(0x8536),  //PFX 96xG3
	SWITCHTEC_PCI_DEVICE(0x8543),  //PSX 48xG3
	SWITCHTEC_PCI_DEVICE(0x8544),  //PSX 64xG3
	SWITCHTEC_PCI_DEVICE(0x8545),  //PSX 80xG3
	SWITCHTEC_PCI_DEVICE(0x8546),  //PSX 96xG3
	SWITCHTEC_PCI_DEVICE(0x8551),  //PAX 24XG3
	SWITCHTEC_PCI_DEVICE(0x8552),  //PAX 32XG3
	SWITCHTEC_PCI_DEVICE(0x8553),  //PAX 48XG3
	SWITCHTEC_PCI_DEVICE(0x8554),  //PAX 64XG3
	SWITCHTEC_PCI_DEVICE(0x8555),  //PAX 80XG3
	SWITCHTEC_PCI_DEVICE(0x8556),  //PAX 96XG3
	SWITCHTEC_PCI_DEVICE(0x8561),  //PFXL 24XG3
	SWITCHTEC_PCI_DEVICE(0x8562),  //PFXL 32XG3
	SWITCHTEC_PCI_DEVICE(0x8563),  //PFXL 48XG3
	SWITCHTEC_PCI_DEVICE(0x8564),  //PFXL 64XG3
	SWITCHTEC_PCI_DEVICE(0x8565),  //PFXL 80XG3
	SWITCHTEC_PCI_DEVICE(0x8566),  //PFXL 96XG3
	SWITCHTEC_PCI_DEVICE(0x8571),  //PFXI 24XG3
	SWITCHTEC_PCI_DEVICE(0x8572),  //PFXI 32XG3
	SWITCHTEC_PCI_DEVICE(0x8573),  //PFXI 48XG3
	SWITCHTEC_PCI_DEVICE(0x8574),  //PFXI 64XG3
	SWITCHTEC_PCI_DEVICE(0x8575),  //PFXI 80XG3
	SWITCHTEC_PCI_DEVICE(0x8576),  //PFXI 96XG3
	{0}
};
MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
/* PCI driver glue for the Switchtec management interface */
static struct pci_driver switchtec_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = switchtec_pci_tbl,
	.probe = switchtec_pci_probe,
	.remove = switchtec_pci_remove,
};
/*
 * Module init: reserve the char-dev region, create the "switchtec"
 * class and register the PCI driver, unwinding in reverse order on
 * failure.
 */
static int __init switchtec_init(void)
{
	int rc;

	rc = alloc_chrdev_region(&switchtec_devt, 0, max_devices,
				 "switchtec");
	if (rc)
		return rc;

	switchtec_class = class_create(THIS_MODULE, "switchtec");
	if (IS_ERR(switchtec_class)) {
		rc = PTR_ERR(switchtec_class);
		goto err_create_class;
	}

	rc = pci_register_driver(&switchtec_pci_driver);
	if (rc)
		goto err_pci_register;

	pr_info(KBUILD_MODNAME ": loaded.\n");

	return 0;

err_pci_register:
	class_destroy(switchtec_class);

err_create_class:
	unregister_chrdev_region(switchtec_devt, max_devices);

	return rc;
}
module_init(switchtec_init);
/* Module exit: undo switchtec_init() and drain the minor-number ida. */
static void __exit switchtec_exit(void)
{
	pci_unregister_driver(&switchtec_pci_driver);
	class_destroy(switchtec_class);
	unregister_chrdev_region(switchtec_devt, max_devices);
	ida_destroy(&switchtec_minor_ida);

	pr_info(KBUILD_MODNAME ": unloaded.\n");
}
module_exit(switchtec_exit);