switchtec.c 38 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596
  1. /*
  2. * Microsemi Switchtec(tm) PCIe Management Driver
  3. * Copyright (c) 2017, Microsemi Corporation
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms and conditions of the GNU General Public License,
  7. * version 2, as published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope it will be useful, but WITHOUT
  10. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  12. * more details.
  13. *
  14. */
  15. #include <linux/switchtec_ioctl.h>
  16. #include <linux/interrupt.h>
  17. #include <linux/module.h>
  18. #include <linux/fs.h>
  19. #include <linux/uaccess.h>
  20. #include <linux/poll.h>
  21. #include <linux/pci.h>
  22. #include <linux/cdev.h>
  23. #include <linux/wait.h>
MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Microsemi Corporation");

/* Upper bound on char-dev instances; sizes the reserved dev_t region. */
static int max_devices = 16;
module_param(max_devices, int, 0644);
MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");

static dev_t switchtec_devt;			/* base of the char-dev region */
static struct class *switchtec_class;		/* sysfs class for device nodes */
static DEFINE_IDA(switchtec_minor_ida);		/* per-device minor allocator */

#define MICROSEMI_VENDOR_ID 0x11f8
#define MICROSEMI_NTB_CLASSCODE 0x068000
#define MICROSEMI_MGMT_CLASSCODE 0x058000

/* Size of each MRPC input/output buffer in the GAS (see struct mrpc_regs). */
#define SWITCHTEC_MRPC_PAYLOAD_SIZE 1024
/* Maximum number of PFF CSR register blocks the driver will scan. */
#define SWITCHTEC_MAX_PFF_CSR 48

/* Bit definitions for the per-event header registers. */
#define SWITCHTEC_EVENT_OCCURRED BIT(0)
#define SWITCHTEC_EVENT_CLEAR BIT(0)
#define SWITCHTEC_EVENT_EN_LOG BIT(1)
#define SWITCHTEC_EVENT_EN_CLI BIT(2)
#define SWITCHTEC_EVENT_EN_IRQ BIT(3)
#define SWITCHTEC_EVENT_FATAL BIT(4)
/*
 * Byte offsets of the register regions within the device's Global
 * Address Space (GAS) BAR mapping.
 */
enum {
	SWITCHTEC_GAS_MRPC_OFFSET = 0x0000,
	SWITCHTEC_GAS_TOP_CFG_OFFSET = 0x1000,
	SWITCHTEC_GAS_SW_EVENT_OFFSET = 0x1800,
	SWITCHTEC_GAS_SYS_INFO_OFFSET = 0x2000,
	SWITCHTEC_GAS_FLASH_INFO_OFFSET = 0x2200,
	SWITCHTEC_GAS_PART_CFG_OFFSET = 0x4000,
	SWITCHTEC_GAS_NTB_OFFSET = 0x10000,
	SWITCHTEC_GAS_PFF_CSR_OFFSET = 0x134000,
};
/*
 * MRPC (firmware mailbox RPC) register region.  Writing @cmd starts a
 * command using the bytes in @input_data; results appear in @status,
 * @ret_value and @output_data.  __packed: layout must match hardware.
 */
struct mrpc_regs {
	u8 input_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
	u8 output_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
	u32 cmd;
	u32 status;
	u32 ret_value;
} __packed;
/* Values reported in mrpc_regs.status after a command is issued. */
enum mrpc_status {
	SWITCHTEC_MRPC_STATUS_INPROGRESS = 1,
	SWITCHTEC_MRPC_STATUS_DONE = 2,
	SWITCHTEC_MRPC_STATUS_ERROR = 0xFF,
	SWITCHTEC_MRPC_STATUS_INTERRUPTED = 0x100,
};
/*
 * Switch-wide event region.  Each event has a header register (*_hdr),
 * most followed by data words; global_summary and part_event_bitmap
 * aggregate pending events (see ioctl_event_summary()).  __packed:
 * layout must match hardware.
 */
struct sw_event_regs {
	u64 event_report_ctrl;
	u64 reserved1;
	u64 part_event_bitmap;
	u64 reserved2;
	u32 global_summary;
	u32 reserved3[3];
	u32 stack_error_event_hdr;
	u32 stack_error_event_data;
	u32 reserved4[4];
	u32 ppu_error_event_hdr;
	u32 ppu_error_event_data;
	u32 reserved5[4];
	u32 isp_error_event_hdr;
	u32 isp_error_event_data;
	u32 reserved6[4];
	u32 sys_reset_event_hdr;
	u32 reserved7[5];
	u32 fw_exception_hdr;
	u32 reserved8[5];
	u32 fw_nmi_hdr;
	u32 reserved9[5];
	u32 fw_non_fatal_hdr;
	u32 reserved10[5];
	u32 fw_fatal_hdr;
	u32 reserved11[5];
	u32 twi_mrpc_comp_hdr;
	u32 twi_mrpc_comp_data;
	u32 reserved12[4];
	u32 twi_mrpc_comp_async_hdr;
	u32 twi_mrpc_comp_async_data;
	u32 reserved13[4];
	u32 cli_mrpc_comp_hdr;
	u32 cli_mrpc_comp_data;
	u32 reserved14[4];
	u32 cli_mrpc_comp_async_hdr;
	u32 cli_mrpc_comp_async_data;
	u32 reserved15[4];
	u32 gpio_interrupt_hdr;
	u32 gpio_interrupt_data;
	u32 reserved16[4];
} __packed;
/*
 * System information region: identity/version registers plus fixed-width,
 * space-padded ASCII identity strings (exposed via sysfs through
 * io_string_show()).  __packed: layout must match hardware.
 */
struct sys_info_regs {
	u32 device_id;
	u32 device_version;
	u32 firmware_version;
	u32 reserved1;
	u32 vendor_table_revision;
	u32 table_format_version;
	u32 partition_id;
	u32 cfg_file_fmt_version;
	u32 reserved2[58];
	char vendor_id[8];
	char product_id[16];
	char product_revision[4];
	char component_vendor[8];
	u16 component_id;
	u8 component_revision;
} __packed;
/*
 * Flash layout information region: addresses of the active firmware
 * image/config and the address/length of each flash partition.
 *
 * NOTE(review): unlike the other GAS structs this one is not marked
 * __packed; all members are u32 so the layout is naturally packed —
 * confirm this is intentional.
 */
struct flash_info_regs {
	u32 flash_part_map_upd_idx;

	struct active_partition_info {
		u32 address;
		u32 build_version;
		u32 build_string;
	} active_img;

	struct active_partition_info active_cfg;
	struct active_partition_info inactive_img;
	struct active_partition_info inactive_cfg;

	u32 flash_length;

	struct partition_info {
		u32 address;
		u32 length;
	} cfg0;

	struct partition_info cfg1;
	struct partition_info img0;
	struct partition_info img1;
	struct partition_info nvlog;
	struct partition_info vendor[8];
};
/*
 * NTB information region; partition_count/partition_id locate this
 * host's partition among all partitions of the switch.  __packed:
 * layout must match hardware (u64 ep_map is unaligned otherwise).
 */
struct ntb_info_regs {
	u8 partition_count;
	u8 partition_id;
	u16 reserved1;
	u64 ep_map;
	u16 requester_id;
} __packed;
/*
 * Per-partition configuration region (one instance per partition).
 * Includes the partition's event summary and per-event header/data
 * registers.  __packed: layout must match hardware.
 */
struct part_cfg_regs {
	u32 status;
	u32 state;
	u32 port_cnt;
	u32 usp_port_mode;
	u32 usp_pff_inst_id;
	u32 vep_pff_inst_id;
	u32 dsp_pff_inst_id[47];
	u32 reserved1[11];
	u16 vep_vector_number;
	u16 usp_vector_number;
	u32 port_event_bitmap;
	u32 reserved2[3];
	u32 part_event_summary;
	u32 reserved3[3];
	u32 part_reset_hdr;
	u32 part_reset_data[5];
	u32 mrpc_comp_hdr;
	u32 mrpc_comp_data[5];
	u32 mrpc_comp_async_hdr;
	u32 mrpc_comp_async_data[5];
	u32 dyn_binding_hdr;
	u32 dyn_binding_data[5];
	u32 reserved4[159];
} __packed;
/* Bits in part_cfg_regs.part_event_summary. */
enum {
	SWITCHTEC_PART_CFG_EVENT_RESET = 1 << 0,
	SWITCHTEC_PART_CFG_EVENT_MRPC_CMP = 1 << 1,
	SWITCHTEC_PART_CFG_EVENT_MRPC_ASYNC_CMP = 1 << 2,
	SWITCHTEC_PART_CFG_EVENT_DYN_PART_CMP = 1 << 3,
};
/*
 * Per-PFF (PCI Function Framework) CSR block, one per switch port
 * function.  A valid instance has vendor_id == MICROSEMI_VENDOR_ID
 * (used as the scan terminator in ioctl_event_summary()).  __packed:
 * layout must match hardware.
 */
struct pff_csr_regs {
	u16 vendor_id;
	u16 device_id;
	u32 pci_cfg_header[15];
	u32 pci_cap_region[48];
	u32 pcie_cap_region[448];
	u32 indirect_gas_window[128];
	u32 indirect_gas_window_off;
	u32 reserved[127];
	u32 pff_event_summary;
	u32 reserved2[3];
	u32 aer_in_p2p_hdr;
	u32 aer_in_p2p_data[5];
	u32 aer_in_vep_hdr;
	u32 aer_in_vep_data[5];
	u32 dpc_hdr;
	u32 dpc_data[5];
	u32 cts_hdr;
	u32 cts_data[5];
	u32 reserved3[6];
	u32 hotplug_hdr;
	u32 hotplug_data[5];
	u32 ier_hdr;
	u32 ier_data[5];
	u32 threshold_hdr;
	u32 threshold_data[5];
	u32 power_mgmt_hdr;
	u32 power_mgmt_data[5];
	u32 tlp_throttling_hdr;
	u32 tlp_throttling_data[5];
	u32 force_speed_hdr;
	u32 force_speed_data[5];
	u32 credit_timeout_hdr;
	u32 credit_timeout_data[5];
	u32 link_state_hdr;
	u32 link_state_data[5];
	u32 reserved4[174];
} __packed;
/*
 * Per-device state.  One instance per managed switch; embeds the
 * struct device whose refcount governs its lifetime (see
 * stuser_create()/stuser_free()).
 */
struct switchtec_dev {
	struct pci_dev *pdev;
	struct device dev;
	struct cdev cdev;

	int partition;			/* this host's partition number */
	int partition_count;
	int pff_csr_count;
	char pff_local[SWITCHTEC_MAX_PFF_CSR];

	/* Pointers into the single GAS BAR mapping (@mmio). */
	void __iomem *mmio;
	struct mrpc_regs __iomem *mmio_mrpc;
	struct sw_event_regs __iomem *mmio_sw_event;
	struct sys_info_regs __iomem *mmio_sys_info;
	struct flash_info_regs __iomem *mmio_flash_info;
	struct ntb_info_regs __iomem *mmio_ntb;
	struct part_cfg_regs __iomem *mmio_part_cfg;	/* this partition */
	struct part_cfg_regs __iomem *mmio_part_cfg_all;/* array, all partitions */
	struct pff_csr_regs __iomem *mmio_pff_csr;

	/*
	 * The mrpc mutex must be held when accessing the other
	 * mrpc_ fields, alive flag and stuser->state field
	 */
	struct mutex mrpc_mutex;
	struct list_head mrpc_queue;	/* FIFO of pending switchtec_users */
	int mrpc_busy;			/* a command is in flight in hardware */
	struct work_struct mrpc_work;	/* completes finished commands */
	struct delayed_work mrpc_timeout;	/* 500 ms status poll fallback */

	bool alive;			/* cleared when the device goes away */

	wait_queue_head_t event_wq;
	atomic_t event_cnt;		/* bumped per event; polled by users */
};
/* Up-cast from the embedded struct device to its switchtec_dev. */
static struct switchtec_dev *to_stdev(struct device *dev)
{
	return container_of(dev, struct switchtec_dev, dev);
}
/*
 * Per-user MRPC lifecycle; protected by stdev->mrpc_mutex.
 * IDLE -> QUEUED (write) -> RUNNING (submitted to hw) -> DONE -> IDLE (read).
 */
enum mrpc_state {
	MRPC_IDLE = 0,
	MRPC_QUEUED,
	MRPC_RUNNING,
	MRPC_DONE,
};
/*
 * Per-open-file context.  Refcounted: the open file holds one kref and
 * mrpc_queue_cmd() takes another for the duration of a queued command.
 */
struct switchtec_user {
	struct switchtec_dev *stdev;

	enum mrpc_state state;		/* protected by stdev->mrpc_mutex */

	struct completion comp;		/* completed when the command finishes */
	struct kref kref;
	struct list_head list;		/* entry on stdev->mrpc_queue */

	u32 cmd;			/* MRPC command number from write() */
	u32 status;			/* last hardware status snapshot */
	u32 return_code;		/* MRPC ret_value on DONE */
	size_t data_len;		/* input payload length */
	size_t read_len;		/* output bytes to copy back */
	unsigned char data[SWITCHTEC_MRPC_PAYLOAD_SIZE]; /* in/out payload */
	int event_cnt;			/* event counter snapshot for poll() */
};
/*
 * Allocate and initialize a user context for a newly opened file.
 *
 * Takes a reference on the device's struct device so the stdev outlives
 * the user; dropped in stuser_free().  The event counter snapshot makes
 * poll() report only events newer than the open.
 *
 * Returns the new context or ERR_PTR(-ENOMEM).
 */
static struct switchtec_user *stuser_create(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser;

	stuser = kzalloc(sizeof(*stuser), GFP_KERNEL);
	if (!stuser)
		return ERR_PTR(-ENOMEM);

	get_device(&stdev->dev);
	stuser->stdev = stdev;
	kref_init(&stuser->kref);
	INIT_LIST_HEAD(&stuser->list);
	init_completion(&stuser->comp);
	stuser->event_cnt = atomic_read(&stdev->event_cnt);

	dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);

	return stuser;
}
/*
 * kref release callback: drop the device reference taken in
 * stuser_create() and free the user context.
 */
static void stuser_free(struct kref *kref)
{
	struct switchtec_user *stuser;

	stuser = container_of(kref, struct switchtec_user, kref);

	dev_dbg(&stuser->stdev->dev, "%s: %p\n", __func__, stuser);

	put_device(&stuser->stdev->dev);
	kfree(stuser);
}
/* Drop one reference on the user context; frees it on the last put. */
static void stuser_put(struct switchtec_user *stuser)
{
	kref_put(&stuser->kref, stuser_free);
}
  305. static void stuser_set_state(struct switchtec_user *stuser,
  306. enum mrpc_state state)
  307. {
  308. /* requires the mrpc_mutex to already be held when called */
  309. const char * const state_names[] = {
  310. [MRPC_IDLE] = "IDLE",
  311. [MRPC_QUEUED] = "QUEUED",
  312. [MRPC_RUNNING] = "RUNNING",
  313. [MRPC_DONE] = "DONE",
  314. };
  315. stuser->state = state;
  316. dev_dbg(&stuser->stdev->dev, "stuser state %p -> %s",
  317. stuser, state_names[state]);
  318. }
static void mrpc_complete_cmd(struct switchtec_dev *stdev);

/*
 * Submit the command at the head of the MRPC queue to the hardware.
 *
 * Requires stdev->mrpc_mutex held.  No-op if a command is already in
 * flight or the queue is empty.  The payload is copied into the GAS
 * input buffer before the command register is written (which presumably
 * triggers firmware execution — hardware semantics not visible here).
 * One status read follows: if the command finished instantly it is
 * completed inline, otherwise the 500 ms timeout poll is armed.
 */
static void mrpc_cmd_submit(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_user *stuser;

	if (stdev->mrpc_busy)
		return;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	stuser_set_state(stuser, MRPC_RUNNING);
	stdev->mrpc_busy = 1;
	memcpy_toio(&stdev->mmio_mrpc->input_data,
		    stuser->data, stuser->data_len);
	iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd);

	stuser->status = ioread32(&stdev->mmio_mrpc->status);
	if (stuser->status != SWITCHTEC_MRPC_STATUS_INPROGRESS)
		mrpc_complete_cmd(stdev);

	schedule_delayed_work(&stdev->mrpc_timeout,
			      msecs_to_jiffies(500));
}
/*
 * Append a prepared user command to the MRPC queue and kick submission.
 *
 * Requires stdev->mrpc_mutex held.  Takes an extra kref on the user,
 * dropped by mrpc_complete_cmd() when the command finishes, so the
 * context survives even if the file is closed mid-command.
 * Always returns 0.
 */
static int mrpc_queue_cmd(struct switchtec_user *stuser)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_dev *stdev = stuser->stdev;

	kref_get(&stuser->kref);
	stuser->read_len = sizeof(stuser->data);
	stuser_set_state(stuser, MRPC_QUEUED);
	init_completion(&stuser->comp);
	list_add_tail(&stuser->list, &stdev->mrpc_queue);

	mrpc_cmd_submit(stdev);

	return 0;
}
/*
 * Complete the in-flight command at the head of the MRPC queue.
 *
 * Requires stdev->mrpc_mutex held.  If hardware still reports
 * INPROGRESS, nothing is touched (the timeout worker will poll again).
 * Otherwise the status, return code and — only on a fully successful
 * command — up to read_len output bytes are captured into the user
 * context, waiters are woken, the user is dequeued (dropping the
 * queue's kref) and the next queued command is submitted.
 */
static void mrpc_complete_cmd(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_user *stuser;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	stuser->status = ioread32(&stdev->mmio_mrpc->status);
	if (stuser->status == SWITCHTEC_MRPC_STATUS_INPROGRESS)
		return;

	stuser_set_state(stuser, MRPC_DONE);
	stuser->return_code = 0;

	if (stuser->status != SWITCHTEC_MRPC_STATUS_DONE)
		goto out;

	stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value);
	if (stuser->return_code != 0)
		goto out;

	memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
		      stuser->read_len);

out:
	complete_all(&stuser->comp);
	list_del_init(&stuser->list);
	stuser_put(stuser);
	stdev->mrpc_busy = 0;

	mrpc_cmd_submit(stdev);
}
/*
 * Workqueue handler for MRPC completion events (scheduled elsewhere in
 * the file, presumably from the completion interrupt — not in view).
 * Cancels the pending timeout poll and completes the head command.
 */
static void mrpc_event_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;

	stdev = container_of(work, struct switchtec_dev, mrpc_work);

	dev_dbg(&stdev->dev, "%s\n", __func__);

	mutex_lock(&stdev->mrpc_mutex);
	cancel_delayed_work(&stdev->mrpc_timeout);
	mrpc_complete_cmd(stdev);
	mutex_unlock(&stdev->mrpc_mutex);
}
/*
 * Fallback poll in case the completion interrupt is missed: re-checks
 * the MRPC status every 500 ms while the command is still INPROGRESS,
 * and completes it once the hardware reports anything else.
 */
static void mrpc_timeout_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;
	u32 status;

	stdev = container_of(work, struct switchtec_dev, mrpc_timeout.work);

	dev_dbg(&stdev->dev, "%s\n", __func__);

	mutex_lock(&stdev->mrpc_mutex);

	status = ioread32(&stdev->mmio_mrpc->status);
	if (status == SWITCHTEC_MRPC_STATUS_INPROGRESS) {
		/* still running: re-arm the poll */
		schedule_delayed_work(&stdev->mrpc_timeout,
				      msecs_to_jiffies(500));
		goto out;
	}

	mrpc_complete_cmd(stdev);

out:
	mutex_unlock(&stdev->mrpc_mutex);
}
  407. static ssize_t device_version_show(struct device *dev,
  408. struct device_attribute *attr, char *buf)
  409. {
  410. struct switchtec_dev *stdev = to_stdev(dev);
  411. u32 ver;
  412. ver = ioread32(&stdev->mmio_sys_info->device_version);
  413. return sprintf(buf, "%x\n", ver);
  414. }
  415. static DEVICE_ATTR_RO(device_version);
  416. static ssize_t fw_version_show(struct device *dev,
  417. struct device_attribute *attr, char *buf)
  418. {
  419. struct switchtec_dev *stdev = to_stdev(dev);
  420. u32 ver;
  421. ver = ioread32(&stdev->mmio_sys_info->firmware_version);
  422. return sprintf(buf, "%08x\n", ver);
  423. }
  424. static DEVICE_ATTR_RO(fw_version);
  425. static ssize_t io_string_show(char *buf, void __iomem *attr, size_t len)
  426. {
  427. int i;
  428. memcpy_fromio(buf, attr, len);
  429. buf[len] = '\n';
  430. buf[len + 1] = 0;
  431. for (i = len - 1; i > 0; i--) {
  432. if (buf[i] != ' ')
  433. break;
  434. buf[i] = '\n';
  435. buf[i + 1] = 0;
  436. }
  437. return strlen(buf);
  438. }
/*
 * Generate a read-only sysfs attribute exposing a fixed-width string
 * field of sys_info_regs via io_string_show() (which trims the space
 * padding and appends a newline).
 */
#define DEVICE_ATTR_SYS_INFO_STR(field) \
static ssize_t field ## _show(struct device *dev, \
	struct device_attribute *attr, char *buf) \
{ \
	struct switchtec_dev *stdev = to_stdev(dev); \
	return io_string_show(buf, &stdev->mmio_sys_info->field, \
			      sizeof(stdev->mmio_sys_info->field)); \
} \
\
static DEVICE_ATTR_RO(field)

DEVICE_ATTR_SYS_INFO_STR(vendor_id);
DEVICE_ATTR_SYS_INFO_STR(product_id);
DEVICE_ATTR_SYS_INFO_STR(product_revision);
DEVICE_ATTR_SYS_INFO_STR(component_vendor);
  453. static ssize_t component_id_show(struct device *dev,
  454. struct device_attribute *attr, char *buf)
  455. {
  456. struct switchtec_dev *stdev = to_stdev(dev);
  457. int id = ioread16(&stdev->mmio_sys_info->component_id);
  458. return sprintf(buf, "PM%04X\n", id);
  459. }
  460. static DEVICE_ATTR_RO(component_id);
  461. static ssize_t component_revision_show(struct device *dev,
  462. struct device_attribute *attr, char *buf)
  463. {
  464. struct switchtec_dev *stdev = to_stdev(dev);
  465. int rev = ioread8(&stdev->mmio_sys_info->component_revision);
  466. return sprintf(buf, "%d\n", rev);
  467. }
  468. static DEVICE_ATTR_RO(component_revision);
  469. static ssize_t partition_show(struct device *dev,
  470. struct device_attribute *attr, char *buf)
  471. {
  472. struct switchtec_dev *stdev = to_stdev(dev);
  473. return sprintf(buf, "%d\n", stdev->partition);
  474. }
  475. static DEVICE_ATTR_RO(partition);
  476. static ssize_t partition_count_show(struct device *dev,
  477. struct device_attribute *attr, char *buf)
  478. {
  479. struct switchtec_dev *stdev = to_stdev(dev);
  480. return sprintf(buf, "%d\n", stdev->partition_count);
  481. }
  482. static DEVICE_ATTR_RO(partition_count);
/* All sysfs attributes published under the switchtec device. */
static struct attribute *switchtec_device_attrs[] = {
	&dev_attr_device_version.attr,
	&dev_attr_fw_version.attr,
	&dev_attr_vendor_id.attr,
	&dev_attr_product_id.attr,
	&dev_attr_product_revision.attr,
	&dev_attr_component_vendor.attr,
	&dev_attr_component_id.attr,
	&dev_attr_component_revision.attr,
	&dev_attr_partition.attr,
	&dev_attr_partition_count.attr,
	NULL,
};

ATTRIBUTE_GROUPS(switchtec_device);
/*
 * open(): allocate a per-file user context and disable seeking.
 * nonseekable_open()'s return value is ignored — it always returns 0.
 */
static int switchtec_dev_open(struct inode *inode, struct file *filp)
{
	struct switchtec_dev *stdev;
	struct switchtec_user *stuser;

	stdev = container_of(inode->i_cdev, struct switchtec_dev, cdev);

	stuser = stuser_create(stdev);
	if (IS_ERR(stuser))
		return PTR_ERR(stuser);

	filp->private_data = stuser;
	nonseekable_open(inode, filp);

	dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);

	return 0;
}
/*
 * release(): drop the file's reference; a command still in flight keeps
 * the context alive via the queue's extra kref (see mrpc_queue_cmd()).
 */
static int switchtec_dev_release(struct inode *inode, struct file *filp)
{
	struct switchtec_user *stuser = filp->private_data;

	stuser_put(stuser);

	return 0;
}
/*
 * Take mrpc_mutex and verify the device has not been removed.
 * On success returns 0 with the mutex HELD; on failure (-EINTR on
 * signal, -ENODEV if the device is gone) the mutex is not held.
 */
static int lock_mutex_and_test_alive(struct switchtec_dev *stdev)
{
	if (mutex_lock_interruptible(&stdev->mrpc_mutex))
		return -EINTR;

	if (!stdev->alive) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -ENODEV;
	}

	return 0;
}
/*
 * write(): submit one MRPC command.  The user buffer is a u32 command
 * number followed by 0..SWITCHTEC_MRPC_PAYLOAD_SIZE payload bytes.
 *
 * Only one command may be outstanding per open file: any state other
 * than MRPC_IDLE yields -EBADE.  On success the whole @size is consumed
 * and the result is later collected with read()/poll().
 */
static ssize_t switchtec_dev_write(struct file *filp, const char __user *data,
				   size_t size, loff_t *off)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;

	if (size < sizeof(stuser->cmd) ||
	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
		return -EINVAL;

	stuser->data_len = size - sizeof(stuser->cmd);

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state != MRPC_IDLE) {
		rc = -EBADE;
		goto out;
	}

	rc = copy_from_user(&stuser->cmd, data, sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	data += sizeof(stuser->cmd);
	rc = copy_from_user(&stuser->data, data, size - sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	rc = mrpc_queue_cmd(stuser);

out:
	mutex_unlock(&stdev->mrpc_mutex);

	if (rc)
		return rc;

	return size;
}
/*
 * read(): collect the result of the previously written command.  The
 * user buffer receives a u32 MRPC return code followed by up to
 * read_len output bytes.  Blocks on the command's completion unless
 * O_NONBLOCK (then -EAGAIN if not done yet).
 *
 * The final return value reflects the hardware status: @size on DONE,
 * -ENXIO if the command was interrupted, -EBADMSG otherwise — even
 * though the data was already copied out.
 *
 * NOTE(review): stuser->status is read after dropping mrpc_mutex;
 * presumed stable once state == MRPC_DONE — confirm.
 */
static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
				  size_t size, loff_t *off)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;

	if (size < sizeof(stuser->cmd) ||
	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
		return -EINVAL;

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state == MRPC_IDLE) {
		/* nothing was written first */
		mutex_unlock(&stdev->mrpc_mutex);
		return -EBADE;
	}

	/* only copy back as much output as the caller asked for */
	stuser->read_len = size - sizeof(stuser->return_code);

	mutex_unlock(&stdev->mrpc_mutex);

	if (filp->f_flags & O_NONBLOCK) {
		if (!try_wait_for_completion(&stuser->comp))
			return -EAGAIN;
	} else {
		rc = wait_for_completion_interruptible(&stuser->comp);
		if (rc < 0)
			return rc;
	}

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state != MRPC_DONE) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -EBADE;
	}

	rc = copy_to_user(data, &stuser->return_code,
			  sizeof(stuser->return_code));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	data += sizeof(stuser->return_code);
	rc = copy_to_user(data, &stuser->data,
			  size - sizeof(stuser->return_code));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	stuser_set_state(stuser, MRPC_IDLE);

out:
	mutex_unlock(&stdev->mrpc_mutex);

	if (stuser->status == SWITCHTEC_MRPC_STATUS_DONE)
		return size;
	else if (stuser->status == SWITCHTEC_MRPC_STATUS_INTERRUPTED)
		return -ENXIO;
	else
		return -EBADMSG;
}
/*
 * poll(): POLLIN when the user's MRPC command has completed, POLLPRI
 * when new device events have occurred since this user last consumed
 * them (event_cnt snapshot vs. the device counter).  A dead device
 * reports the full error mask so waiters wake immediately.
 *
 * NOTE(review): waits directly on the completion's internal wait queue
 * (&stuser->comp.wait) — depends on struct completion's wait_queue_head
 * implementation; confirm against the target kernel version.
 */
static unsigned int switchtec_dev_poll(struct file *filp, poll_table *wait)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int ret = 0;

	poll_wait(filp, &stuser->comp.wait, wait);
	poll_wait(filp, &stdev->event_wq, wait);

	if (lock_mutex_and_test_alive(stdev))
		return POLLIN | POLLRDHUP | POLLOUT | POLLERR | POLLHUP;

	mutex_unlock(&stdev->mrpc_mutex);

	if (try_wait_for_completion(&stuser->comp))
		ret |= POLLIN | POLLRDNORM;

	if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
		ret |= POLLPRI | POLLRDBAND;

	return ret;
}
/*
 * SWITCHTEC_IOCTL_FLASH_INFO: report the total flash length and the
 * number of partition slots this ABI version understands.
 */
static int ioctl_flash_info(struct switchtec_dev *stdev,
			    struct switchtec_ioctl_flash_info __user *uinfo)
{
	struct switchtec_ioctl_flash_info info = {0};
	struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;

	info.flash_length = ioread32(&fi->flash_length);
	info.num_partitions = SWITCHTEC_IOCTL_NUM_PARTITIONS;

	if (copy_to_user(uinfo, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
  644. static void set_fw_info_part(struct switchtec_ioctl_flash_part_info *info,
  645. struct partition_info __iomem *pi)
  646. {
  647. info->address = ioread32(&pi->address);
  648. info->length = ioread32(&pi->length);
  649. }
/*
 * SWITCHTEC_IOCTL_FLASH_PART_INFO: report address/length of the
 * requested flash partition and mark it active if its address matches
 * the hardware's active image/config pointer.  NVLOG and vendor
 * partitions have no "active" notion (active_addr stays -1, so
 * info.active remains 0 unless a partition address happened to be
 * 0xffffffff).  Unknown partition ids yield -EINVAL.
 */
static int ioctl_flash_part_info(struct switchtec_dev *stdev,
	struct switchtec_ioctl_flash_part_info __user *uinfo)
{
	struct switchtec_ioctl_flash_part_info info = {0};
	struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
	u32 active_addr = -1;

	if (copy_from_user(&info, uinfo, sizeof(info)))
		return -EFAULT;

	switch (info.flash_partition) {
	case SWITCHTEC_IOCTL_PART_CFG0:
		active_addr = ioread32(&fi->active_cfg);
		set_fw_info_part(&info, &fi->cfg0);
		break;
	case SWITCHTEC_IOCTL_PART_CFG1:
		active_addr = ioread32(&fi->active_cfg);
		set_fw_info_part(&info, &fi->cfg1);
		break;
	case SWITCHTEC_IOCTL_PART_IMG0:
		active_addr = ioread32(&fi->active_img);
		set_fw_info_part(&info, &fi->img0);
		break;
	case SWITCHTEC_IOCTL_PART_IMG1:
		active_addr = ioread32(&fi->active_img);
		set_fw_info_part(&info, &fi->img1);
		break;
	case SWITCHTEC_IOCTL_PART_NVLOG:
		set_fw_info_part(&info, &fi->nvlog);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR0:
		set_fw_info_part(&info, &fi->vendor[0]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR1:
		set_fw_info_part(&info, &fi->vendor[1]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR2:
		set_fw_info_part(&info, &fi->vendor[2]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR3:
		set_fw_info_part(&info, &fi->vendor[3]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR4:
		set_fw_info_part(&info, &fi->vendor[4]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR5:
		set_fw_info_part(&info, &fi->vendor[5]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR6:
		set_fw_info_part(&info, &fi->vendor[6]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR7:
		set_fw_info_part(&info, &fi->vendor[7]);
		break;
	default:
		return -EINVAL;
	}

	if (info.address == active_addr)
		info.active = 1;

	if (copy_to_user(uinfo, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
/*
 * SWITCHTEC_IOCTL_EVENT_SUMMARY: snapshot the global, per-partition and
 * per-PFF event summary registers into one structure for userspace.
 * The PFF scan stops at the first block whose vendor id is not
 * Microsemi's (end of the populated array).  Also resets this user's
 * event counter so a subsequent poll() only reports newer events.
 *
 * NOTE(review): the partition loop trusts the hardware-provided
 * partition_count as a bound for s.part[] — confirm it cannot exceed
 * the ioctl structure's array size.
 */
static int ioctl_event_summary(struct switchtec_dev *stdev,
	struct switchtec_user *stuser,
	struct switchtec_ioctl_event_summary __user *usum)
{
	struct switchtec_ioctl_event_summary s = {0};
	int i;
	u32 reg;

	s.global = ioread32(&stdev->mmio_sw_event->global_summary);
	s.part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap);
	s.local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);

	for (i = 0; i < stdev->partition_count; i++) {
		reg = ioread32(&stdev->mmio_part_cfg_all[i].part_event_summary);
		s.part[i] = reg;
	}

	for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
		reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
		if (reg != MICROSEMI_VENDOR_ID)
			break;

		reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary);
		s.pff[i] = reg;
	}

	if (copy_to_user(usum, &s, sizeof(s)))
		return -EFAULT;

	stuser->event_cnt = atomic_read(&stdev->event_cnt);

	return 0;
}
  737. static u32 __iomem *global_ev_reg(struct switchtec_dev *stdev,
  738. size_t offset, int index)
  739. {
  740. return (void __iomem *)stdev->mmio_sw_event + offset;
  741. }
  742. static u32 __iomem *part_ev_reg(struct switchtec_dev *stdev,
  743. size_t offset, int index)
  744. {
  745. return (void __iomem *)&stdev->mmio_part_cfg_all[index] + offset;
  746. }
  747. static u32 __iomem *pff_ev_reg(struct switchtec_dev *stdev,
  748. size_t offset, int index)
  749. {
  750. return (void __iomem *)&stdev->mmio_pff_csr[index] + offset;
  751. }
  752. #define EV_GLB(i, r)[i] = {offsetof(struct sw_event_regs, r), global_ev_reg}
  753. #define EV_PAR(i, r)[i] = {offsetof(struct part_cfg_regs, r), part_ev_reg}
  754. #define EV_PFF(i, r)[i] = {offsetof(struct pff_csr_regs, r), pff_ev_reg}
  755. const struct event_reg {
  756. size_t offset;
  757. u32 __iomem *(*map_reg)(struct switchtec_dev *stdev,
  758. size_t offset, int index);
  759. } event_regs[] = {
  760. EV_GLB(SWITCHTEC_IOCTL_EVENT_STACK_ERROR, stack_error_event_hdr),
  761. EV_GLB(SWITCHTEC_IOCTL_EVENT_PPU_ERROR, ppu_error_event_hdr),
  762. EV_GLB(SWITCHTEC_IOCTL_EVENT_ISP_ERROR, isp_error_event_hdr),
  763. EV_GLB(SWITCHTEC_IOCTL_EVENT_SYS_RESET, sys_reset_event_hdr),
  764. EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_EXC, fw_exception_hdr),
  765. EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NMI, fw_nmi_hdr),
  766. EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NON_FATAL, fw_non_fatal_hdr),
  767. EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_FATAL, fw_fatal_hdr),
  768. EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP, twi_mrpc_comp_hdr),
  769. EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP_ASYNC,
  770. twi_mrpc_comp_async_hdr),
  771. EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP, cli_mrpc_comp_hdr),
  772. EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP_ASYNC,
  773. cli_mrpc_comp_async_hdr),
  774. EV_GLB(SWITCHTEC_IOCTL_EVENT_GPIO_INT, gpio_interrupt_hdr),
  775. EV_PAR(SWITCHTEC_IOCTL_EVENT_PART_RESET, part_reset_hdr),
  776. EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP, mrpc_comp_hdr),
  777. EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC, mrpc_comp_async_hdr),
  778. EV_PAR(SWITCHTEC_IOCTL_EVENT_DYN_PART_BIND_COMP, dyn_binding_hdr),
  779. EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_P2P, aer_in_p2p_hdr),
  780. EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_VEP, aer_in_vep_hdr),
  781. EV_PFF(SWITCHTEC_IOCTL_EVENT_DPC, dpc_hdr),
  782. EV_PFF(SWITCHTEC_IOCTL_EVENT_CTS, cts_hdr),
  783. EV_PFF(SWITCHTEC_IOCTL_EVENT_HOTPLUG, hotplug_hdr),
  784. EV_PFF(SWITCHTEC_IOCTL_EVENT_IER, ier_hdr),
  785. EV_PFF(SWITCHTEC_IOCTL_EVENT_THRESH, threshold_hdr),
  786. EV_PFF(SWITCHTEC_IOCTL_EVENT_POWER_MGMT, power_mgmt_hdr),
  787. EV_PFF(SWITCHTEC_IOCTL_EVENT_TLP_THROTTLING, tlp_throttling_hdr),
  788. EV_PFF(SWITCHTEC_IOCTL_EVENT_FORCE_SPEED, force_speed_hdr),
  789. EV_PFF(SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT, credit_timeout_hdr),
  790. EV_PFF(SWITCHTEC_IOCTL_EVENT_LINK_STATE, link_state_hdr),
  791. };
  792. static u32 __iomem *event_hdr_addr(struct switchtec_dev *stdev,
  793. int event_id, int index)
  794. {
  795. size_t off;
  796. if (event_id < 0 || event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
  797. return ERR_PTR(-EINVAL);
  798. off = event_regs[event_id].offset;
  799. if (event_regs[event_id].map_reg == part_ev_reg) {
  800. if (index == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
  801. index = stdev->partition;
  802. else if (index < 0 || index >= stdev->partition_count)
  803. return ERR_PTR(-EINVAL);
  804. } else if (event_regs[event_id].map_reg == pff_ev_reg) {
  805. if (index < 0 || index >= stdev->pff_csr_count)
  806. return ERR_PTR(-EINVAL);
  807. }
  808. return event_regs[event_id].map_reg(stdev, off, index);
  809. }
/*
 * Read, report and optionally reconfigure one event header register.
 *
 * Fills @ctl with the event's payload words, occurred bit and hardware
 * count, applies any enable/disable/clear requests from @ctl->flags to
 * the header, then rewrites @ctl->flags to describe the resulting
 * enable state so the caller sees the post-update configuration.
 */
static int event_ctl(struct switchtec_dev *stdev,
		     struct switchtec_ioctl_event_ctl *ctl)
{
	int i;
	u32 __iomem *reg;
	u32 hdr;

	reg = event_hdr_addr(stdev, ctl->event_id, ctl->index);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	hdr = ioread32(reg);

	/* The event payload words immediately follow the header register */
	for (i = 0; i < ARRAY_SIZE(ctl->data); i++)
		ctl->data[i] = ioread32(&reg[i + 1]);

	ctl->occurred = hdr & SWITCHTEC_EVENT_OCCURRED;
	/* Hardware event counter occupies bits 12:5 of the header */
	ctl->count = (hdr >> 5) & 0xFF;

	/* Only write the clear bit back if the caller requested a clear */
	if (!(ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_CLEAR))
		hdr &= ~SWITCHTEC_EVENT_CLEAR;

	/* Apply each requested enable/disable to the header image */
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL)
		hdr |= SWITCHTEC_EVENT_EN_IRQ;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_POLL)
		hdr &= ~SWITCHTEC_EVENT_EN_IRQ;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG)
		hdr |= SWITCHTEC_EVENT_EN_LOG;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_LOG)
		hdr &= ~SWITCHTEC_EVENT_EN_LOG;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI)
		hdr |= SWITCHTEC_EVENT_EN_CLI;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_CLI)
		hdr &= ~SWITCHTEC_EVENT_EN_CLI;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL)
		hdr |= SWITCHTEC_EVENT_FATAL;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_FATAL)
		hdr &= ~SWITCHTEC_EVENT_FATAL;

	/* Skip the MMIO write entirely when no change was requested */
	if (ctl->flags)
		iowrite32(hdr, reg);

	/* Report the resulting enable state back through ctl->flags */
	ctl->flags = 0;
	if (hdr & SWITCHTEC_EVENT_EN_IRQ)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL;
	if (hdr & SWITCHTEC_EVENT_EN_LOG)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG;
	if (hdr & SWITCHTEC_EVENT_EN_CLI)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI;
	if (hdr & SWITCHTEC_EVENT_FATAL)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL;

	return 0;
}
  855. static int ioctl_event_ctl(struct switchtec_dev *stdev,
  856. struct switchtec_ioctl_event_ctl __user *uctl)
  857. {
  858. int ret;
  859. int nr_idxs;
  860. struct switchtec_ioctl_event_ctl ctl;
  861. if (copy_from_user(&ctl, uctl, sizeof(ctl)))
  862. return -EFAULT;
  863. if (ctl.event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
  864. return -EINVAL;
  865. if (ctl.flags & SWITCHTEC_IOCTL_EVENT_FLAG_UNUSED)
  866. return -EINVAL;
  867. if (ctl.index == SWITCHTEC_IOCTL_EVENT_IDX_ALL) {
  868. if (event_regs[ctl.event_id].map_reg == global_ev_reg)
  869. nr_idxs = 1;
  870. else if (event_regs[ctl.event_id].map_reg == part_ev_reg)
  871. nr_idxs = stdev->partition_count;
  872. else if (event_regs[ctl.event_id].map_reg == pff_ev_reg)
  873. nr_idxs = stdev->pff_csr_count;
  874. else
  875. return -EINVAL;
  876. for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) {
  877. ret = event_ctl(stdev, &ctl);
  878. if (ret < 0)
  879. return ret;
  880. }
  881. } else {
  882. ret = event_ctl(stdev, &ctl);
  883. if (ret < 0)
  884. return ret;
  885. }
  886. if (copy_to_user(uctl, &ctl, sizeof(ctl)))
  887. return -EFAULT;
  888. return 0;
  889. }
  890. static int ioctl_pff_to_port(struct switchtec_dev *stdev,
  891. struct switchtec_ioctl_pff_port *up)
  892. {
  893. int i, part;
  894. u32 reg;
  895. struct part_cfg_regs *pcfg;
  896. struct switchtec_ioctl_pff_port p;
  897. if (copy_from_user(&p, up, sizeof(p)))
  898. return -EFAULT;
  899. p.port = -1;
  900. for (part = 0; part < stdev->partition_count; part++) {
  901. pcfg = &stdev->mmio_part_cfg_all[part];
  902. p.partition = part;
  903. reg = ioread32(&pcfg->usp_pff_inst_id);
  904. if (reg == p.pff) {
  905. p.port = 0;
  906. break;
  907. }
  908. reg = ioread32(&pcfg->vep_pff_inst_id);
  909. if (reg == p.pff) {
  910. p.port = SWITCHTEC_IOCTL_PFF_VEP;
  911. break;
  912. }
  913. for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
  914. reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
  915. if (reg != p.pff)
  916. continue;
  917. p.port = i + 1;
  918. break;
  919. }
  920. if (p.port != -1)
  921. break;
  922. }
  923. if (copy_to_user(up, &p, sizeof(p)))
  924. return -EFAULT;
  925. return 0;
  926. }
  927. static int ioctl_port_to_pff(struct switchtec_dev *stdev,
  928. struct switchtec_ioctl_pff_port *up)
  929. {
  930. struct switchtec_ioctl_pff_port p;
  931. struct part_cfg_regs *pcfg;
  932. if (copy_from_user(&p, up, sizeof(p)))
  933. return -EFAULT;
  934. if (p.partition == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
  935. pcfg = stdev->mmio_part_cfg;
  936. else if (p.partition < stdev->partition_count)
  937. pcfg = &stdev->mmio_part_cfg_all[p.partition];
  938. else
  939. return -EINVAL;
  940. switch (p.port) {
  941. case 0:
  942. p.pff = ioread32(&pcfg->usp_pff_inst_id);
  943. break;
  944. case SWITCHTEC_IOCTL_PFF_VEP:
  945. p.pff = ioread32(&pcfg->vep_pff_inst_id);
  946. break;
  947. default:
  948. if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
  949. return -EINVAL;
  950. p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
  951. break;
  952. }
  953. if (copy_to_user(up, &p, sizeof(p)))
  954. return -EFAULT;
  955. return 0;
  956. }
  957. static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd,
  958. unsigned long arg)
  959. {
  960. struct switchtec_user *stuser = filp->private_data;
  961. struct switchtec_dev *stdev = stuser->stdev;
  962. int rc;
  963. void __user *argp = (void __user *)arg;
  964. rc = lock_mutex_and_test_alive(stdev);
  965. if (rc)
  966. return rc;
  967. switch (cmd) {
  968. case SWITCHTEC_IOCTL_FLASH_INFO:
  969. rc = ioctl_flash_info(stdev, argp);
  970. break;
  971. case SWITCHTEC_IOCTL_FLASH_PART_INFO:
  972. rc = ioctl_flash_part_info(stdev, argp);
  973. break;
  974. case SWITCHTEC_IOCTL_EVENT_SUMMARY:
  975. rc = ioctl_event_summary(stdev, stuser, argp);
  976. break;
  977. case SWITCHTEC_IOCTL_EVENT_CTL:
  978. rc = ioctl_event_ctl(stdev, argp);
  979. break;
  980. case SWITCHTEC_IOCTL_PFF_TO_PORT:
  981. rc = ioctl_pff_to_port(stdev, argp);
  982. break;
  983. case SWITCHTEC_IOCTL_PORT_TO_PFF:
  984. rc = ioctl_port_to_pff(stdev, argp);
  985. break;
  986. default:
  987. rc = -ENOTTY;
  988. break;
  989. }
  990. mutex_unlock(&stdev->mrpc_mutex);
  991. return rc;
  992. }
  993. static const struct file_operations switchtec_fops = {
  994. .owner = THIS_MODULE,
  995. .open = switchtec_dev_open,
  996. .release = switchtec_dev_release,
  997. .write = switchtec_dev_write,
  998. .read = switchtec_dev_read,
  999. .poll = switchtec_dev_poll,
  1000. .unlocked_ioctl = switchtec_dev_ioctl,
  1001. .compat_ioctl = switchtec_dev_ioctl,
  1002. };
  1003. static void stdev_release(struct device *dev)
  1004. {
  1005. struct switchtec_dev *stdev = to_stdev(dev);
  1006. kfree(stdev);
  1007. }
/*
 * Tear down a dying device: stop bus mastering, cancel the MRPC
 * timeout, fail every queued MRPC request and wake all sleepers so
 * they can observe stdev->alive == false.
 */
static void stdev_kill(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser, *tmpuser;

	pci_clear_master(stdev->pdev);

	cancel_delayed_work_sync(&stdev->mrpc_timeout);

	/* Mark the hardware as unavailable and complete all completions */
	mutex_lock(&stdev->mrpc_mutex);
	stdev->alive = false;

	/* Wake up and kill any users waiting on an MRPC request */
	list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
		complete_all(&stuser->comp);
		list_del_init(&stuser->list);
		stuser_put(stuser);	/* drop the queue's reference */
	}

	mutex_unlock(&stdev->mrpc_mutex);

	/* Wake up any users waiting on event_wq */
	wake_up_interruptible(&stdev->event_wq);
}
/*
 * Allocate and initialize a switchtec_dev for @pdev: MRPC queue and
 * work items, event wait queue, embedded struct device, a minor number
 * from the IDA, and the cdev (not yet added to the system).
 *
 * Returns the new device or an ERR_PTR.  On success the caller owns
 * one reference, released via put_device().
 */
static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev;
	int minor;
	struct device *dev;
	struct cdev *cdev;
	int rc;

	/* Allocate on the PCI device's NUMA node */
	stdev = kzalloc_node(sizeof(*stdev), GFP_KERNEL,
			     dev_to_node(&pdev->dev));
	if (!stdev)
		return ERR_PTR(-ENOMEM);

	stdev->alive = true;
	stdev->pdev = pdev;
	INIT_LIST_HEAD(&stdev->mrpc_queue);
	mutex_init(&stdev->mrpc_mutex);
	stdev->mrpc_busy = 0;
	INIT_WORK(&stdev->mrpc_work, mrpc_event_work);
	INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work);
	init_waitqueue_head(&stdev->event_wq);
	atomic_set(&stdev->event_cnt, 0);

	dev = &stdev->dev;
	device_initialize(dev);
	dev->class = switchtec_class;
	dev->parent = &pdev->dev;
	dev->groups = switchtec_device_groups;
	dev->release = stdev_release;

	minor = ida_simple_get(&switchtec_minor_ida, 0, 0,
			       GFP_KERNEL);
	if (minor < 0) {
		rc = minor;
		goto err_put;
	}

	dev->devt = MKDEV(MAJOR(switchtec_devt), minor);
	dev_set_name(dev, "switchtec%d", minor);

	cdev = &stdev->cdev;
	cdev_init(cdev, &switchtec_fops);
	cdev->owner = THIS_MODULE;

	return stdev;

err_put:
	/* device_initialize() was done, so free via the release callback */
	put_device(&stdev->dev);
	return ERR_PTR(rc);
}
  1068. static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
  1069. {
  1070. size_t off = event_regs[eid].offset;
  1071. u32 __iomem *hdr_reg;
  1072. u32 hdr;
  1073. hdr_reg = event_regs[eid].map_reg(stdev, off, idx);
  1074. hdr = ioread32(hdr_reg);
  1075. if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
  1076. return 0;
  1077. dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
  1078. hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
  1079. iowrite32(hdr, hdr_reg);
  1080. return 1;
  1081. }
  1082. static int mask_all_events(struct switchtec_dev *stdev, int eid)
  1083. {
  1084. int idx;
  1085. int count = 0;
  1086. if (event_regs[eid].map_reg == part_ev_reg) {
  1087. for (idx = 0; idx < stdev->partition_count; idx++)
  1088. count += mask_event(stdev, eid, idx);
  1089. } else if (event_regs[eid].map_reg == pff_ev_reg) {
  1090. for (idx = 0; idx < stdev->pff_csr_count; idx++) {
  1091. if (!stdev->pff_local[idx])
  1092. continue;
  1093. count += mask_event(stdev, eid, idx);
  1094. }
  1095. } else {
  1096. count += mask_event(stdev, eid, 0);
  1097. }
  1098. return count;
  1099. }
/*
 * Interrupt handler shared by MRPC completion and hardware events.
 *
 * MRPC completions are acknowledged and handed to mrpc_work; all other
 * pending events are masked (mask_all_events) and waiting pollers are
 * woken via event_wq after bumping event_cnt.
 */
static irqreturn_t switchtec_event_isr(int irq, void *dev)
{
	struct switchtec_dev *stdev = dev;
	u32 reg;
	irqreturn_t ret = IRQ_NONE;
	int eid, event_count = 0;

	reg = ioread32(&stdev->mmio_part_cfg->mrpc_comp_hdr);
	if (reg & SWITCHTEC_EVENT_OCCURRED) {
		dev_dbg(&stdev->dev, "%s: mrpc comp\n", __func__);
		ret = IRQ_HANDLED;
		schedule_work(&stdev->mrpc_work);
		/* Write the header back as-read to acknowledge the event */
		iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
	}

	for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++)
		event_count += mask_all_events(stdev, eid);

	if (event_count) {
		atomic_inc(&stdev->event_cnt);
		wake_up_interruptible(&stdev->event_wq);
		dev_dbg(&stdev->dev, "%s: %d events\n", __func__,
			event_count);
		return IRQ_HANDLED;
	}

	return ret;
}
/*
 * Allocate 1-4 MSI/MSI-X vectors and attach switchtec_event_isr to the
 * vector number the hardware reports for VEP events.  All resources
 * are device-managed, so there is no explicit teardown path.
 */
static int switchtec_init_isr(struct switchtec_dev *stdev)
{
	int nvecs;
	int event_irq;

	nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, 4,
				      PCI_IRQ_MSIX | PCI_IRQ_MSI);
	if (nvecs < 0)
		return nvecs;

	/*
	 * NOTE(review): this is a 32-bit read of vep_vector_number; if the
	 * register is narrower than 32 bits this also picks up adjacent
	 * register contents -- confirm the field width against the GAS
	 * register layout.
	 */
	event_irq = ioread32(&stdev->mmio_part_cfg->vep_vector_number);
	if (event_irq < 0 || event_irq >= nvecs)
		return -EFAULT;

	/* Translate the hardware vector index into a Linux IRQ number */
	event_irq = pci_irq_vector(stdev->pdev, event_irq);
	if (event_irq < 0)
		return event_irq;

	return devm_request_irq(&stdev->pdev->dev, event_irq,
				switchtec_event_isr, 0,
				KBUILD_MODNAME, stdev);
}
  1142. static void init_pff(struct switchtec_dev *stdev)
  1143. {
  1144. int i;
  1145. u32 reg;
  1146. struct part_cfg_regs *pcfg = stdev->mmio_part_cfg;
  1147. for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
  1148. reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
  1149. if (reg != MICROSEMI_VENDOR_ID)
  1150. break;
  1151. }
  1152. stdev->pff_csr_count = i;
  1153. reg = ioread32(&pcfg->usp_pff_inst_id);
  1154. if (reg < SWITCHTEC_MAX_PFF_CSR)
  1155. stdev->pff_local[reg] = 1;
  1156. reg = ioread32(&pcfg->vep_pff_inst_id);
  1157. if (reg < SWITCHTEC_MAX_PFF_CSR)
  1158. stdev->pff_local[reg] = 1;
  1159. for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
  1160. reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
  1161. if (reg < SWITCHTEC_MAX_PFF_CSR)
  1162. stdev->pff_local[reg] = 1;
  1163. }
  1164. }
/*
 * Enable the PCI device, map BAR 0 (the GAS) and derive pointers to
 * each register region from its fixed offset.  All PCI resources are
 * managed (pcim_*), so teardown is automatic on driver detach.
 */
static int switchtec_init_pci(struct switchtec_dev *stdev,
			      struct pci_dev *pdev)
{
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* Map only BAR 0, which holds the entire GAS register space */
	rc = pcim_iomap_regions(pdev, 0x1, KBUILD_MODNAME);
	if (rc)
		return rc;

	pci_set_master(pdev);

	stdev->mmio = pcim_iomap_table(pdev)[0];
	stdev->mmio_mrpc = stdev->mmio + SWITCHTEC_GAS_MRPC_OFFSET;
	stdev->mmio_sw_event = stdev->mmio + SWITCHTEC_GAS_SW_EVENT_OFFSET;
	stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
	stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
	stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;
	stdev->partition = ioread8(&stdev->mmio_sys_info->partition_id);
	stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
	stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
	stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
	stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET;

	/* There is always at least the local partition */
	if (stdev->partition_count < 1)
		stdev->partition_count = 1;

	init_pff(stdev);

	pci_set_drvdata(pdev, stdev);

	return 0;
}
/*
 * Probe: create the switchtec device, set up PCI and the event IRQ,
 * enable MRPC-completion interrupts, then register the char device.
 *
 * Error unwind: after cdev_device_add() fails the device must be
 * killed (stdev_kill) before dropping the minor number and the final
 * reference.
 */
static int switchtec_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct switchtec_dev *stdev;
	int rc;

	stdev = stdev_create(pdev);
	if (IS_ERR(stdev))
		return PTR_ERR(stdev);

	rc = switchtec_init_pci(stdev, pdev);
	if (rc)
		goto err_put;

	rc = switchtec_init_isr(stdev);
	if (rc) {
		dev_err(&stdev->dev, "failed to init isr.\n");
		goto err_put;
	}

	/* Clear any stale MRPC completion and enable its interrupt */
	iowrite32(SWITCHTEC_EVENT_CLEAR |
		  SWITCHTEC_EVENT_EN_IRQ,
		  &stdev->mmio_part_cfg->mrpc_comp_hdr);

	rc = cdev_device_add(&stdev->cdev, &stdev->dev);
	if (rc)
		goto err_devadd;

	dev_info(&stdev->dev, "Management device registered.\n");

	return 0;

err_devadd:
	stdev_kill(stdev);
err_put:
	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	put_device(&stdev->dev);
	return rc;
}
/*
 * Remove: unregister the char device and minor number, kill the device
 * (failing outstanding MRPC requests), then drop the last reference.
 */
static void switchtec_pci_remove(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev = pci_get_drvdata(pdev);

	pci_set_drvdata(pdev, NULL);

	cdev_device_del(&stdev->cdev, &stdev->dev);
	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	dev_info(&stdev->dev, "unregistered.\n");

	stdev_kill(stdev);
	put_device(&stdev->dev);
}
/*
 * Expands to TWO pci_device_id entries for one device id: the part can
 * present either the management or the NTB class code, and the driver
 * binds to both.
 */
#define SWITCHTEC_PCI_DEVICE(device_id) \
	{ \
		.vendor = MICROSEMI_VENDOR_ID, \
		.device = device_id, \
		.subvendor = PCI_ANY_ID, \
		.subdevice = PCI_ANY_ID, \
		.class = MICROSEMI_MGMT_CLASSCODE, \
		.class_mask = 0xFFFFFFFF, \
	}, \
	{ \
		.vendor = MICROSEMI_VENDOR_ID, \
		.device = device_id, \
		.subvendor = PCI_ANY_ID, \
		.subdevice = PCI_ANY_ID, \
		.class = MICROSEMI_NTB_CLASSCODE, \
		.class_mask = 0xFFFFFFFF, \
	}
static const struct pci_device_id switchtec_pci_tbl[] = {
	SWITCHTEC_PCI_DEVICE(0x8531),  /* PFX 24xG3 */
	SWITCHTEC_PCI_DEVICE(0x8532),  /* PFX 32xG3 */
	SWITCHTEC_PCI_DEVICE(0x8533),  /* PFX 48xG3 */
	SWITCHTEC_PCI_DEVICE(0x8534),  /* PFX 64xG3 */
	SWITCHTEC_PCI_DEVICE(0x8535),  /* PFX 80xG3 */
	SWITCHTEC_PCI_DEVICE(0x8536),  /* PFX 96xG3 */
	SWITCHTEC_PCI_DEVICE(0x8543),  /* PSX 48xG3 */
	SWITCHTEC_PCI_DEVICE(0x8544),  /* PSX 64xG3 */
	SWITCHTEC_PCI_DEVICE(0x8545),  /* PSX 80xG3 */
	SWITCHTEC_PCI_DEVICE(0x8546),  /* PSX 96xG3 */
	{0}
};
MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
/* PCI driver glue; bound to all Switchtec ids in switchtec_pci_tbl */
static struct pci_driver switchtec_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = switchtec_pci_tbl,
	.probe = switchtec_pci_probe,
	.remove = switchtec_pci_remove,
};
/*
 * Module init: reserve a char-device major region, create the sysfs
 * class, then register the PCI driver.  Unwinds in reverse order on
 * failure.
 */
static int __init switchtec_init(void)
{
	int rc;

	rc = alloc_chrdev_region(&switchtec_devt, 0, max_devices,
				 "switchtec");
	if (rc)
		return rc;

	switchtec_class = class_create(THIS_MODULE, "switchtec");
	if (IS_ERR(switchtec_class)) {
		rc = PTR_ERR(switchtec_class);
		goto err_create_class;
	}

	rc = pci_register_driver(&switchtec_pci_driver);
	if (rc)
		goto err_pci_register;

	pr_info(KBUILD_MODNAME ": loaded.\n");

	return 0;

err_pci_register:
	class_destroy(switchtec_class);

err_create_class:
	unregister_chrdev_region(switchtec_devt, max_devices);

	return rc;
}
module_init(switchtec_init);
/*
 * Module exit: tear down in reverse of switchtec_init(), then drain
 * the minor-number IDA.
 */
static void __exit switchtec_exit(void)
{
	pci_unregister_driver(&switchtec_pci_driver);
	class_destroy(switchtec_class);
	unregister_chrdev_region(switchtec_devt, max_devices);
	ida_destroy(&switchtec_minor_ida);

	pr_info(KBUILD_MODNAME ": unloaded.\n");
}
module_exit(switchtec_exit);