switchtec.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Microsemi Switchtec(tm) PCIe Management Driver
 * Copyright (c) 2017, Microsemi Corporation
 */

#include <linux/switchtec.h>
#include <linux/switchtec_ioctl.h>

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/nospec.h>

MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Microsemi Corporation");

static int max_devices = 16;
module_param(max_devices, int, 0644);
MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");

static dev_t switchtec_devt;
static DEFINE_IDA(switchtec_minor_ida);

struct class *switchtec_class;
EXPORT_SYMBOL_GPL(switchtec_class);

enum mrpc_state {
	MRPC_IDLE = 0,
	MRPC_QUEUED,
	MRPC_RUNNING,
	MRPC_DONE,
};
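
/*
 * Each open file descriptor owns one MRPC context that moves through
 * the states above: IDLE until userspace writes a command, QUEUED while
 * it waits on stdev->mrpc_queue, RUNNING once it has been copied to the
 * hardware, and DONE when the status register reports completion.
 */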
struct switchtec_user {
	struct switchtec_dev *stdev;

	enum mrpc_state state;

	struct completion comp;
	struct kref kref;
	struct list_head list;

	u32 cmd;
	u32 status;
	u32 return_code;
	size_t data_len;
	size_t read_len;
	unsigned char data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
	int event_cnt;
};

static struct switchtec_user *stuser_create(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser;

	stuser = kzalloc(sizeof(*stuser), GFP_KERNEL);
	if (!stuser)
		return ERR_PTR(-ENOMEM);

	get_device(&stdev->dev);
	stuser->stdev = stdev;
	kref_init(&stuser->kref);
	INIT_LIST_HEAD(&stuser->list);
	init_completion(&stuser->comp);
	stuser->event_cnt = atomic_read(&stdev->event_cnt);

	dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);

	return stuser;
}

static void stuser_free(struct kref *kref)
{
	struct switchtec_user *stuser;

	stuser = container_of(kref, struct switchtec_user, kref);

	dev_dbg(&stuser->stdev->dev, "%s: %p\n", __func__, stuser);

	put_device(&stuser->stdev->dev);
	kfree(stuser);
}

static void stuser_put(struct switchtec_user *stuser)
{
	kref_put(&stuser->kref, stuser_free);
}

static void stuser_set_state(struct switchtec_user *stuser,
			     enum mrpc_state state)
{
	/* requires the mrpc_mutex to already be held when called */

	const char * const state_names[] = {
		[MRPC_IDLE] = "IDLE",
		[MRPC_QUEUED] = "QUEUED",
		[MRPC_RUNNING] = "RUNNING",
		[MRPC_DONE] = "DONE",
	};

	stuser->state = state;

	dev_dbg(&stuser->stdev->dev, "stuser state %p -> %s",
		stuser, state_names[state]);
}
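
/*
 * Submission path: copy the input payload of the command at the head of
 * the queue into the GAS input buffer, write the command register, and
 * read back the status.  If the hardware completed synchronously, the
 * command is finished right away; otherwise a 500 ms delayed work is
 * armed so completion is still detected if the MRPC interrupt is never
 * delivered.
 */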
static void mrpc_complete_cmd(struct switchtec_dev *stdev);

static void mrpc_cmd_submit(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_user *stuser;

	if (stdev->mrpc_busy)
		return;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	stuser_set_state(stuser, MRPC_RUNNING);
	stdev->mrpc_busy = 1;
	memcpy_toio(&stdev->mmio_mrpc->input_data,
		    stuser->data, stuser->data_len);
	iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd);

	stuser->status = ioread32(&stdev->mmio_mrpc->status);
	if (stuser->status != SWITCHTEC_MRPC_STATUS_INPROGRESS)
		mrpc_complete_cmd(stdev);

	schedule_delayed_work(&stdev->mrpc_timeout,
			      msecs_to_jiffies(500));
}

static int mrpc_queue_cmd(struct switchtec_user *stuser)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_dev *stdev = stuser->stdev;

	kref_get(&stuser->kref);
	stuser->read_len = sizeof(stuser->data);
	stuser_set_state(stuser, MRPC_QUEUED);
	init_completion(&stuser->comp);
	list_add_tail(&stuser->list, &stdev->mrpc_queue);

	mrpc_cmd_submit(stdev);

	return 0;
}

static void mrpc_complete_cmd(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_user *stuser;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	stuser->status = ioread32(&stdev->mmio_mrpc->status);
	if (stuser->status == SWITCHTEC_MRPC_STATUS_INPROGRESS)
		return;

	stuser_set_state(stuser, MRPC_DONE);
	stuser->return_code = 0;

	if (stuser->status != SWITCHTEC_MRPC_STATUS_DONE)
		goto out;

	stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value);
	if (stuser->return_code != 0)
		goto out;

	memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
		      stuser->read_len);

out:
	complete_all(&stuser->comp);
	list_del_init(&stuser->list);
	stuser_put(stuser);
	stdev->mrpc_busy = 0;

	mrpc_cmd_submit(stdev);
}
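
/*
 * Completion is driven from two places: the MRPC completion interrupt
 * schedules mrpc_work, and mrpc_timeout re-polls the status register
 * every 500 ms as a fallback.  Both paths funnel into
 * mrpc_complete_cmd() under the mrpc_mutex, so a command is only ever
 * completed once.
 */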
static void mrpc_event_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;

	stdev = container_of(work, struct switchtec_dev, mrpc_work);

	dev_dbg(&stdev->dev, "%s\n", __func__);

	mutex_lock(&stdev->mrpc_mutex);
	cancel_delayed_work(&stdev->mrpc_timeout);
	mrpc_complete_cmd(stdev);
	mutex_unlock(&stdev->mrpc_mutex);
}

static void mrpc_timeout_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;
	u32 status;

	stdev = container_of(work, struct switchtec_dev, mrpc_timeout.work);

	dev_dbg(&stdev->dev, "%s\n", __func__);

	mutex_lock(&stdev->mrpc_mutex);

	status = ioread32(&stdev->mmio_mrpc->status);
	if (status == SWITCHTEC_MRPC_STATUS_INPROGRESS) {
		schedule_delayed_work(&stdev->mrpc_timeout,
				      msecs_to_jiffies(500));
		goto out;
	}

	mrpc_complete_cmd(stdev);

out:
	mutex_unlock(&stdev->mrpc_mutex);
}

static ssize_t device_version_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	u32 ver;

	ver = ioread32(&stdev->mmio_sys_info->device_version);

	return sprintf(buf, "%x\n", ver);
}
static DEVICE_ATTR_RO(device_version);

static ssize_t fw_version_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	u32 ver;

	ver = ioread32(&stdev->mmio_sys_info->firmware_version);

	return sprintf(buf, "%08x\n", ver);
}
static DEVICE_ATTR_RO(fw_version);
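
/*
 * Copy a fixed-width, space-padded string register out of the GAS,
 * trimming the trailing padding and terminating the result with a
 * single newline, as sysfs expects.  The caller's buffer must have
 * room for len + 2 bytes.
 */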
static ssize_t io_string_show(char *buf, void __iomem *attr, size_t len)
{
	int i;

	memcpy_fromio(buf, attr, len);
	buf[len] = '\n';
	buf[len + 1] = 0;

	for (i = len - 1; i > 0; i--) {
		if (buf[i] != ' ')
			break;
		buf[i] = '\n';
		buf[i + 1] = 0;
	}

	return strlen(buf);
}
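
/*
 * Generate a read-only sysfs attribute for a string field of the
 * sys_info register block.  For instance,
 * DEVICE_ATTR_SYS_INFO_STR(vendor_id) expands to a vendor_id_show()
 * wrapper around io_string_show() plus the matching
 * dev_attr_vendor_id definition.
 */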
#define DEVICE_ATTR_SYS_INFO_STR(field) \
static ssize_t field ## _show(struct device *dev, \
	struct device_attribute *attr, char *buf) \
{ \
	struct switchtec_dev *stdev = to_stdev(dev); \
	return io_string_show(buf, &stdev->mmio_sys_info->field, \
			      sizeof(stdev->mmio_sys_info->field)); \
} \
\
static DEVICE_ATTR_RO(field)

DEVICE_ATTR_SYS_INFO_STR(vendor_id);
DEVICE_ATTR_SYS_INFO_STR(product_id);
DEVICE_ATTR_SYS_INFO_STR(product_revision);
DEVICE_ATTR_SYS_INFO_STR(component_vendor);

static ssize_t component_id_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	int id = ioread16(&stdev->mmio_sys_info->component_id);

	return sprintf(buf, "PM%04X\n", id);
}
static DEVICE_ATTR_RO(component_id);

static ssize_t component_revision_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	int rev = ioread8(&stdev->mmio_sys_info->component_revision);

	return sprintf(buf, "%d\n", rev);
}
static DEVICE_ATTR_RO(component_revision);

static ssize_t partition_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	return sprintf(buf, "%d\n", stdev->partition);
}
static DEVICE_ATTR_RO(partition);

static ssize_t partition_count_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	return sprintf(buf, "%d\n", stdev->partition_count);
}
static DEVICE_ATTR_RO(partition_count);

static struct attribute *switchtec_device_attrs[] = {
	&dev_attr_device_version.attr,
	&dev_attr_fw_version.attr,
	&dev_attr_vendor_id.attr,
	&dev_attr_product_id.attr,
	&dev_attr_product_revision.attr,
	&dev_attr_component_vendor.attr,
	&dev_attr_component_id.attr,
	&dev_attr_component_revision.attr,
	&dev_attr_partition.attr,
	&dev_attr_partition_count.attr,
	NULL,
};

ATTRIBUTE_GROUPS(switchtec_device);
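
/*
 * Character device interface.  Userspace issues an MRPC command by
 * write()ing a 4-byte command ID followed by the input payload, then
 * read()s back a 4-byte return code followed by the output payload.
 * Each open file descriptor may have at most one command in flight.
 */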
static int switchtec_dev_open(struct inode *inode, struct file *filp)
{
	struct switchtec_dev *stdev;
	struct switchtec_user *stuser;

	stdev = container_of(inode->i_cdev, struct switchtec_dev, cdev);

	stuser = stuser_create(stdev);
	if (IS_ERR(stuser))
		return PTR_ERR(stuser);

	filp->private_data = stuser;
	nonseekable_open(inode, filp);

	dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);

	return 0;
}

static int switchtec_dev_release(struct inode *inode, struct file *filp)
{
	struct switchtec_user *stuser = filp->private_data;

	stuser_put(stuser);

	return 0;
}

static int lock_mutex_and_test_alive(struct switchtec_dev *stdev)
{
	if (mutex_lock_interruptible(&stdev->mrpc_mutex))
		return -EINTR;

	if (!stdev->alive) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -ENODEV;
	}

	return 0;
}

static ssize_t switchtec_dev_write(struct file *filp, const char __user *data,
				   size_t size, loff_t *off)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;

	if (size < sizeof(stuser->cmd) ||
	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
		return -EINVAL;

	stuser->data_len = size - sizeof(stuser->cmd);

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state != MRPC_IDLE) {
		rc = -EBADE;
		goto out;
	}

	rc = copy_from_user(&stuser->cmd, data, sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	data += sizeof(stuser->cmd);
	rc = copy_from_user(&stuser->data, data, size - sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	rc = mrpc_queue_cmd(stuser);

out:
	mutex_unlock(&stdev->mrpc_mutex);

	if (rc)
		return rc;

	return size;
}

static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
				  size_t size, loff_t *off)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;

	if (size < sizeof(stuser->cmd) ||
	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
		return -EINVAL;

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state == MRPC_IDLE) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -EBADE;
	}

	stuser->read_len = size - sizeof(stuser->return_code);

	mutex_unlock(&stdev->mrpc_mutex);

	if (filp->f_flags & O_NONBLOCK) {
		if (!try_wait_for_completion(&stuser->comp))
			return -EAGAIN;
	} else {
		rc = wait_for_completion_interruptible(&stuser->comp);
		if (rc < 0)
			return rc;
	}

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state != MRPC_DONE) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -EBADE;
	}

	rc = copy_to_user(data, &stuser->return_code,
			  sizeof(stuser->return_code));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	data += sizeof(stuser->return_code);
	rc = copy_to_user(data, &stuser->data,
			  size - sizeof(stuser->return_code));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	stuser_set_state(stuser, MRPC_IDLE);

out:
	mutex_unlock(&stdev->mrpc_mutex);

	if (stuser->status == SWITCHTEC_MRPC_STATUS_DONE)
		return size;
	else if (stuser->status == SWITCHTEC_MRPC_STATUS_INTERRUPTED)
		return -ENXIO;
	else
		return -EBADMSG;
}

static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	__poll_t ret = 0;

	poll_wait(filp, &stuser->comp.wait, wait);
	poll_wait(filp, &stdev->event_wq, wait);

	if (lock_mutex_and_test_alive(stdev))
		return EPOLLIN | EPOLLRDHUP | EPOLLOUT | EPOLLERR | EPOLLHUP;

	mutex_unlock(&stdev->mrpc_mutex);

	if (try_wait_for_completion(&stuser->comp))
		ret |= EPOLLIN | EPOLLRDNORM;

	if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
		ret |= EPOLLPRI | EPOLLRDBAND;

	return ret;
}
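
/*
 * poll() reports two independent conditions: EPOLLIN | EPOLLRDNORM once
 * this descriptor's MRPC command has completed, and EPOLLPRI |
 * EPOLLRDBAND when the device's event counter has advanced past the
 * value this user last saw (resynchronized by
 * SWITCHTEC_IOCTL_EVENT_SUMMARY).
 */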
static int ioctl_flash_info(struct switchtec_dev *stdev,
			    struct switchtec_ioctl_flash_info __user *uinfo)
{
	struct switchtec_ioctl_flash_info info = {0};
	struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;

	info.flash_length = ioread32(&fi->flash_length);
	info.num_partitions = SWITCHTEC_IOCTL_NUM_PARTITIONS;

	if (copy_to_user(uinfo, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

static void set_fw_info_part(struct switchtec_ioctl_flash_part_info *info,
			     struct partition_info __iomem *pi)
{
	info->address = ioread32(&pi->address);
	info->length = ioread32(&pi->length);
}

static int ioctl_flash_part_info(struct switchtec_dev *stdev,
	struct switchtec_ioctl_flash_part_info __user *uinfo)
{
	struct switchtec_ioctl_flash_part_info info = {0};
	struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
	struct sys_info_regs __iomem *si = stdev->mmio_sys_info;
	u32 active_addr = -1;

	if (copy_from_user(&info, uinfo, sizeof(info)))
		return -EFAULT;

	switch (info.flash_partition) {
	case SWITCHTEC_IOCTL_PART_CFG0:
		active_addr = ioread32(&fi->active_cfg);
		set_fw_info_part(&info, &fi->cfg0);
		if (ioread16(&si->cfg_running) == SWITCHTEC_CFG0_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_CFG1:
		active_addr = ioread32(&fi->active_cfg);
		set_fw_info_part(&info, &fi->cfg1);
		if (ioread16(&si->cfg_running) == SWITCHTEC_CFG1_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG0:
		active_addr = ioread32(&fi->active_img);
		set_fw_info_part(&info, &fi->img0);
		if (ioread16(&si->img_running) == SWITCHTEC_IMG0_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG1:
		active_addr = ioread32(&fi->active_img);
		set_fw_info_part(&info, &fi->img1);
		if (ioread16(&si->img_running) == SWITCHTEC_IMG1_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_NVLOG:
		set_fw_info_part(&info, &fi->nvlog);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR0:
		set_fw_info_part(&info, &fi->vendor[0]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR1:
		set_fw_info_part(&info, &fi->vendor[1]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR2:
		set_fw_info_part(&info, &fi->vendor[2]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR3:
		set_fw_info_part(&info, &fi->vendor[3]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR4:
		set_fw_info_part(&info, &fi->vendor[4]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR5:
		set_fw_info_part(&info, &fi->vendor[5]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR6:
		set_fw_info_part(&info, &fi->vendor[6]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR7:
		set_fw_info_part(&info, &fi->vendor[7]);
		break;
	default:
		return -EINVAL;
	}

	if (info.address == active_addr)
		info.active |= SWITCHTEC_IOCTL_PART_ACTIVE;

	if (copy_to_user(uinfo, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

static int ioctl_event_summary(struct switchtec_dev *stdev,
			       struct switchtec_user *stuser,
			       struct switchtec_ioctl_event_summary __user *usum)
{
	struct switchtec_ioctl_event_summary s = {0};
	int i;
	u32 reg;

	s.global = ioread32(&stdev->mmio_sw_event->global_summary);
	s.part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap);
	s.local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);

	for (i = 0; i < stdev->partition_count; i++) {
		reg = ioread32(&stdev->mmio_part_cfg_all[i].part_event_summary);
		s.part[i] = reg;
	}

	for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
		reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
		if (reg != PCI_VENDOR_ID_MICROSEMI)
			break;

		reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary);
		s.pff[i] = reg;
	}

	if (copy_to_user(usum, &s, sizeof(s)))
		return -EFAULT;

	stuser->event_cnt = atomic_read(&stdev->event_cnt);

	return 0;
}

static u32 __iomem *global_ev_reg(struct switchtec_dev *stdev,
				  size_t offset, int index)
{
	return (void __iomem *)stdev->mmio_sw_event + offset;
}

static u32 __iomem *part_ev_reg(struct switchtec_dev *stdev,
				size_t offset, int index)
{
	return (void __iomem *)&stdev->mmio_part_cfg_all[index] + offset;
}

static u32 __iomem *pff_ev_reg(struct switchtec_dev *stdev,
			       size_t offset, int index)
{
	return (void __iomem *)&stdev->mmio_pff_csr[index] + offset;
}
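
/*
 * The EV_* macros below use designated array initializers so that each
 * ioctl event ID maps to the offset of its header register within the
 * relevant register block, paired with the helper above that resolves
 * the block (global, per-partition, or per-PFF) to an MMIO address.
 */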
#define EV_GLB(i, r)[i] = {offsetof(struct sw_event_regs, r), global_ev_reg}
#define EV_PAR(i, r)[i] = {offsetof(struct part_cfg_regs, r), part_ev_reg}
#define EV_PFF(i, r)[i] = {offsetof(struct pff_csr_regs, r), pff_ev_reg}

static const struct event_reg {
	size_t offset;
	u32 __iomem *(*map_reg)(struct switchtec_dev *stdev,
				size_t offset, int index);
} event_regs[] = {
	EV_GLB(SWITCHTEC_IOCTL_EVENT_STACK_ERROR, stack_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_PPU_ERROR, ppu_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_ISP_ERROR, isp_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_SYS_RESET, sys_reset_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_EXC, fw_exception_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NMI, fw_nmi_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NON_FATAL, fw_non_fatal_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_FATAL, fw_fatal_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP, twi_mrpc_comp_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP_ASYNC,
	       twi_mrpc_comp_async_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP, cli_mrpc_comp_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP_ASYNC,
	       cli_mrpc_comp_async_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_GPIO_INT, gpio_interrupt_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_GFMS, gfms_event_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_PART_RESET, part_reset_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP, mrpc_comp_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC, mrpc_comp_async_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_DYN_PART_BIND_COMP, dyn_binding_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_P2P, aer_in_p2p_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_VEP, aer_in_vep_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_DPC, dpc_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_CTS, cts_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_HOTPLUG, hotplug_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_IER, ier_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_THRESH, threshold_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_POWER_MGMT, power_mgmt_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_TLP_THROTTLING, tlp_throttling_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_FORCE_SPEED, force_speed_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT, credit_timeout_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_LINK_STATE, link_state_hdr),
};

static u32 __iomem *event_hdr_addr(struct switchtec_dev *stdev,
				   int event_id, int index)
{
	size_t off;

	if (event_id < 0 || event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
		return ERR_PTR(-EINVAL);

	off = event_regs[event_id].offset;

	if (event_regs[event_id].map_reg == part_ev_reg) {
		if (index == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
			index = stdev->partition;
		else if (index < 0 || index >= stdev->partition_count)
			return ERR_PTR(-EINVAL);
	} else if (event_regs[event_id].map_reg == pff_ev_reg) {
		if (index < 0 || index >= stdev->pff_csr_count)
			return ERR_PTR(-EINVAL);
	}

	return event_regs[event_id].map_reg(stdev, off, index);
}
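
/*
 * An event header register packs an "occurred" flag, an 8-bit
 * occurrence counter in bits [12:5] (as decoded by event_ctl() below),
 * and the per-destination enable bits (IRQ, log, CLI, fatal) that the
 * ioctl's EN/DIS flags toggle.  The event data words follow
 * immediately after the header.
 */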
static int event_ctl(struct switchtec_dev *stdev,
		     struct switchtec_ioctl_event_ctl *ctl)
{
	int i;
	u32 __iomem *reg;
	u32 hdr;

	reg = event_hdr_addr(stdev, ctl->event_id, ctl->index);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	hdr = ioread32(reg);
	for (i = 0; i < ARRAY_SIZE(ctl->data); i++)
		ctl->data[i] = ioread32(&reg[i + 1]);

	ctl->occurred = hdr & SWITCHTEC_EVENT_OCCURRED;
	ctl->count = (hdr >> 5) & 0xFF;

	if (!(ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_CLEAR))
		hdr &= ~SWITCHTEC_EVENT_CLEAR;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL)
		hdr |= SWITCHTEC_EVENT_EN_IRQ;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_POLL)
		hdr &= ~SWITCHTEC_EVENT_EN_IRQ;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG)
		hdr |= SWITCHTEC_EVENT_EN_LOG;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_LOG)
		hdr &= ~SWITCHTEC_EVENT_EN_LOG;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI)
		hdr |= SWITCHTEC_EVENT_EN_CLI;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_CLI)
		hdr &= ~SWITCHTEC_EVENT_EN_CLI;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL)
		hdr |= SWITCHTEC_EVENT_FATAL;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_FATAL)
		hdr &= ~SWITCHTEC_EVENT_FATAL;

	if (ctl->flags)
		iowrite32(hdr, reg);

	ctl->flags = 0;
	if (hdr & SWITCHTEC_EVENT_EN_IRQ)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL;
	if (hdr & SWITCHTEC_EVENT_EN_LOG)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG;
	if (hdr & SWITCHTEC_EVENT_EN_CLI)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI;
	if (hdr & SWITCHTEC_EVENT_FATAL)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL;

	return 0;
}

static int ioctl_event_ctl(struct switchtec_dev *stdev,
	struct switchtec_ioctl_event_ctl __user *uctl)
{
	int ret;
	int nr_idxs;
	struct switchtec_ioctl_event_ctl ctl;

	if (copy_from_user(&ctl, uctl, sizeof(ctl)))
		return -EFAULT;

	if (ctl.event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
		return -EINVAL;

	if (ctl.flags & SWITCHTEC_IOCTL_EVENT_FLAG_UNUSED)
		return -EINVAL;

	if (ctl.index == SWITCHTEC_IOCTL_EVENT_IDX_ALL) {
		if (event_regs[ctl.event_id].map_reg == global_ev_reg)
			nr_idxs = 1;
		else if (event_regs[ctl.event_id].map_reg == part_ev_reg)
			nr_idxs = stdev->partition_count;
		else if (event_regs[ctl.event_id].map_reg == pff_ev_reg)
			nr_idxs = stdev->pff_csr_count;
		else
			return -EINVAL;

		for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) {
			ret = event_ctl(stdev, &ctl);
			if (ret < 0)
				return ret;
		}
	} else {
		ret = event_ctl(stdev, &ctl);
		if (ret < 0)
			return ret;
	}

	if (copy_to_user(uctl, &ctl, sizeof(ctl)))
		return -EFAULT;

	return 0;
}

static int ioctl_pff_to_port(struct switchtec_dev *stdev,
			     struct switchtec_ioctl_pff_port *up)
{
	int i, part;
	u32 reg;
	struct part_cfg_regs *pcfg;
	struct switchtec_ioctl_pff_port p;

	if (copy_from_user(&p, up, sizeof(p)))
		return -EFAULT;

	p.port = -1;
	for (part = 0; part < stdev->partition_count; part++) {
		pcfg = &stdev->mmio_part_cfg_all[part];
		p.partition = part;

		reg = ioread32(&pcfg->usp_pff_inst_id);
		if (reg == p.pff) {
			p.port = 0;
			break;
		}

		reg = ioread32(&pcfg->vep_pff_inst_id);
		if (reg == p.pff) {
			p.port = SWITCHTEC_IOCTL_PFF_VEP;
			break;
		}

		for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
			reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
			if (reg != p.pff)
				continue;

			p.port = i + 1;
			break;
		}

		if (p.port != -1)
			break;
	}

	if (copy_to_user(up, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}

static int ioctl_port_to_pff(struct switchtec_dev *stdev,
			     struct switchtec_ioctl_pff_port *up)
{
	struct switchtec_ioctl_pff_port p;
	struct part_cfg_regs *pcfg;

	if (copy_from_user(&p, up, sizeof(p)))
		return -EFAULT;

	if (p.partition == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
		pcfg = stdev->mmio_part_cfg;
	else if (p.partition < stdev->partition_count)
		pcfg = &stdev->mmio_part_cfg_all[p.partition];
	else
		return -EINVAL;

	switch (p.port) {
	case 0:
		p.pff = ioread32(&pcfg->usp_pff_inst_id);
		break;
	case SWITCHTEC_IOCTL_PFF_VEP:
		p.pff = ioread32(&pcfg->vep_pff_inst_id);
		break;
	default:
		if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
			return -EINVAL;
		p.port = array_index_nospec(p.port,
					    ARRAY_SIZE(pcfg->dsp_pff_inst_id) + 1);
		p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
		break;
	}

	if (copy_to_user(up, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}

static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;
	void __user *argp = (void __user *)arg;

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	switch (cmd) {
	case SWITCHTEC_IOCTL_FLASH_INFO:
		rc = ioctl_flash_info(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_FLASH_PART_INFO:
		rc = ioctl_flash_part_info(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_EVENT_SUMMARY:
		rc = ioctl_event_summary(stdev, stuser, argp);
		break;
	case SWITCHTEC_IOCTL_EVENT_CTL:
		rc = ioctl_event_ctl(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_PFF_TO_PORT:
		rc = ioctl_pff_to_port(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_PORT_TO_PFF:
		rc = ioctl_port_to_pff(stdev, argp);
		break;
	default:
		rc = -ENOTTY;
		break;
	}

	mutex_unlock(&stdev->mrpc_mutex);

	return rc;
}
static const struct file_operations switchtec_fops = {
	.owner = THIS_MODULE,
	.open = switchtec_dev_open,
	.release = switchtec_dev_release,
	.write = switchtec_dev_write,
	.read = switchtec_dev_read,
	.poll = switchtec_dev_poll,
	.unlocked_ioctl = switchtec_dev_ioctl,
	.compat_ioctl = switchtec_dev_ioctl,
};
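
/*
 * A minimal userspace sketch of the MRPC protocol, illustrative only
 * and not part of this driver; the device path, MRPC_CMD_EXAMPLE value,
 * and error handling are hypothetical, and SWITCHTEC_MRPC_PAYLOAD_SIZE
 * (1024 in the kernel header) would need to be provided by the caller:
 *
 *	int fd = open("/dev/switchtec0", O_RDWR);
 *	struct {
 *		__u32 cmd;
 *		__u8 data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
 *	} req = { .cmd = MRPC_CMD_EXAMPLE };
 *	struct {
 *		__u32 return_code;
 *		__u8 data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
 *	} rsp;
 *
 *	write(fd, &req, sizeof(req));	// queue the command
 *	read(fd, &rsp, sizeof(rsp));	// blocks until completion
 */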
static void link_event_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;

	stdev = container_of(work, struct switchtec_dev, link_event_work);

	if (stdev->link_notifier)
		stdev->link_notifier(stdev);
}

static void check_link_state_events(struct switchtec_dev *stdev)
{
	int idx;
	u32 reg;
	int count;
	int occurred = 0;

	for (idx = 0; idx < stdev->pff_csr_count; idx++) {
		reg = ioread32(&stdev->mmio_pff_csr[idx].link_state_hdr);
		dev_dbg(&stdev->dev, "link_state: %d->%08x\n", idx, reg);
		count = (reg >> 5) & 0xFF;

		if (count != stdev->link_event_count[idx]) {
			occurred = 1;
			stdev->link_event_count[idx] = count;
		}
	}

	if (occurred)
		schedule_work(&stdev->link_event_work);
}

static void enable_link_state_events(struct switchtec_dev *stdev)
{
	int idx;

	for (idx = 0; idx < stdev->pff_csr_count; idx++) {
		iowrite32(SWITCHTEC_EVENT_CLEAR |
			  SWITCHTEC_EVENT_EN_IRQ,
			  &stdev->mmio_pff_csr[idx].link_state_hdr);
	}
}

static void stdev_release(struct device *dev)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	kfree(stdev);
}

static void stdev_kill(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser, *tmpuser;

	pci_clear_master(stdev->pdev);

	cancel_delayed_work_sync(&stdev->mrpc_timeout);

	/* Mark the hardware as unavailable and complete all completions */
	mutex_lock(&stdev->mrpc_mutex);
	stdev->alive = false;

	/* Wake up and kill any users waiting on an MRPC request */
	list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
		complete_all(&stuser->comp);
		list_del_init(&stuser->list);
		stuser_put(stuser);
	}

	mutex_unlock(&stdev->mrpc_mutex);

	/* Wake up any users waiting on event_wq */
	wake_up_interruptible(&stdev->event_wq);
}

static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev;
	int minor;
	struct device *dev;
	struct cdev *cdev;
	int rc;

	stdev = kzalloc_node(sizeof(*stdev), GFP_KERNEL,
			     dev_to_node(&pdev->dev));
	if (!stdev)
		return ERR_PTR(-ENOMEM);

	stdev->alive = true;
	stdev->pdev = pdev;
	INIT_LIST_HEAD(&stdev->mrpc_queue);
	mutex_init(&stdev->mrpc_mutex);
	stdev->mrpc_busy = 0;
	INIT_WORK(&stdev->mrpc_work, mrpc_event_work);
	INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work);
	INIT_WORK(&stdev->link_event_work, link_event_work);
	init_waitqueue_head(&stdev->event_wq);
	atomic_set(&stdev->event_cnt, 0);

	dev = &stdev->dev;
	device_initialize(dev);
	dev->class = switchtec_class;
	dev->parent = &pdev->dev;
	dev->groups = switchtec_device_groups;
	dev->release = stdev_release;

	minor = ida_simple_get(&switchtec_minor_ida, 0, 0,
			       GFP_KERNEL);
	if (minor < 0) {
		rc = minor;
		goto err_put;
	}

	dev->devt = MKDEV(MAJOR(switchtec_devt), minor);
	dev_set_name(dev, "switchtec%d", minor);

	cdev = &stdev->cdev;
	cdev_init(cdev, &switchtec_fops);
	cdev->owner = THIS_MODULE;

	return stdev;

err_put:
	put_device(&stdev->dev);
	return ERR_PTR(rc);
}

static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
{
	size_t off = event_regs[eid].offset;
	u32 __iomem *hdr_reg;
	u32 hdr;

	hdr_reg = event_regs[eid].map_reg(stdev, off, idx);
	hdr = ioread32(hdr_reg);

	if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
		return 0;

	if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE)
		return 0;

	dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
	hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
	iowrite32(hdr, hdr_reg);

	return 1;
}

static int mask_all_events(struct switchtec_dev *stdev, int eid)
{
	int idx;
	int count = 0;

	if (event_regs[eid].map_reg == part_ev_reg) {
		for (idx = 0; idx < stdev->partition_count; idx++)
			count += mask_event(stdev, eid, idx);
	} else if (event_regs[eid].map_reg == pff_ev_reg) {
		for (idx = 0; idx < stdev->pff_csr_count; idx++) {
			if (!stdev->pff_local[idx])
				continue;

			count += mask_event(stdev, eid, idx);
		}
	} else {
		count += mask_event(stdev, eid, 0);
	}

	return count;
}
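
/*
 * The ISR does not fully service events; it masks the IRQ enable bit
 * of any event that has occurred (except link-state events, which are
 * handled separately above) and bumps the event counter so pollers
 * wake up.  Userspace is expected to acknowledge and re-enable events
 * through SWITCHTEC_IOCTL_EVENT_CTL.
 */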
static irqreturn_t switchtec_event_isr(int irq, void *dev)
{
	struct switchtec_dev *stdev = dev;
	u32 reg;
	irqreturn_t ret = IRQ_NONE;
	int eid, event_count = 0;

	reg = ioread32(&stdev->mmio_part_cfg->mrpc_comp_hdr);
	if (reg & SWITCHTEC_EVENT_OCCURRED) {
		dev_dbg(&stdev->dev, "%s: mrpc comp\n", __func__);
		ret = IRQ_HANDLED;
		schedule_work(&stdev->mrpc_work);
		iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
	}

	check_link_state_events(stdev);

	for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++)
		event_count += mask_all_events(stdev, eid);

	if (event_count) {
		atomic_inc(&stdev->event_cnt);
		wake_up_interruptible(&stdev->event_wq);
		dev_dbg(&stdev->dev, "%s: %d events\n", __func__,
			event_count);
		return IRQ_HANDLED;
	}

	return ret;
}

static int switchtec_init_isr(struct switchtec_dev *stdev)
{
	int nvecs;
	int event_irq;

	nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, 4,
				      PCI_IRQ_MSIX | PCI_IRQ_MSI);
	if (nvecs < 0)
		return nvecs;

	event_irq = ioread32(&stdev->mmio_part_cfg->vep_vector_number);
	if (event_irq < 0 || event_irq >= nvecs)
		return -EFAULT;

	event_irq = pci_irq_vector(stdev->pdev, event_irq);
	if (event_irq < 0)
		return event_irq;

	return devm_request_irq(&stdev->pdev->dev, event_irq,
				switchtec_event_isr, 0,
				KBUILD_MODNAME, stdev);
}

static void init_pff(struct switchtec_dev *stdev)
{
	int i;
	u32 reg;
	struct part_cfg_regs *pcfg = stdev->mmio_part_cfg;

	for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
		reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
		if (reg != PCI_VENDOR_ID_MICROSEMI)
			break;
	}

	stdev->pff_csr_count = i;

	reg = ioread32(&pcfg->usp_pff_inst_id);
	if (reg < SWITCHTEC_MAX_PFF_CSR)
		stdev->pff_local[reg] = 1;

	reg = ioread32(&pcfg->vep_pff_inst_id);
	if (reg < SWITCHTEC_MAX_PFF_CSR)
		stdev->pff_local[reg] = 1;

	for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
		reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
		if (reg < SWITCHTEC_MAX_PFF_CSR)
			stdev->pff_local[reg] = 1;
	}
}

static int switchtec_init_pci(struct switchtec_dev *stdev,
			      struct pci_dev *pdev)
{
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 0x1, KBUILD_MODNAME);
	if (rc)
		return rc;

	pci_set_master(pdev);

	stdev->mmio = pcim_iomap_table(pdev)[0];
	stdev->mmio_mrpc = stdev->mmio + SWITCHTEC_GAS_MRPC_OFFSET;
	stdev->mmio_sw_event = stdev->mmio + SWITCHTEC_GAS_SW_EVENT_OFFSET;
	stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
	stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
	stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;
	stdev->partition = ioread8(&stdev->mmio_sys_info->partition_id);
	stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
	stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
	stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
	stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET;

	if (stdev->partition_count < 1)
		stdev->partition_count = 1;

	init_pff(stdev);

	pci_set_drvdata(pdev, stdev);

	return 0;
}

static int switchtec_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct switchtec_dev *stdev;
	int rc;

	if (pdev->class == (PCI_CLASS_BRIDGE_OTHER << 8))
		request_module_nowait("ntb_hw_switchtec");

	stdev = stdev_create(pdev);
	if (IS_ERR(stdev))
		return PTR_ERR(stdev);

	rc = switchtec_init_pci(stdev, pdev);
	if (rc)
		goto err_put;

	rc = switchtec_init_isr(stdev);
	if (rc) {
		dev_err(&stdev->dev, "failed to init isr.\n");
		goto err_put;
	}

	iowrite32(SWITCHTEC_EVENT_CLEAR |
		  SWITCHTEC_EVENT_EN_IRQ,
		  &stdev->mmio_part_cfg->mrpc_comp_hdr);
	enable_link_state_events(stdev);

	rc = cdev_device_add(&stdev->cdev, &stdev->dev);
	if (rc)
		goto err_devadd;

	dev_info(&stdev->dev, "Management device registered.\n");

	return 0;

err_devadd:
	stdev_kill(stdev);
err_put:
	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	put_device(&stdev->dev);
	return rc;
}

static void switchtec_pci_remove(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev = pci_get_drvdata(pdev);

	pci_set_drvdata(pdev, NULL);

	cdev_device_del(&stdev->cdev, &stdev->dev);
	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	dev_info(&stdev->dev, "unregistered.\n");

	stdev_kill(stdev);
	put_device(&stdev->dev);
}
#define SWITCHTEC_PCI_DEVICE(device_id) \
	{ \
		.vendor     = PCI_VENDOR_ID_MICROSEMI, \
		.device     = device_id, \
		.subvendor  = PCI_ANY_ID, \
		.subdevice  = PCI_ANY_ID, \
		.class      = (PCI_CLASS_MEMORY_OTHER << 8), \
		.class_mask = 0xFFFFFFFF, \
	}, \
	{ \
		.vendor     = PCI_VENDOR_ID_MICROSEMI, \
		.device     = device_id, \
		.subvendor  = PCI_ANY_ID, \
		.subdevice  = PCI_ANY_ID, \
		.class      = (PCI_CLASS_BRIDGE_OTHER << 8), \
		.class_mask = 0xFFFFFFFF, \
	}
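
/*
 * Each device ID is matched twice: once with the "other memory
 * controller" class used by the management endpoint, and once with the
 * "other bridge" class used when the part exposes NTB functionality;
 * probe() uses the bridge class to request the ntb_hw_switchtec module.
 */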
static const struct pci_device_id switchtec_pci_tbl[] = {
	SWITCHTEC_PCI_DEVICE(0x8531),  //PFX 24xG3
	SWITCHTEC_PCI_DEVICE(0x8532),  //PFX 32xG3
	SWITCHTEC_PCI_DEVICE(0x8533),  //PFX 48xG3
	SWITCHTEC_PCI_DEVICE(0x8534),  //PFX 64xG3
	SWITCHTEC_PCI_DEVICE(0x8535),  //PFX 80xG3
	SWITCHTEC_PCI_DEVICE(0x8536),  //PFX 96xG3
	SWITCHTEC_PCI_DEVICE(0x8541),  //PSX 24xG3
	SWITCHTEC_PCI_DEVICE(0x8542),  //PSX 32xG3
	SWITCHTEC_PCI_DEVICE(0x8543),  //PSX 48xG3
	SWITCHTEC_PCI_DEVICE(0x8544),  //PSX 64xG3
	SWITCHTEC_PCI_DEVICE(0x8545),  //PSX 80xG3
	SWITCHTEC_PCI_DEVICE(0x8546),  //PSX 96xG3
	SWITCHTEC_PCI_DEVICE(0x8551),  //PAX 24XG3
	SWITCHTEC_PCI_DEVICE(0x8552),  //PAX 32XG3
	SWITCHTEC_PCI_DEVICE(0x8553),  //PAX 48XG3
	SWITCHTEC_PCI_DEVICE(0x8554),  //PAX 64XG3
	SWITCHTEC_PCI_DEVICE(0x8555),  //PAX 80XG3
	SWITCHTEC_PCI_DEVICE(0x8556),  //PAX 96XG3
	SWITCHTEC_PCI_DEVICE(0x8561),  //PFXL 24XG3
	SWITCHTEC_PCI_DEVICE(0x8562),  //PFXL 32XG3
	SWITCHTEC_PCI_DEVICE(0x8563),  //PFXL 48XG3
	SWITCHTEC_PCI_DEVICE(0x8564),  //PFXL 64XG3
	SWITCHTEC_PCI_DEVICE(0x8565),  //PFXL 80XG3
	SWITCHTEC_PCI_DEVICE(0x8566),  //PFXL 96XG3
	SWITCHTEC_PCI_DEVICE(0x8571),  //PFXI 24XG3
	SWITCHTEC_PCI_DEVICE(0x8572),  //PFXI 32XG3
	SWITCHTEC_PCI_DEVICE(0x8573),  //PFXI 48XG3
	SWITCHTEC_PCI_DEVICE(0x8574),  //PFXI 64XG3
	SWITCHTEC_PCI_DEVICE(0x8575),  //PFXI 80XG3
	SWITCHTEC_PCI_DEVICE(0x8576),  //PFXI 96XG3
	{0}
};
MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);

static struct pci_driver switchtec_pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= switchtec_pci_tbl,
	.probe		= switchtec_pci_probe,
	.remove		= switchtec_pci_remove,
};

static int __init switchtec_init(void)
{
	int rc;

	rc = alloc_chrdev_region(&switchtec_devt, 0, max_devices,
				 "switchtec");
	if (rc)
		return rc;

	switchtec_class = class_create(THIS_MODULE, "switchtec");
	if (IS_ERR(switchtec_class)) {
		rc = PTR_ERR(switchtec_class);
		goto err_create_class;
	}

	rc = pci_register_driver(&switchtec_pci_driver);
	if (rc)
		goto err_pci_register;

	pr_info(KBUILD_MODNAME ": loaded.\n");

	return 0;

err_pci_register:
	class_destroy(switchtec_class);

err_create_class:
	unregister_chrdev_region(switchtec_devt, max_devices);

	return rc;
}
module_init(switchtec_init);

static void __exit switchtec_exit(void)
{
	pci_unregister_driver(&switchtec_pci_driver);
	class_destroy(switchtec_class);
	unregister_chrdev_region(switchtec_devt, max_devices);
	ida_destroy(&switchtec_minor_ida);

	pr_info(KBUILD_MODNAME ": unloaded.\n");
}
module_exit(switchtec_exit);