switchtec.c 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430
  1. /*
  2. * Microsemi Switchtec(tm) PCIe Management Driver
  3. * Copyright (c) 2017, Microsemi Corporation
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms and conditions of the GNU General Public License,
  7. * version 2, as published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope it will be useful, but WITHOUT
  10. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  12. * more details.
  13. *
  14. */
  15. #include <linux/switchtec.h>
  16. #include <linux/switchtec_ioctl.h>
  17. #include <linux/interrupt.h>
  18. #include <linux/module.h>
  19. #include <linux/fs.h>
  20. #include <linux/uaccess.h>
  21. #include <linux/poll.h>
  22. #include <linux/wait.h>
MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Microsemi Corporation");

/* Upper bound on char-device instances reserved at load time (0644: tunable). */
static int max_devices = 16;
module_param(max_devices, int, 0644);
MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");

/* Base dev_t for the driver's char-device minor range. */
static dev_t switchtec_devt;
/* Allocator for per-device minor numbers. */
static DEFINE_IDA(switchtec_minor_ida);

/* Device class; exported (GPL-only) for use by other modules. */
struct class *switchtec_class;
EXPORT_SYMBOL_GPL(switchtec_class);
/* Lifecycle of a user-submitted MRPC command. */
enum mrpc_state {
	MRPC_IDLE = 0,		/* no command outstanding for this user */
	MRPC_QUEUED,		/* on stdev->mrpc_queue, not yet written to hw */
	MRPC_RUNNING,		/* written to the MRPC registers, awaiting status */
	MRPC_DONE,		/* result captured; user may read() it back */
};
/*
 * Per-open-file state.  Each open of the char device gets one of these,
 * tracking a single in-flight MRPC command plus event bookkeeping.
 */
struct switchtec_user {
	struct switchtec_dev *stdev;	/* owning device (a dev ref is held) */

	enum mrpc_state state;		/* progress of this user's command */

	struct completion comp;		/* signalled when the command completes */
	struct kref kref;		/* refs: the fd, plus the mrpc_queue */
	struct list_head list;		/* entry on stdev->mrpc_queue */

	u32 cmd;			/* MRPC command number from write() */
	u32 status;			/* last hardware status register value */
	u32 return_code;		/* MRPC return value (valid when DONE) */
	size_t data_len;		/* bytes of input payload in data[] */
	size_t read_len;		/* bytes of output to copy back on read() */
	unsigned char data[SWITCHTEC_MRPC_PAYLOAD_SIZE];	/* in/out payload */

	int event_cnt;			/* snapshot of stdev->event_cnt for poll() */
};
  54. static struct switchtec_user *stuser_create(struct switchtec_dev *stdev)
  55. {
  56. struct switchtec_user *stuser;
  57. stuser = kzalloc(sizeof(*stuser), GFP_KERNEL);
  58. if (!stuser)
  59. return ERR_PTR(-ENOMEM);
  60. get_device(&stdev->dev);
  61. stuser->stdev = stdev;
  62. kref_init(&stuser->kref);
  63. INIT_LIST_HEAD(&stuser->list);
  64. init_completion(&stuser->comp);
  65. stuser->event_cnt = atomic_read(&stdev->event_cnt);
  66. dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);
  67. return stuser;
  68. }
  69. static void stuser_free(struct kref *kref)
  70. {
  71. struct switchtec_user *stuser;
  72. stuser = container_of(kref, struct switchtec_user, kref);
  73. dev_dbg(&stuser->stdev->dev, "%s: %p\n", __func__, stuser);
  74. put_device(&stuser->stdev->dev);
  75. kfree(stuser);
  76. }
/* Drop a reference on a switchtec_user; frees it when the last ref goes. */
static void stuser_put(struct switchtec_user *stuser)
{
	kref_put(&stuser->kref, stuser_free);
}
  81. static void stuser_set_state(struct switchtec_user *stuser,
  82. enum mrpc_state state)
  83. {
  84. /* requires the mrpc_mutex to already be held when called */
  85. const char * const state_names[] = {
  86. [MRPC_IDLE] = "IDLE",
  87. [MRPC_QUEUED] = "QUEUED",
  88. [MRPC_RUNNING] = "RUNNING",
  89. [MRPC_DONE] = "DONE",
  90. };
  91. stuser->state = state;
  92. dev_dbg(&stuser->stdev->dev, "stuser state %p -> %s",
  93. stuser, state_names[state]);
  94. }
static void mrpc_complete_cmd(struct switchtec_dev *stdev);

/*
 * Start the command at the head of the queue, if the hardware is free.
 * Requires the mrpc_mutex to already be held when called.
 */
static void mrpc_cmd_submit(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser;

	if (stdev->mrpc_busy)
		return;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	stuser_set_state(stuser, MRPC_RUNNING);
	stdev->mrpc_busy = 1;
	memcpy_toio(&stdev->mmio_mrpc->input_data,
		    stuser->data, stuser->data_len);
	/* Writing the command register kicks off execution. */
	iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd);

	/* The command may already be finished by the time we read status. */
	stuser->status = ioread32(&stdev->mmio_mrpc->status);
	if (stuser->status != SWITCHTEC_MRPC_STATUS_INPROGRESS)
		mrpc_complete_cmd(stdev);

	/* Poll again in 500ms in case the completion event never arrives. */
	schedule_delayed_work(&stdev->mrpc_timeout,
			      msecs_to_jiffies(500));
}
/*
 * Queue a user's command for execution and attempt immediate submission.
 * Requires the mrpc_mutex to already be held when called.
 */
static int mrpc_queue_cmd(struct switchtec_user *stuser)
{
	struct switchtec_dev *stdev = stuser->stdev;

	/* The queue holds its own reference, dropped in mrpc_complete_cmd(). */
	kref_get(&stuser->kref);
	stuser->read_len = sizeof(stuser->data);
	stuser_set_state(stuser, MRPC_QUEUED);
	init_completion(&stuser->comp);
	list_add_tail(&stuser->list, &stdev->mrpc_queue);

	mrpc_cmd_submit(stdev);

	return 0;
}
/*
 * Harvest the result of the command at the head of the queue, wake its
 * owner, and submit the next queued command.
 * Requires the mrpc_mutex to already be held when called.
 */
static void mrpc_complete_cmd(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	stuser->status = ioread32(&stdev->mmio_mrpc->status);
	if (stuser->status == SWITCHTEC_MRPC_STATUS_INPROGRESS)
		return;		/* spurious wakeup; command still running */

	stuser_set_state(stuser, MRPC_DONE);
	stuser->return_code = 0;

	if (stuser->status != SWITCHTEC_MRPC_STATUS_DONE)
		goto out;

	stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value);
	if (stuser->return_code != 0)
		goto out;

	/* Output data is only read back on a fully successful command. */
	memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
		      stuser->read_len);

out:
	complete_all(&stuser->comp);
	list_del_init(&stuser->list);
	stuser_put(stuser);	/* drop the queue's reference */
	stdev->mrpc_busy = 0;
	mrpc_cmd_submit(stdev);
}
  156. static void mrpc_event_work(struct work_struct *work)
  157. {
  158. struct switchtec_dev *stdev;
  159. stdev = container_of(work, struct switchtec_dev, mrpc_work);
  160. dev_dbg(&stdev->dev, "%s\n", __func__);
  161. mutex_lock(&stdev->mrpc_mutex);
  162. cancel_delayed_work(&stdev->mrpc_timeout);
  163. mrpc_complete_cmd(stdev);
  164. mutex_unlock(&stdev->mrpc_mutex);
  165. }
  166. static void mrpc_timeout_work(struct work_struct *work)
  167. {
  168. struct switchtec_dev *stdev;
  169. u32 status;
  170. stdev = container_of(work, struct switchtec_dev, mrpc_timeout.work);
  171. dev_dbg(&stdev->dev, "%s\n", __func__);
  172. mutex_lock(&stdev->mrpc_mutex);
  173. status = ioread32(&stdev->mmio_mrpc->status);
  174. if (status == SWITCHTEC_MRPC_STATUS_INPROGRESS) {
  175. schedule_delayed_work(&stdev->mrpc_timeout,
  176. msecs_to_jiffies(500));
  177. goto out;
  178. }
  179. mrpc_complete_cmd(stdev);
  180. out:
  181. mutex_unlock(&stdev->mrpc_mutex);
  182. }
  183. static ssize_t device_version_show(struct device *dev,
  184. struct device_attribute *attr, char *buf)
  185. {
  186. struct switchtec_dev *stdev = to_stdev(dev);
  187. u32 ver;
  188. ver = ioread32(&stdev->mmio_sys_info->device_version);
  189. return sprintf(buf, "%x\n", ver);
  190. }
  191. static DEVICE_ATTR_RO(device_version);
  192. static ssize_t fw_version_show(struct device *dev,
  193. struct device_attribute *attr, char *buf)
  194. {
  195. struct switchtec_dev *stdev = to_stdev(dev);
  196. u32 ver;
  197. ver = ioread32(&stdev->mmio_sys_info->firmware_version);
  198. return sprintf(buf, "%08x\n", ver);
  199. }
  200. static DEVICE_ATTR_RO(fw_version);
  201. static ssize_t io_string_show(char *buf, void __iomem *attr, size_t len)
  202. {
  203. int i;
  204. memcpy_fromio(buf, attr, len);
  205. buf[len] = '\n';
  206. buf[len + 1] = 0;
  207. for (i = len - 1; i > 0; i--) {
  208. if (buf[i] != ' ')
  209. break;
  210. buf[i] = '\n';
  211. buf[i + 1] = 0;
  212. }
  213. return strlen(buf);
  214. }
/*
 * Generate a read-only sysfs attribute for a fixed-width, space-padded
 * string field in the sys_info register space.
 */
#define DEVICE_ATTR_SYS_INFO_STR(field) \
static ssize_t field ## _show(struct device *dev, \
	struct device_attribute *attr, char *buf) \
{ \
	struct switchtec_dev *stdev = to_stdev(dev); \
	return io_string_show(buf, &stdev->mmio_sys_info->field, \
			      sizeof(stdev->mmio_sys_info->field)); \
} \
\
static DEVICE_ATTR_RO(field)

DEVICE_ATTR_SYS_INFO_STR(vendor_id);
DEVICE_ATTR_SYS_INFO_STR(product_id);
DEVICE_ATTR_SYS_INFO_STR(product_revision);
DEVICE_ATTR_SYS_INFO_STR(component_vendor);
  229. static ssize_t component_id_show(struct device *dev,
  230. struct device_attribute *attr, char *buf)
  231. {
  232. struct switchtec_dev *stdev = to_stdev(dev);
  233. int id = ioread16(&stdev->mmio_sys_info->component_id);
  234. return sprintf(buf, "PM%04X\n", id);
  235. }
  236. static DEVICE_ATTR_RO(component_id);
  237. static ssize_t component_revision_show(struct device *dev,
  238. struct device_attribute *attr, char *buf)
  239. {
  240. struct switchtec_dev *stdev = to_stdev(dev);
  241. int rev = ioread8(&stdev->mmio_sys_info->component_revision);
  242. return sprintf(buf, "%d\n", rev);
  243. }
  244. static DEVICE_ATTR_RO(component_revision);
  245. static ssize_t partition_show(struct device *dev,
  246. struct device_attribute *attr, char *buf)
  247. {
  248. struct switchtec_dev *stdev = to_stdev(dev);
  249. return sprintf(buf, "%d\n", stdev->partition);
  250. }
  251. static DEVICE_ATTR_RO(partition);
  252. static ssize_t partition_count_show(struct device *dev,
  253. struct device_attribute *attr, char *buf)
  254. {
  255. struct switchtec_dev *stdev = to_stdev(dev);
  256. return sprintf(buf, "%d\n", stdev->partition_count);
  257. }
  258. static DEVICE_ATTR_RO(partition_count);
/* Attributes published under the switchtec device's sysfs directory. */
static struct attribute *switchtec_device_attrs[] = {
	&dev_attr_device_version.attr,
	&dev_attr_fw_version.attr,
	&dev_attr_vendor_id.attr,
	&dev_attr_product_id.attr,
	&dev_attr_product_revision.attr,
	&dev_attr_component_vendor.attr,
	&dev_attr_component_id.attr,
	&dev_attr_component_revision.attr,
	&dev_attr_partition.attr,
	&dev_attr_partition_count.attr,
	NULL,
};

ATTRIBUTE_GROUPS(switchtec_device);
  273. static int switchtec_dev_open(struct inode *inode, struct file *filp)
  274. {
  275. struct switchtec_dev *stdev;
  276. struct switchtec_user *stuser;
  277. stdev = container_of(inode->i_cdev, struct switchtec_dev, cdev);
  278. stuser = stuser_create(stdev);
  279. if (IS_ERR(stuser))
  280. return PTR_ERR(stuser);
  281. filp->private_data = stuser;
  282. nonseekable_open(inode, filp);
  283. dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);
  284. return 0;
  285. }
/* release(): drop the fd's reference; the queue may still hold one. */
static int switchtec_dev_release(struct inode *inode, struct file *filp)
{
	struct switchtec_user *stuser = filp->private_data;

	stuser_put(stuser);

	return 0;
}
  292. static int lock_mutex_and_test_alive(struct switchtec_dev *stdev)
  293. {
  294. if (mutex_lock_interruptible(&stdev->mrpc_mutex))
  295. return -EINTR;
  296. if (!stdev->alive) {
  297. mutex_unlock(&stdev->mrpc_mutex);
  298. return -ENODEV;
  299. }
  300. return 0;
  301. }
/*
 * write(): submit an MRPC command.  The user buffer holds a u32 command
 * number followed by up to SWITCHTEC_MRPC_PAYLOAD_SIZE bytes of input.
 */
static ssize_t switchtec_dev_write(struct file *filp, const char __user *data,
				   size_t size, loff_t *off)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;

	if (size < sizeof(stuser->cmd) ||
	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
		return -EINVAL;

	stuser->data_len = size - sizeof(stuser->cmd);

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	/* Only one command may be in flight per open file. */
	if (stuser->state != MRPC_IDLE) {
		rc = -EBADE;
		goto out;
	}

	rc = copy_from_user(&stuser->cmd, data, sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	data += sizeof(stuser->cmd);
	rc = copy_from_user(&stuser->data, data, size - sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	rc = mrpc_queue_cmd(stuser);

out:
	mutex_unlock(&stdev->mrpc_mutex);

	if (rc)
		return rc;

	return size;
}
  337. static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
  338. size_t size, loff_t *off)
  339. {
  340. struct switchtec_user *stuser = filp->private_data;
  341. struct switchtec_dev *stdev = stuser->stdev;
  342. int rc;
  343. if (size < sizeof(stuser->cmd) ||
  344. size > sizeof(stuser->cmd) + sizeof(stuser->data))
  345. return -EINVAL;
  346. rc = lock_mutex_and_test_alive(stdev);
  347. if (rc)
  348. return rc;
  349. if (stuser->state == MRPC_IDLE) {
  350. mutex_unlock(&stdev->mrpc_mutex);
  351. return -EBADE;
  352. }
  353. stuser->read_len = size - sizeof(stuser->return_code);
  354. mutex_unlock(&stdev->mrpc_mutex);
  355. if (filp->f_flags & O_NONBLOCK) {
  356. if (!try_wait_for_completion(&stuser->comp))
  357. return -EAGAIN;
  358. } else {
  359. rc = wait_for_completion_interruptible(&stuser->comp);
  360. if (rc < 0)
  361. return rc;
  362. }
  363. rc = lock_mutex_and_test_alive(stdev);
  364. if (rc)
  365. return rc;
  366. if (stuser->state != MRPC_DONE) {
  367. mutex_unlock(&stdev->mrpc_mutex);
  368. return -EBADE;
  369. }
  370. rc = copy_to_user(data, &stuser->return_code,
  371. sizeof(stuser->return_code));
  372. if (rc) {
  373. rc = -EFAULT;
  374. goto out;
  375. }
  376. data += sizeof(stuser->return_code);
  377. rc = copy_to_user(data, &stuser->data,
  378. size - sizeof(stuser->return_code));
  379. if (rc) {
  380. rc = -EFAULT;
  381. goto out;
  382. }
  383. stuser_set_state(stuser, MRPC_IDLE);
  384. out:
  385. mutex_unlock(&stdev->mrpc_mutex);
  386. if (stuser->status == SWITCHTEC_MRPC_STATUS_DONE)
  387. return size;
  388. else if (stuser->status == SWITCHTEC_MRPC_STATUS_INTERRUPTED)
  389. return -ENXIO;
  390. else
  391. return -EBADMSG;
  392. }
  393. static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait)
  394. {
  395. struct switchtec_user *stuser = filp->private_data;
  396. struct switchtec_dev *stdev = stuser->stdev;
  397. __poll_t ret = 0;
  398. poll_wait(filp, &stuser->comp.wait, wait);
  399. poll_wait(filp, &stdev->event_wq, wait);
  400. if (lock_mutex_and_test_alive(stdev))
  401. return POLLIN | POLLRDHUP | POLLOUT | POLLERR | POLLHUP;
  402. mutex_unlock(&stdev->mrpc_mutex);
  403. if (try_wait_for_completion(&stuser->comp))
  404. ret |= POLLIN | POLLRDNORM;
  405. if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
  406. ret |= POLLPRI | POLLRDBAND;
  407. return ret;
  408. }
  409. static int ioctl_flash_info(struct switchtec_dev *stdev,
  410. struct switchtec_ioctl_flash_info __user *uinfo)
  411. {
  412. struct switchtec_ioctl_flash_info info = {0};
  413. struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
  414. info.flash_length = ioread32(&fi->flash_length);
  415. info.num_partitions = SWITCHTEC_IOCTL_NUM_PARTITIONS;
  416. if (copy_to_user(uinfo, &info, sizeof(info)))
  417. return -EFAULT;
  418. return 0;
  419. }
/* Fill in a partition's flash address and length from its registers. */
static void set_fw_info_part(struct switchtec_ioctl_flash_part_info *info,
			     struct partition_info __iomem *pi)
{
	info->address = ioread32(&pi->address);
	info->length = ioread32(&pi->length);
}
/*
 * Report a flash partition's address/length and whether it is the
 * active (boot-selected) and/or currently-running image.
 */
static int ioctl_flash_part_info(struct switchtec_dev *stdev,
	struct switchtec_ioctl_flash_part_info __user *uinfo)
{
	struct switchtec_ioctl_flash_part_info info = {0};
	struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
	struct sys_info_regs __iomem *si = stdev->mmio_sys_info;
	u32 active_addr = -1;	/* -1: this partition class has no "active" */

	if (copy_from_user(&info, uinfo, sizeof(info)))
		return -EFAULT;

	switch (info.flash_partition) {
	case SWITCHTEC_IOCTL_PART_CFG0:
		active_addr = ioread32(&fi->active_cfg);
		set_fw_info_part(&info, &fi->cfg0);

		if (ioread16(&si->cfg_running) == SWITCHTEC_CFG0_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_CFG1:
		active_addr = ioread32(&fi->active_cfg);
		set_fw_info_part(&info, &fi->cfg1);

		if (ioread16(&si->cfg_running) == SWITCHTEC_CFG1_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG0:
		active_addr = ioread32(&fi->active_img);
		set_fw_info_part(&info, &fi->img0);

		if (ioread16(&si->img_running) == SWITCHTEC_IMG0_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG1:
		active_addr = ioread32(&fi->active_img);
		set_fw_info_part(&info, &fi->img1);

		if (ioread16(&si->img_running) == SWITCHTEC_IMG1_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_NVLOG:
		set_fw_info_part(&info, &fi->nvlog);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR0:
		set_fw_info_part(&info, &fi->vendor[0]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR1:
		set_fw_info_part(&info, &fi->vendor[1]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR2:
		set_fw_info_part(&info, &fi->vendor[2]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR3:
		set_fw_info_part(&info, &fi->vendor[3]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR4:
		set_fw_info_part(&info, &fi->vendor[4]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR5:
		set_fw_info_part(&info, &fi->vendor[5]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR6:
		set_fw_info_part(&info, &fi->vendor[6]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR7:
		set_fw_info_part(&info, &fi->vendor[7]);
		break;
	default:
		return -EINVAL;
	}

	/* Active means the active_* register points at this partition. */
	if (info.address == active_addr)
		info.active |= SWITCHTEC_IOCTL_PART_ACTIVE;

	if (copy_to_user(uinfo, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
/*
 * Gather all event summary registers (global, per-partition, per-PFF)
 * into one structure for userspace.
 */
static int ioctl_event_summary(struct switchtec_dev *stdev,
	struct switchtec_user *stuser,
	struct switchtec_ioctl_event_summary __user *usum)
{
	struct switchtec_ioctl_event_summary s = {0};
	int i;
	u32 reg;

	s.global = ioread32(&stdev->mmio_sw_event->global_summary);
	s.part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap);
	s.local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);

	/* NOTE(review): assumes partition_count <= ARRAY_SIZE(s.part);
	 * confirm where partition_count is initialised. */
	for (i = 0; i < stdev->partition_count; i++) {
		reg = ioread32(&stdev->mmio_part_cfg_all[i].part_event_summary);
		s.part[i] = reg;
	}

	for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
		/* A non-Microsemi vendor id marks the end of the valid PFFs. */
		reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
		if (reg != MICROSEMI_VENDOR_ID)
			break;

		reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary);
		s.pff[i] = reg;
	}

	if (copy_to_user(usum, &s, sizeof(s)))
		return -EFAULT;

	/* Record that this user has seen events up to the current count. */
	stuser->event_cnt = atomic_read(&stdev->event_cnt);

	return 0;
}
/* Event header address in the global (switch-wide) region; index unused. */
static u32 __iomem *global_ev_reg(struct switchtec_dev *stdev,
				  size_t offset, int index)
{
	return (void __iomem *)stdev->mmio_sw_event + offset;
}
/* Event header address within partition @index's config region. */
static u32 __iomem *part_ev_reg(struct switchtec_dev *stdev,
				size_t offset, int index)
{
	return (void __iomem *)&stdev->mmio_part_cfg_all[index] + offset;
}
/* Event header address within PFF @index's CSR region. */
static u32 __iomem *pff_ev_reg(struct switchtec_dev *stdev,
			       size_t offset, int index)
{
	return (void __iomem *)&stdev->mmio_pff_csr[index] + offset;
}
/*
 * Table mapping each ioctl event id to its header register offset and the
 * helper that resolves the register's base address (global, per-partition
 * or per-PFF).
 */
#define EV_GLB(i, r)[i] = {offsetof(struct sw_event_regs, r), global_ev_reg}
#define EV_PAR(i, r)[i] = {offsetof(struct part_cfg_regs, r), part_ev_reg}
#define EV_PFF(i, r)[i] = {offsetof(struct pff_csr_regs, r), pff_ev_reg}

static const struct event_reg {
	size_t offset;
	u32 __iomem *(*map_reg)(struct switchtec_dev *stdev,
				size_t offset, int index);
} event_regs[] = {
	EV_GLB(SWITCHTEC_IOCTL_EVENT_STACK_ERROR, stack_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_PPU_ERROR, ppu_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_ISP_ERROR, isp_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_SYS_RESET, sys_reset_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_EXC, fw_exception_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NMI, fw_nmi_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NON_FATAL, fw_non_fatal_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_FATAL, fw_fatal_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP, twi_mrpc_comp_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP_ASYNC,
	       twi_mrpc_comp_async_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP, cli_mrpc_comp_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP_ASYNC,
	       cli_mrpc_comp_async_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_GPIO_INT, gpio_interrupt_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_PART_RESET, part_reset_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP, mrpc_comp_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC, mrpc_comp_async_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_DYN_PART_BIND_COMP, dyn_binding_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_P2P, aer_in_p2p_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_VEP, aer_in_vep_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_DPC, dpc_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_CTS, cts_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_HOTPLUG, hotplug_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_IER, ier_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_THRESH, threshold_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_POWER_MGMT, power_mgmt_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_TLP_THROTTLING, tlp_throttling_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_FORCE_SPEED, force_speed_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT, credit_timeout_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_LINK_STATE, link_state_hdr),
};
/*
 * Resolve an (event_id, index) pair to the address of the event's header
 * register, validating the index against the event's register class.
 */
static u32 __iomem *event_hdr_addr(struct switchtec_dev *stdev,
				   int event_id, int index)
{
	size_t off;

	if (event_id < 0 || event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
		return ERR_PTR(-EINVAL);

	off = event_regs[event_id].offset;

	if (event_regs[event_id].map_reg == part_ev_reg) {
		/* LOCAL_PART_IDX is an alias for this device's partition. */
		if (index == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
			index = stdev->partition;
		else if (index < 0 || index >= stdev->partition_count)
			return ERR_PTR(-EINVAL);
	} else if (event_regs[event_id].map_reg == pff_ev_reg) {
		if (index < 0 || index >= stdev->pff_csr_count)
			return ERR_PTR(-EINVAL);
	}

	return event_regs[event_id].map_reg(stdev, off, index);
}
/*
 * Read one event's header and data registers, apply the caller's
 * enable/disable/clear flags via a read-modify-write of the header, and
 * rewrite ctl->flags to reflect the header's resulting enable bits.
 */
static int event_ctl(struct switchtec_dev *stdev,
		     struct switchtec_ioctl_event_ctl *ctl)
{
	int i;
	u32 __iomem *reg;
	u32 hdr;

	reg = event_hdr_addr(stdev, ctl->event_id, ctl->index);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	hdr = ioread32(reg);
	/* Event data words immediately follow the header register. */
	for (i = 0; i < ARRAY_SIZE(ctl->data); i++)
		ctl->data[i] = ioread32(&reg[i + 1]);

	ctl->occurred = hdr & SWITCHTEC_EVENT_OCCURRED;
	ctl->count = (hdr >> 5) & 0xFF;	/* occurrence count: header bits 12:5 */

	/* Only write the clear bit back if the caller asked to clear. */
	if (!(ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_CLEAR))
		hdr &= ~SWITCHTEC_EVENT_CLEAR;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL)
		hdr |= SWITCHTEC_EVENT_EN_IRQ;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_POLL)
		hdr &= ~SWITCHTEC_EVENT_EN_IRQ;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG)
		hdr |= SWITCHTEC_EVENT_EN_LOG;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_LOG)
		hdr &= ~SWITCHTEC_EVENT_EN_LOG;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI)
		hdr |= SWITCHTEC_EVENT_EN_CLI;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_CLI)
		hdr &= ~SWITCHTEC_EVENT_EN_CLI;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL)
		hdr |= SWITCHTEC_EVENT_FATAL;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_FATAL)
		hdr &= ~SWITCHTEC_EVENT_FATAL;

	/* Skip the register write entirely if nothing was requested. */
	if (ctl->flags)
		iowrite32(hdr, reg);

	/* Report back the enable bits now in effect. */
	ctl->flags = 0;
	if (hdr & SWITCHTEC_EVENT_EN_IRQ)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL;
	if (hdr & SWITCHTEC_EVENT_EN_LOG)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG;
	if (hdr & SWITCHTEC_EVENT_EN_CLI)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI;
	if (hdr & SWITCHTEC_EVENT_FATAL)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL;

	return 0;
}
  640. static int ioctl_event_ctl(struct switchtec_dev *stdev,
  641. struct switchtec_ioctl_event_ctl __user *uctl)
  642. {
  643. int ret;
  644. int nr_idxs;
  645. struct switchtec_ioctl_event_ctl ctl;
  646. if (copy_from_user(&ctl, uctl, sizeof(ctl)))
  647. return -EFAULT;
  648. if (ctl.event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
  649. return -EINVAL;
  650. if (ctl.flags & SWITCHTEC_IOCTL_EVENT_FLAG_UNUSED)
  651. return -EINVAL;
  652. if (ctl.index == SWITCHTEC_IOCTL_EVENT_IDX_ALL) {
  653. if (event_regs[ctl.event_id].map_reg == global_ev_reg)
  654. nr_idxs = 1;
  655. else if (event_regs[ctl.event_id].map_reg == part_ev_reg)
  656. nr_idxs = stdev->partition_count;
  657. else if (event_regs[ctl.event_id].map_reg == pff_ev_reg)
  658. nr_idxs = stdev->pff_csr_count;
  659. else
  660. return -EINVAL;
  661. for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) {
  662. ret = event_ctl(stdev, &ctl);
  663. if (ret < 0)
  664. return ret;
  665. }
  666. } else {
  667. ret = event_ctl(stdev, &ctl);
  668. if (ret < 0)
  669. return ret;
  670. }
  671. if (copy_to_user(uctl, &ctl, sizeof(ctl)))
  672. return -EFAULT;
  673. return 0;
  674. }
/*
 * Map a PFF instance id to its (partition, logical port) pair by scanning
 * every partition's USP, VEP and DSP instance-id registers.
 */
static int ioctl_pff_to_port(struct switchtec_dev *stdev,
	struct switchtec_ioctl_pff_port *up)
{
	int i, part;
	u32 reg;
	struct part_cfg_regs *pcfg;
	struct switchtec_ioctl_pff_port p;

	if (copy_from_user(&p, up, sizeof(p)))
		return -EFAULT;

	p.port = -1;	/* reported back as -1 if the PFF is not found */
	for (part = 0; part < stdev->partition_count; part++) {
		pcfg = &stdev->mmio_part_cfg_all[part];
		p.partition = part;

		reg = ioread32(&pcfg->usp_pff_inst_id);
		if (reg == p.pff) {
			p.port = 0;	/* port 0 is the upstream port */
			break;
		}

		reg = ioread32(&pcfg->vep_pff_inst_id);
		if (reg == p.pff) {
			p.port = SWITCHTEC_IOCTL_PFF_VEP;
			break;
		}

		for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
			reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
			if (reg != p.pff)
				continue;

			p.port = i + 1;	/* downstream ports are 1-based */
			break;
		}

		if (p.port != -1)
			break;
	}

	if (copy_to_user(up, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}
/* Map a (partition, logical port) pair back to its PFF instance id. */
static int ioctl_port_to_pff(struct switchtec_dev *stdev,
	struct switchtec_ioctl_pff_port *up)
{
	struct switchtec_ioctl_pff_port p;
	struct part_cfg_regs *pcfg;

	if (copy_from_user(&p, up, sizeof(p)))
		return -EFAULT;

	if (p.partition == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
		pcfg = stdev->mmio_part_cfg;
	else if (p.partition < stdev->partition_count)
		pcfg = &stdev->mmio_part_cfg_all[p.partition];
	else
		return -EINVAL;

	switch (p.port) {
	case 0:
		p.pff = ioread32(&pcfg->usp_pff_inst_id);
		break;
	case SWITCHTEC_IOCTL_PFF_VEP:
		p.pff = ioread32(&pcfg->vep_pff_inst_id);
		break;
	default:
		/* Downstream ports are 1-based, so port N maps to dsp[N-1]. */
		if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
			return -EINVAL;
		p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
		break;
	}

	if (copy_to_user(up, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}
  742. static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd,
  743. unsigned long arg)
  744. {
  745. struct switchtec_user *stuser = filp->private_data;
  746. struct switchtec_dev *stdev = stuser->stdev;
  747. int rc;
  748. void __user *argp = (void __user *)arg;
  749. rc = lock_mutex_and_test_alive(stdev);
  750. if (rc)
  751. return rc;
  752. switch (cmd) {
  753. case SWITCHTEC_IOCTL_FLASH_INFO:
  754. rc = ioctl_flash_info(stdev, argp);
  755. break;
  756. case SWITCHTEC_IOCTL_FLASH_PART_INFO:
  757. rc = ioctl_flash_part_info(stdev, argp);
  758. break;
  759. case SWITCHTEC_IOCTL_EVENT_SUMMARY:
  760. rc = ioctl_event_summary(stdev, stuser, argp);
  761. break;
  762. case SWITCHTEC_IOCTL_EVENT_CTL:
  763. rc = ioctl_event_ctl(stdev, argp);
  764. break;
  765. case SWITCHTEC_IOCTL_PFF_TO_PORT:
  766. rc = ioctl_pff_to_port(stdev, argp);
  767. break;
  768. case SWITCHTEC_IOCTL_PORT_TO_PFF:
  769. rc = ioctl_port_to_pff(stdev, argp);
  770. break;
  771. default:
  772. rc = -ENOTTY;
  773. break;
  774. }
  775. mutex_unlock(&stdev->mrpc_mutex);
  776. return rc;
  777. }
/* File operations for the switchtec management char device */
static const struct file_operations switchtec_fops = {
	.owner = THIS_MODULE,
	.open = switchtec_dev_open,
	.release = switchtec_dev_release,
	.write = switchtec_dev_write,
	.read = switchtec_dev_read,
	.poll = switchtec_dev_poll,
	.unlocked_ioctl = switchtec_dev_ioctl,
	/* the same handler also serves 32-bit compat ioctls */
	.compat_ioctl = switchtec_dev_ioctl,
};
  788. static void link_event_work(struct work_struct *work)
  789. {
  790. struct switchtec_dev *stdev;
  791. stdev = container_of(work, struct switchtec_dev, link_event_work);
  792. if (stdev->link_notifier)
  793. stdev->link_notifier(stdev);
  794. }
  795. static void check_link_state_events(struct switchtec_dev *stdev)
  796. {
  797. int idx;
  798. u32 reg;
  799. int count;
  800. int occurred = 0;
  801. for (idx = 0; idx < stdev->pff_csr_count; idx++) {
  802. reg = ioread32(&stdev->mmio_pff_csr[idx].link_state_hdr);
  803. dev_dbg(&stdev->dev, "link_state: %d->%08x\n", idx, reg);
  804. count = (reg >> 5) & 0xFF;
  805. if (count != stdev->link_event_count[idx]) {
  806. occurred = 1;
  807. stdev->link_event_count[idx] = count;
  808. }
  809. }
  810. if (occurred)
  811. schedule_work(&stdev->link_event_work);
  812. }
  813. static void enable_link_state_events(struct switchtec_dev *stdev)
  814. {
  815. int idx;
  816. for (idx = 0; idx < stdev->pff_csr_count; idx++) {
  817. iowrite32(SWITCHTEC_EVENT_CLEAR |
  818. SWITCHTEC_EVENT_EN_IRQ,
  819. &stdev->mmio_pff_csr[idx].link_state_hdr);
  820. }
  821. }
  822. static void stdev_release(struct device *dev)
  823. {
  824. struct switchtec_dev *stdev = to_stdev(dev);
  825. kfree(stdev);
  826. }
/*
 * Tear down a dying device: stop bus mastering, cancel the MRPC timeout
 * timer, mark the hardware dead under the MRPC mutex, and wake every
 * waiter so userspace sees the failure instead of blocking forever.
 */
static void stdev_kill(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser, *tmpuser;

	pci_clear_master(stdev->pdev);

	cancel_delayed_work_sync(&stdev->mrpc_timeout);

	/* Mark the hardware as unavailable and complete all completions */
	mutex_lock(&stdev->mrpc_mutex);
	stdev->alive = false;

	/* Wake up and kill any users waiting on an MRPC request */
	list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
		complete_all(&stuser->comp);
		list_del_init(&stuser->list);
		/* drop the reference the queue held on this user */
		stuser_put(stuser);
	}

	mutex_unlock(&stdev->mrpc_mutex);

	/* Wake up any users waiting on event_wq */
	wake_up_interruptible(&stdev->event_wq);
}
/*
 * Allocate and initialise a struct switchtec_dev for @pdev: the MRPC
 * queue, work items, event wait queue, device object and cdev.  The cdev
 * is initialised but not added here; the caller does that with
 * cdev_device_add() after PCI setup succeeds.
 *
 * Returns the new device or an ERR_PTR().  After device_initialize()
 * the memory is owned by the device core and released through
 * stdev_release() via put_device().
 */
static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev;
	int minor;
	struct device *dev;
	struct cdev *cdev;
	int rc;

	/* allocate on the NUMA node closest to the PCI device */
	stdev = kzalloc_node(sizeof(*stdev), GFP_KERNEL,
			     dev_to_node(&pdev->dev));
	if (!stdev)
		return ERR_PTR(-ENOMEM);

	stdev->alive = true;
	stdev->pdev = pdev;
	INIT_LIST_HEAD(&stdev->mrpc_queue);
	mutex_init(&stdev->mrpc_mutex);
	stdev->mrpc_busy = 0;
	INIT_WORK(&stdev->mrpc_work, mrpc_event_work);
	INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work);
	INIT_WORK(&stdev->link_event_work, link_event_work);
	init_waitqueue_head(&stdev->event_wq);
	atomic_set(&stdev->event_cnt, 0);

	dev = &stdev->dev;
	device_initialize(dev);
	dev->class = switchtec_class;
	dev->parent = &pdev->dev;
	dev->groups = switchtec_device_groups;
	dev->release = stdev_release;

	minor = ida_simple_get(&switchtec_minor_ida, 0, 0,
			       GFP_KERNEL);
	if (minor < 0) {
		rc = minor;
		goto err_put;
	}

	dev->devt = MKDEV(MAJOR(switchtec_devt), minor);
	dev_set_name(dev, "switchtec%d", minor);

	cdev = &stdev->cdev;
	cdev_init(cdev, &switchtec_fops);
	cdev->owner = THIS_MODULE;

	return stdev;

err_put:
	/* no minor to free on this path; stdev_release() frees the memory */
	put_device(&stdev->dev);
	return ERR_PTR(rc);
}
  888. static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
  889. {
  890. size_t off = event_regs[eid].offset;
  891. u32 __iomem *hdr_reg;
  892. u32 hdr;
  893. hdr_reg = event_regs[eid].map_reg(stdev, off, idx);
  894. hdr = ioread32(hdr_reg);
  895. if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
  896. return 0;
  897. if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE)
  898. return 0;
  899. dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
  900. hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
  901. iowrite32(hdr, hdr_reg);
  902. return 1;
  903. }
  904. static int mask_all_events(struct switchtec_dev *stdev, int eid)
  905. {
  906. int idx;
  907. int count = 0;
  908. if (event_regs[eid].map_reg == part_ev_reg) {
  909. for (idx = 0; idx < stdev->partition_count; idx++)
  910. count += mask_event(stdev, eid, idx);
  911. } else if (event_regs[eid].map_reg == pff_ev_reg) {
  912. for (idx = 0; idx < stdev->pff_csr_count; idx++) {
  913. if (!stdev->pff_local[idx])
  914. continue;
  915. count += mask_event(stdev, eid, idx);
  916. }
  917. } else {
  918. count += mask_event(stdev, eid, 0);
  919. }
  920. return count;
  921. }
/*
 * Shared top-half for the MRPC-completion and event interrupt.  MRPC
 * completions are acknowledged and deferred to mrpc_work; other events
 * are IRQ-masked via mask_all_events() and pollers on event_wq are
 * woken.  Returns IRQ_HANDLED if either source fired.
 */
static irqreturn_t switchtec_event_isr(int irq, void *dev)
{
	struct switchtec_dev *stdev = dev;
	u32 reg;
	irqreturn_t ret = IRQ_NONE;
	int eid, event_count = 0;

	reg = ioread32(&stdev->mmio_part_cfg->mrpc_comp_hdr);
	if (reg & SWITCHTEC_EVENT_OCCURRED) {
		dev_dbg(&stdev->dev, "%s: mrpc comp\n", __func__);
		ret = IRQ_HANDLED;
		schedule_work(&stdev->mrpc_work);
		/* write the header back to acknowledge the completion event */
		iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
	}

	check_link_state_events(stdev);

	for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++)
		event_count += mask_all_events(stdev, eid);

	if (event_count) {
		atomic_inc(&stdev->event_cnt);
		wake_up_interruptible(&stdev->event_wq);
		dev_dbg(&stdev->dev, "%s: %d events\n", __func__,
			event_count);
		return IRQ_HANDLED;
	}

	return ret;
}
  947. static int switchtec_init_isr(struct switchtec_dev *stdev)
  948. {
  949. int nvecs;
  950. int event_irq;
  951. nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, 4,
  952. PCI_IRQ_MSIX | PCI_IRQ_MSI);
  953. if (nvecs < 0)
  954. return nvecs;
  955. event_irq = ioread32(&stdev->mmio_part_cfg->vep_vector_number);
  956. if (event_irq < 0 || event_irq >= nvecs)
  957. return -EFAULT;
  958. event_irq = pci_irq_vector(stdev->pdev, event_irq);
  959. if (event_irq < 0)
  960. return event_irq;
  961. return devm_request_irq(&stdev->pdev->dev, event_irq,
  962. switchtec_event_isr, 0,
  963. KBUILD_MODNAME, stdev);
  964. }
  965. static void init_pff(struct switchtec_dev *stdev)
  966. {
  967. int i;
  968. u32 reg;
  969. struct part_cfg_regs *pcfg = stdev->mmio_part_cfg;
  970. for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
  971. reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
  972. if (reg != MICROSEMI_VENDOR_ID)
  973. break;
  974. }
  975. stdev->pff_csr_count = i;
  976. reg = ioread32(&pcfg->usp_pff_inst_id);
  977. if (reg < SWITCHTEC_MAX_PFF_CSR)
  978. stdev->pff_local[reg] = 1;
  979. reg = ioread32(&pcfg->vep_pff_inst_id);
  980. if (reg < SWITCHTEC_MAX_PFF_CSR)
  981. stdev->pff_local[reg] = 1;
  982. for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
  983. reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
  984. if (reg < SWITCHTEC_MAX_PFF_CSR)
  985. stdev->pff_local[reg] = 1;
  986. }
  987. }
/*
 * Enable and map BAR 0, then carve the global address space (GAS) into
 * the per-region pointers used by the rest of the driver, and read the
 * local partition id and total partition count.  All resources are
 * device-managed (pcim_*), so no explicit unwind path is needed.
 */
static int switchtec_init_pci(struct switchtec_dev *stdev,
			      struct pci_dev *pdev)
{
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 0x1, KBUILD_MODNAME);
	if (rc)
		return rc;

	pci_set_master(pdev);

	stdev->mmio = pcim_iomap_table(pdev)[0];
	stdev->mmio_mrpc = stdev->mmio + SWITCHTEC_GAS_MRPC_OFFSET;
	stdev->mmio_sw_event = stdev->mmio + SWITCHTEC_GAS_SW_EVENT_OFFSET;
	stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
	stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
	stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;
	stdev->partition = ioread8(&stdev->mmio_sys_info->partition_id);
	stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
	stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
	stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
	stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET;

	/* guard against hardware reporting zero partitions */
	if (stdev->partition_count < 1)
		stdev->partition_count = 1;

	init_pff(stdev);

	pci_set_drvdata(pdev, stdev);

	return 0;
}
/*
 * Probe: create the management device, map and initialise the hardware,
 * request the event interrupt, arm the MRPC-completion and link-state
 * event IRQs, and finally expose the char device to userspace.
 */
static int switchtec_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct switchtec_dev *stdev;
	int rc;

	/* NTB-class functions are also served by the ntb_hw_switchtec driver */
	if (pdev->class == MICROSEMI_NTB_CLASSCODE)
		request_module_nowait("ntb_hw_switchtec");

	stdev = stdev_create(pdev);
	if (IS_ERR(stdev))
		return PTR_ERR(stdev);

	rc = switchtec_init_pci(stdev, pdev);
	if (rc)
		goto err_put;

	rc = switchtec_init_isr(stdev);
	if (rc) {
		dev_err(&stdev->dev, "failed to init isr.\n");
		goto err_put;
	}

	/* clear any stale MRPC completion and enable its interrupt */
	iowrite32(SWITCHTEC_EVENT_CLEAR |
		  SWITCHTEC_EVENT_EN_IRQ,
		  &stdev->mmio_part_cfg->mrpc_comp_hdr);
	enable_link_state_events(stdev);

	rc = cdev_device_add(&stdev->cdev, &stdev->dev);
	if (rc)
		goto err_devadd;

	dev_info(&stdev->dev, "Management device registered.\n");

	return 0;

err_devadd:
	stdev_kill(stdev);
err_put:
	/* the minor was allocated in stdev_create(); release it here */
	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	put_device(&stdev->dev);
	return rc;
}
/*
 * Remove: hide the char device from userspace first, then kill the
 * hardware side (waking any blocked users) and drop the final device
 * reference, which frees the stdev via stdev_release().
 */
static void switchtec_pci_remove(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev = pci_get_drvdata(pdev);

	pci_set_drvdata(pdev, NULL);

	cdev_device_del(&stdev->cdev, &stdev->dev);
	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	dev_info(&stdev->dev, "unregistered.\n");

	stdev_kill(stdev);
	put_device(&stdev->dev);
}
/*
 * Each device id is matched twice: once with the management-endpoint
 * class code and once with the NTB class code, so this driver binds to
 * the management function however the part is configured.
 */
#define SWITCHTEC_PCI_DEVICE(device_id) \
	{ \
		.vendor = MICROSEMI_VENDOR_ID, \
		.device = device_id, \
		.subvendor = PCI_ANY_ID, \
		.subdevice = PCI_ANY_ID, \
		.class = MICROSEMI_MGMT_CLASSCODE, \
		.class_mask = 0xFFFFFFFF, \
	}, \
	{ \
		.vendor = MICROSEMI_VENDOR_ID, \
		.device = device_id, \
		.subvendor = PCI_ANY_ID, \
		.subdevice = PCI_ANY_ID, \
		.class = MICROSEMI_NTB_CLASSCODE, \
		.class_mask = 0xFFFFFFFF, \
	}
/* Supported Gen3 Switchtec parts (PFX/PSX/PAX/PFXL/PFXI families) */
static const struct pci_device_id switchtec_pci_tbl[] = {
	SWITCHTEC_PCI_DEVICE(0x8531),  //PFX 24xG3
	SWITCHTEC_PCI_DEVICE(0x8532),  //PFX 32xG3
	SWITCHTEC_PCI_DEVICE(0x8533),  //PFX 48xG3
	SWITCHTEC_PCI_DEVICE(0x8534),  //PFX 64xG3
	SWITCHTEC_PCI_DEVICE(0x8535),  //PFX 80xG3
	SWITCHTEC_PCI_DEVICE(0x8536),  //PFX 96xG3
	SWITCHTEC_PCI_DEVICE(0x8543),  //PSX 48xG3
	SWITCHTEC_PCI_DEVICE(0x8544),  //PSX 64xG3
	SWITCHTEC_PCI_DEVICE(0x8545),  //PSX 80xG3
	SWITCHTEC_PCI_DEVICE(0x8546),  //PSX 96xG3
	SWITCHTEC_PCI_DEVICE(0x8551),  //PAX 24XG3
	SWITCHTEC_PCI_DEVICE(0x8552),  //PAX 32XG3
	SWITCHTEC_PCI_DEVICE(0x8553),  //PAX 48XG3
	SWITCHTEC_PCI_DEVICE(0x8554),  //PAX 64XG3
	SWITCHTEC_PCI_DEVICE(0x8555),  //PAX 80XG3
	SWITCHTEC_PCI_DEVICE(0x8556),  //PAX 96XG3
	SWITCHTEC_PCI_DEVICE(0x8561),  //PFXL 24XG3
	SWITCHTEC_PCI_DEVICE(0x8562),  //PFXL 32XG3
	SWITCHTEC_PCI_DEVICE(0x8563),  //PFXL 48XG3
	SWITCHTEC_PCI_DEVICE(0x8564),  //PFXL 64XG3
	SWITCHTEC_PCI_DEVICE(0x8565),  //PFXL 80XG3
	SWITCHTEC_PCI_DEVICE(0x8566),  //PFXL 96XG3
	SWITCHTEC_PCI_DEVICE(0x8571),  //PFXI 24XG3
	SWITCHTEC_PCI_DEVICE(0x8572),  //PFXI 32XG3
	SWITCHTEC_PCI_DEVICE(0x8573),  //PFXI 48XG3
	SWITCHTEC_PCI_DEVICE(0x8574),  //PFXI 64XG3
	SWITCHTEC_PCI_DEVICE(0x8575),  //PFXI 80XG3
	SWITCHTEC_PCI_DEVICE(0x8576),  //PFXI 96XG3
	{0}
};
MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
/* PCI driver glue: binds switchtec_pci_tbl ids to probe/remove above */
static struct pci_driver switchtec_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = switchtec_pci_tbl,
	.probe = switchtec_pci_probe,
	.remove = switchtec_pci_remove,
};
  1115. static int __init switchtec_init(void)
  1116. {
  1117. int rc;
  1118. rc = alloc_chrdev_region(&switchtec_devt, 0, max_devices,
  1119. "switchtec");
  1120. if (rc)
  1121. return rc;
  1122. switchtec_class = class_create(THIS_MODULE, "switchtec");
  1123. if (IS_ERR(switchtec_class)) {
  1124. rc = PTR_ERR(switchtec_class);
  1125. goto err_create_class;
  1126. }
  1127. rc = pci_register_driver(&switchtec_pci_driver);
  1128. if (rc)
  1129. goto err_pci_register;
  1130. pr_info(KBUILD_MODNAME ": loaded.\n");
  1131. return 0;
  1132. err_pci_register:
  1133. class_destroy(switchtec_class);
  1134. err_create_class:
  1135. unregister_chrdev_region(switchtec_devt, max_devices);
  1136. return rc;
  1137. }
  1138. module_init(switchtec_init);
/*
 * Module unload: unregister the PCI driver (which removes any bound
 * devices), then release the class, the char-dev number range and the
 * minor-number ida.
 */
static void __exit switchtec_exit(void)
{
	pci_unregister_driver(&switchtec_pci_driver);
	class_destroy(switchtec_class);
	unregister_chrdev_region(switchtec_devt, max_devices);
	ida_destroy(&switchtec_minor_ida);

	pr_info(KBUILD_MODNAME ": unloaded.\n");
}

module_exit(switchtec_exit);