// SPDX-License-Identifier: GPL-2.0
/*
 * Microsemi Switchtec(tm) PCIe Management Driver
 * Copyright (c) 2017, Microsemi Corporation
 */

#include <linux/switchtec.h>
#include <linux/switchtec_ioctl.h>

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>

MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Microsemi Corporation");

static int max_devices = 16;
module_param(max_devices, int, 0644);
MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");

static dev_t switchtec_devt;
static DEFINE_IDA(switchtec_minor_ida);

struct class *switchtec_class;
EXPORT_SYMBOL_GPL(switchtec_class);

enum mrpc_state {
        MRPC_IDLE = 0,
        MRPC_QUEUED,
        MRPC_RUNNING,
        MRPC_DONE,
};
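
/*
 * MRPC command lifecycle, as implemented below: a user context moves
 * IDLE -> QUEUED on write(), QUEUED -> RUNNING when it reaches the head
 * of mrpc_queue and is copied into the hardware registers, and
 * RUNNING -> DONE once the status register leaves INPROGRESS.  A
 * successful read() of a DONE context returns it to IDLE.  All
 * transitions happen under mrpc_mutex.
 */
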
struct switchtec_user {
        struct switchtec_dev *stdev;

        enum mrpc_state state;

        struct completion comp;
        struct kref kref;
        struct list_head list;

        u32 cmd;
        u32 status;
        u32 return_code;
        size_t data_len;
        size_t read_len;
        unsigned char data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
        int event_cnt;
};

static struct switchtec_user *stuser_create(struct switchtec_dev *stdev)
{
        struct switchtec_user *stuser;

        stuser = kzalloc(sizeof(*stuser), GFP_KERNEL);
        if (!stuser)
                return ERR_PTR(-ENOMEM);

        get_device(&stdev->dev);
        stuser->stdev = stdev;
        kref_init(&stuser->kref);
        INIT_LIST_HEAD(&stuser->list);
        init_completion(&stuser->comp);
        stuser->event_cnt = atomic_read(&stdev->event_cnt);

        dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);

        return stuser;
}

static void stuser_free(struct kref *kref)
{
        struct switchtec_user *stuser;

        stuser = container_of(kref, struct switchtec_user, kref);

        dev_dbg(&stuser->stdev->dev, "%s: %p\n", __func__, stuser);

        put_device(&stuser->stdev->dev);
        kfree(stuser);
}

static void stuser_put(struct switchtec_user *stuser)
{
        kref_put(&stuser->kref, stuser_free);
}

static void stuser_set_state(struct switchtec_user *stuser,
                             enum mrpc_state state)
{
        /* requires the mrpc_mutex to already be held when called */

        const char * const state_names[] = {
                [MRPC_IDLE] = "IDLE",
                [MRPC_QUEUED] = "QUEUED",
                [MRPC_RUNNING] = "RUNNING",
                [MRPC_DONE] = "DONE",
        };

        stuser->state = state;

        dev_dbg(&stuser->stdev->dev, "stuser state %p -> %s",
                stuser, state_names[state]);
}

static void mrpc_complete_cmd(struct switchtec_dev *stdev);

static void mrpc_cmd_submit(struct switchtec_dev *stdev)
{
        /* requires the mrpc_mutex to already be held when called */

        struct switchtec_user *stuser;

        if (stdev->mrpc_busy)
                return;

        if (list_empty(&stdev->mrpc_queue))
                return;

        stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
                            list);

        stuser_set_state(stuser, MRPC_RUNNING);
        stdev->mrpc_busy = 1;
        memcpy_toio(&stdev->mmio_mrpc->input_data,
                    stuser->data, stuser->data_len);
        iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd);

        stuser->status = ioread32(&stdev->mmio_mrpc->status);
        if (stuser->status != SWITCHTEC_MRPC_STATUS_INPROGRESS)
                mrpc_complete_cmd(stdev);

        schedule_delayed_work(&stdev->mrpc_timeout,
                              msecs_to_jiffies(500));
}
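
/*
 * Submission is a single MMIO sequence: the input payload is copied
 * into the MRPC input_data region, then writing the command register
 * kicks off firmware execution.  Completion is normally signalled by
 * the MRPC completion interrupt; the delayed work armed above acts as
 * a 500 ms polling fallback.
 */
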
static int mrpc_queue_cmd(struct switchtec_user *stuser)
{
        /* requires the mrpc_mutex to already be held when called */

        struct switchtec_dev *stdev = stuser->stdev;

        kref_get(&stuser->kref);
        stuser->read_len = sizeof(stuser->data);
        stuser_set_state(stuser, MRPC_QUEUED);
        init_completion(&stuser->comp);
        list_add_tail(&stuser->list, &stdev->mrpc_queue);

        mrpc_cmd_submit(stdev);

        return 0;
}

static void mrpc_complete_cmd(struct switchtec_dev *stdev)
{
        /* requires the mrpc_mutex to already be held when called */

        struct switchtec_user *stuser;

        if (list_empty(&stdev->mrpc_queue))
                return;

        stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
                            list);

        stuser->status = ioread32(&stdev->mmio_mrpc->status);
        if (stuser->status == SWITCHTEC_MRPC_STATUS_INPROGRESS)
                return;

        stuser_set_state(stuser, MRPC_DONE);
        stuser->return_code = 0;

        if (stuser->status != SWITCHTEC_MRPC_STATUS_DONE)
                goto out;

        stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value);
        if (stuser->return_code != 0)
                goto out;

        memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
                      stuser->read_len);

out:
        complete_all(&stuser->comp);
        list_del_init(&stuser->list);
        stuser_put(stuser);
        stdev->mrpc_busy = 0;

        mrpc_cmd_submit(stdev);
}

static void mrpc_event_work(struct work_struct *work)
{
        struct switchtec_dev *stdev;

        stdev = container_of(work, struct switchtec_dev, mrpc_work);

        dev_dbg(&stdev->dev, "%s\n", __func__);

        mutex_lock(&stdev->mrpc_mutex);
        cancel_delayed_work(&stdev->mrpc_timeout);
        mrpc_complete_cmd(stdev);
        mutex_unlock(&stdev->mrpc_mutex);
}

static void mrpc_timeout_work(struct work_struct *work)
{
        struct switchtec_dev *stdev;
        u32 status;

        stdev = container_of(work, struct switchtec_dev, mrpc_timeout.work);

        dev_dbg(&stdev->dev, "%s\n", __func__);

        mutex_lock(&stdev->mrpc_mutex);

        status = ioread32(&stdev->mmio_mrpc->status);
        if (status == SWITCHTEC_MRPC_STATUS_INPROGRESS) {
                schedule_delayed_work(&stdev->mrpc_timeout,
                                      msecs_to_jiffies(500));
                goto out;
        }

        mrpc_complete_cmd(stdev);

out:
        mutex_unlock(&stdev->mrpc_mutex);
}
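
/*
 * The two completion paths race benignly: the interrupt-driven
 * mrpc_event_work() and the timeout work both funnel into
 * mrpc_complete_cmd() under mrpc_mutex, and a command still reading
 * INPROGRESS simply re-arms the timer.
 */
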
static ssize_t device_version_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct switchtec_dev *stdev = to_stdev(dev);
        u32 ver;

        ver = ioread32(&stdev->mmio_sys_info->device_version);

        return sprintf(buf, "%x\n", ver);
}
static DEVICE_ATTR_RO(device_version);

static ssize_t fw_version_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct switchtec_dev *stdev = to_stdev(dev);
        u32 ver;

        ver = ioread32(&stdev->mmio_sys_info->firmware_version);

        return sprintf(buf, "%08x\n", ver);
}
static DEVICE_ATTR_RO(fw_version);

static ssize_t io_string_show(char *buf, void __iomem *attr, size_t len)
{
        int i;

        memcpy_fromio(buf, attr, len);
        buf[len] = '\n';
        buf[len + 1] = 0;

        for (i = len - 1; i > 0; i--) {
                if (buf[i] != ' ')
                        break;
                buf[i] = '\n';
                buf[i + 1] = 0;
        }

        return strlen(buf);
}
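
/*
 * These fixed-width ID strings come back space-padded from the device;
 * the backwards loop above relocates the newline and terminator so the
 * padding is trimmed from the sysfs output.
 */
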
#define DEVICE_ATTR_SYS_INFO_STR(field) \
static ssize_t field ## _show(struct device *dev, \
        struct device_attribute *attr, char *buf) \
{ \
        struct switchtec_dev *stdev = to_stdev(dev); \
        return io_string_show(buf, &stdev->mmio_sys_info->field, \
                            sizeof(stdev->mmio_sys_info->field)); \
} \
\
static DEVICE_ATTR_RO(field)

DEVICE_ATTR_SYS_INFO_STR(vendor_id);
DEVICE_ATTR_SYS_INFO_STR(product_id);
DEVICE_ATTR_SYS_INFO_STR(product_revision);
DEVICE_ATTR_SYS_INFO_STR(component_vendor);

static ssize_t component_id_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct switchtec_dev *stdev = to_stdev(dev);
        int id = ioread16(&stdev->mmio_sys_info->component_id);

        return sprintf(buf, "PM%04X\n", id);
}
static DEVICE_ATTR_RO(component_id);

static ssize_t component_revision_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct switchtec_dev *stdev = to_stdev(dev);
        int rev = ioread8(&stdev->mmio_sys_info->component_revision);

        return sprintf(buf, "%d\n", rev);
}
static DEVICE_ATTR_RO(component_revision);

static ssize_t partition_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct switchtec_dev *stdev = to_stdev(dev);

        return sprintf(buf, "%d\n", stdev->partition);
}
static DEVICE_ATTR_RO(partition);

static ssize_t partition_count_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct switchtec_dev *stdev = to_stdev(dev);

        return sprintf(buf, "%d\n", stdev->partition_count);
}
static DEVICE_ATTR_RO(partition_count);

static struct attribute *switchtec_device_attrs[] = {
        &dev_attr_device_version.attr,
        &dev_attr_fw_version.attr,
        &dev_attr_vendor_id.attr,
        &dev_attr_product_id.attr,
        &dev_attr_product_revision.attr,
        &dev_attr_component_vendor.attr,
        &dev_attr_component_id.attr,
        &dev_attr_component_revision.attr,
        &dev_attr_partition.attr,
        &dev_attr_partition_count.attr,
        NULL,
};

ATTRIBUTE_GROUPS(switchtec_device);

static int switchtec_dev_open(struct inode *inode, struct file *filp)
{
        struct switchtec_dev *stdev;
        struct switchtec_user *stuser;

        stdev = container_of(inode->i_cdev, struct switchtec_dev, cdev);

        stuser = stuser_create(stdev);
        if (IS_ERR(stuser))
                return PTR_ERR(stuser);

        filp->private_data = stuser;
        nonseekable_open(inode, filp);

        dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);

        return 0;
}

static int switchtec_dev_release(struct inode *inode, struct file *filp)
{
        struct switchtec_user *stuser = filp->private_data;

        stuser_put(stuser);

        return 0;
}

static int lock_mutex_and_test_alive(struct switchtec_dev *stdev)
{
        if (mutex_lock_interruptible(&stdev->mrpc_mutex))
                return -EINTR;

        if (!stdev->alive) {
                mutex_unlock(&stdev->mrpc_mutex);
                return -ENODEV;
        }

        return 0;
}

static ssize_t switchtec_dev_write(struct file *filp, const char __user *data,
                                   size_t size, loff_t *off)
{
        struct switchtec_user *stuser = filp->private_data;
        struct switchtec_dev *stdev = stuser->stdev;
        int rc;

        if (size < sizeof(stuser->cmd) ||
            size > sizeof(stuser->cmd) + sizeof(stuser->data))
                return -EINVAL;

        stuser->data_len = size - sizeof(stuser->cmd);

        rc = lock_mutex_and_test_alive(stdev);
        if (rc)
                return rc;

        if (stuser->state != MRPC_IDLE) {
                rc = -EBADE;
                goto out;
        }

        rc = copy_from_user(&stuser->cmd, data, sizeof(stuser->cmd));
        if (rc) {
                rc = -EFAULT;
                goto out;
        }

        data += sizeof(stuser->cmd);
        rc = copy_from_user(&stuser->data, data, size - sizeof(stuser->cmd));
        if (rc) {
                rc = -EFAULT;
                goto out;
        }

        rc = mrpc_queue_cmd(stuser);

out:
        mutex_unlock(&stdev->mrpc_mutex);

        if (rc)
                return rc;

        return size;
}
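
/*
 * Illustrative user-space sequence for the write()/read() protocol
 * above (a sketch, not part of this driver; error handling omitted):
 *
 *      int fd = open("/dev/switchtec0", O_RDWR);
 *      struct {
 *              uint32_t cmd;
 *              uint8_t data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
 *      } buf = { .cmd = ... };
 *      write(fd, &buf, sizeof(buf.cmd) + input_len);
 *      read(fd, &buf, sizeof(uint32_t) + output_len);
 *
 * The first u32 written is the MRPC command number; the first u32 read
 * back is the command's return code, followed by the output payload.
 */
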
static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
                                  size_t size, loff_t *off)
{
        struct switchtec_user *stuser = filp->private_data;
        struct switchtec_dev *stdev = stuser->stdev;
        int rc;

        if (size < sizeof(stuser->cmd) ||
            size > sizeof(stuser->cmd) + sizeof(stuser->data))
                return -EINVAL;

        rc = lock_mutex_and_test_alive(stdev);
        if (rc)
                return rc;

        if (stuser->state == MRPC_IDLE) {
                mutex_unlock(&stdev->mrpc_mutex);
                return -EBADE;
        }

        stuser->read_len = size - sizeof(stuser->return_code);

        mutex_unlock(&stdev->mrpc_mutex);

        if (filp->f_flags & O_NONBLOCK) {
                if (!try_wait_for_completion(&stuser->comp))
                        return -EAGAIN;
        } else {
                rc = wait_for_completion_interruptible(&stuser->comp);
                if (rc < 0)
                        return rc;
        }

        rc = lock_mutex_and_test_alive(stdev);
        if (rc)
                return rc;

        if (stuser->state != MRPC_DONE) {
                mutex_unlock(&stdev->mrpc_mutex);
                return -EBADE;
        }

        rc = copy_to_user(data, &stuser->return_code,
                          sizeof(stuser->return_code));
        if (rc) {
                rc = -EFAULT;
                goto out;
        }

        data += sizeof(stuser->return_code);
        rc = copy_to_user(data, &stuser->data,
                          size - sizeof(stuser->return_code));
        if (rc) {
                rc = -EFAULT;
                goto out;
        }

        stuser_set_state(stuser, MRPC_IDLE);

out:
        mutex_unlock(&stdev->mrpc_mutex);

        if (stuser->status == SWITCHTEC_MRPC_STATUS_DONE)
                return size;
        else if (stuser->status == SWITCHTEC_MRPC_STATUS_INTERRUPTED)
                return -ENXIO;
        else
                return -EBADMSG;
}

static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait)
{
        struct switchtec_user *stuser = filp->private_data;
        struct switchtec_dev *stdev = stuser->stdev;
        __poll_t ret = 0;

        poll_wait(filp, &stuser->comp.wait, wait);
        poll_wait(filp, &stdev->event_wq, wait);

        if (lock_mutex_and_test_alive(stdev))
                return EPOLLIN | EPOLLRDHUP | EPOLLOUT | EPOLLERR | EPOLLHUP;

        mutex_unlock(&stdev->mrpc_mutex);

        if (try_wait_for_completion(&stuser->comp))
                ret |= EPOLLIN | EPOLLRDNORM;

        if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
                ret |= EPOLLPRI | EPOLLRDBAND;

        return ret;
}
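
/*
 * poll() thus reports two independent conditions: EPOLLIN/EPOLLRDNORM
 * when the caller's own MRPC command has completed, and
 * EPOLLPRI/EPOLLRDBAND when the global event counter has advanced past
 * what this file handle last observed via the event-summary ioctl.
 */
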
static int ioctl_flash_info(struct switchtec_dev *stdev,
                            struct switchtec_ioctl_flash_info __user *uinfo)
{
        struct switchtec_ioctl_flash_info info = {0};
        struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;

        info.flash_length = ioread32(&fi->flash_length);
        info.num_partitions = SWITCHTEC_IOCTL_NUM_PARTITIONS;

        if (copy_to_user(uinfo, &info, sizeof(info)))
                return -EFAULT;

        return 0;
}

static void set_fw_info_part(struct switchtec_ioctl_flash_part_info *info,
                             struct partition_info __iomem *pi)
{
        info->address = ioread32(&pi->address);
        info->length = ioread32(&pi->length);
}

static int ioctl_flash_part_info(struct switchtec_dev *stdev,
        struct switchtec_ioctl_flash_part_info __user *uinfo)
{
        struct switchtec_ioctl_flash_part_info info = {0};
        struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
        struct sys_info_regs __iomem *si = stdev->mmio_sys_info;
        u32 active_addr = -1;

        if (copy_from_user(&info, uinfo, sizeof(info)))
                return -EFAULT;

        switch (info.flash_partition) {
        case SWITCHTEC_IOCTL_PART_CFG0:
                active_addr = ioread32(&fi->active_cfg);
                set_fw_info_part(&info, &fi->cfg0);
                if (ioread16(&si->cfg_running) == SWITCHTEC_CFG0_RUNNING)
                        info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
                break;
        case SWITCHTEC_IOCTL_PART_CFG1:
                active_addr = ioread32(&fi->active_cfg);
                set_fw_info_part(&info, &fi->cfg1);
                if (ioread16(&si->cfg_running) == SWITCHTEC_CFG1_RUNNING)
                        info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
                break;
        case SWITCHTEC_IOCTL_PART_IMG0:
                active_addr = ioread32(&fi->active_img);
                set_fw_info_part(&info, &fi->img0);
                if (ioread16(&si->img_running) == SWITCHTEC_IMG0_RUNNING)
                        info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
                break;
        case SWITCHTEC_IOCTL_PART_IMG1:
                active_addr = ioread32(&fi->active_img);
                set_fw_info_part(&info, &fi->img1);
                if (ioread16(&si->img_running) == SWITCHTEC_IMG1_RUNNING)
                        info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
                break;
        case SWITCHTEC_IOCTL_PART_NVLOG:
                set_fw_info_part(&info, &fi->nvlog);
                break;
        case SWITCHTEC_IOCTL_PART_VENDOR0:
                set_fw_info_part(&info, &fi->vendor[0]);
                break;
        case SWITCHTEC_IOCTL_PART_VENDOR1:
                set_fw_info_part(&info, &fi->vendor[1]);
                break;
        case SWITCHTEC_IOCTL_PART_VENDOR2:
                set_fw_info_part(&info, &fi->vendor[2]);
                break;
        case SWITCHTEC_IOCTL_PART_VENDOR3:
                set_fw_info_part(&info, &fi->vendor[3]);
                break;
        case SWITCHTEC_IOCTL_PART_VENDOR4:
                set_fw_info_part(&info, &fi->vendor[4]);
                break;
        case SWITCHTEC_IOCTL_PART_VENDOR5:
                set_fw_info_part(&info, &fi->vendor[5]);
                break;
        case SWITCHTEC_IOCTL_PART_VENDOR6:
                set_fw_info_part(&info, &fi->vendor[6]);
                break;
        case SWITCHTEC_IOCTL_PART_VENDOR7:
                set_fw_info_part(&info, &fi->vendor[7]);
                break;
        default:
                return -EINVAL;
        }

        if (info.address == active_addr)
                info.active |= SWITCHTEC_IOCTL_PART_ACTIVE;

        if (copy_to_user(uinfo, &info, sizeof(info)))
                return -EFAULT;

        return 0;
}

static int ioctl_event_summary(struct switchtec_dev *stdev,
        struct switchtec_user *stuser,
        struct switchtec_ioctl_event_summary __user *usum)
{
        struct switchtec_ioctl_event_summary s = {0};
        int i;
        u32 reg;

        s.global = ioread32(&stdev->mmio_sw_event->global_summary);
        s.part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap);
        s.local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);

        for (i = 0; i < stdev->partition_count; i++) {
                reg = ioread32(&stdev->mmio_part_cfg_all[i].part_event_summary);
                s.part[i] = reg;
        }

        for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
                reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
                if (reg != MICROSEMI_VENDOR_ID)
                        break;

                reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary);
                s.pff[i] = reg;
        }

        if (copy_to_user(usum, &s, sizeof(s)))
                return -EFAULT;

        stuser->event_cnt = atomic_read(&stdev->event_cnt);

        return 0;
}

static u32 __iomem *global_ev_reg(struct switchtec_dev *stdev,
                                  size_t offset, int index)
{
        return (void __iomem *)stdev->mmio_sw_event + offset;
}

static u32 __iomem *part_ev_reg(struct switchtec_dev *stdev,
                                size_t offset, int index)
{
        return (void __iomem *)&stdev->mmio_part_cfg_all[index] + offset;
}

static u32 __iomem *pff_ev_reg(struct switchtec_dev *stdev,
                               size_t offset, int index)
{
        return (void __iomem *)&stdev->mmio_pff_csr[index] + offset;
}

#define EV_GLB(i, r)[i] = {offsetof(struct sw_event_regs, r), global_ev_reg}
#define EV_PAR(i, r)[i] = {offsetof(struct part_cfg_regs, r), part_ev_reg}
#define EV_PFF(i, r)[i] = {offsetof(struct pff_csr_regs, r), pff_ev_reg}

static const struct event_reg {
        size_t offset;
        u32 __iomem *(*map_reg)(struct switchtec_dev *stdev,
                                size_t offset, int index);
} event_regs[] = {
        EV_GLB(SWITCHTEC_IOCTL_EVENT_STACK_ERROR, stack_error_event_hdr),
        EV_GLB(SWITCHTEC_IOCTL_EVENT_PPU_ERROR, ppu_error_event_hdr),
        EV_GLB(SWITCHTEC_IOCTL_EVENT_ISP_ERROR, isp_error_event_hdr),
        EV_GLB(SWITCHTEC_IOCTL_EVENT_SYS_RESET, sys_reset_event_hdr),
        EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_EXC, fw_exception_hdr),
        EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NMI, fw_nmi_hdr),
        EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NON_FATAL, fw_non_fatal_hdr),
        EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_FATAL, fw_fatal_hdr),
        EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP, twi_mrpc_comp_hdr),
        EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP_ASYNC,
               twi_mrpc_comp_async_hdr),
        EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP, cli_mrpc_comp_hdr),
        EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP_ASYNC,
               cli_mrpc_comp_async_hdr),
        EV_GLB(SWITCHTEC_IOCTL_EVENT_GPIO_INT, gpio_interrupt_hdr),
        EV_GLB(SWITCHTEC_IOCTL_EVENT_GFMS, gfms_event_hdr),
        EV_PAR(SWITCHTEC_IOCTL_EVENT_PART_RESET, part_reset_hdr),
        EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP, mrpc_comp_hdr),
        EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC, mrpc_comp_async_hdr),
        EV_PAR(SWITCHTEC_IOCTL_EVENT_DYN_PART_BIND_COMP, dyn_binding_hdr),
        EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_P2P, aer_in_p2p_hdr),
        EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_VEP, aer_in_vep_hdr),
        EV_PFF(SWITCHTEC_IOCTL_EVENT_DPC, dpc_hdr),
        EV_PFF(SWITCHTEC_IOCTL_EVENT_CTS, cts_hdr),
        EV_PFF(SWITCHTEC_IOCTL_EVENT_HOTPLUG, hotplug_hdr),
        EV_PFF(SWITCHTEC_IOCTL_EVENT_IER, ier_hdr),
        EV_PFF(SWITCHTEC_IOCTL_EVENT_THRESH, threshold_hdr),
        EV_PFF(SWITCHTEC_IOCTL_EVENT_POWER_MGMT, power_mgmt_hdr),
        EV_PFF(SWITCHTEC_IOCTL_EVENT_TLP_THROTTLING, tlp_throttling_hdr),
        EV_PFF(SWITCHTEC_IOCTL_EVENT_FORCE_SPEED, force_speed_hdr),
        EV_PFF(SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT, credit_timeout_hdr),
        EV_PFF(SWITCHTEC_IOCTL_EVENT_LINK_STATE, link_state_hdr),
};
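
/*
 * event_regs[] is the single lookup table tying each ioctl event ID to
 * a register-bank mapper (global, per-partition, or per-PFF) plus the
 * offset of that event's header register within the bank; everything
 * below (event_hdr_addr(), event_ctl(), mask_all_events()) is driven
 * off this table rather than hard-coding addresses.
 */
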
static u32 __iomem *event_hdr_addr(struct switchtec_dev *stdev,
                                   int event_id, int index)
{
        size_t off;

        if (event_id < 0 || event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
                return ERR_PTR(-EINVAL);

        off = event_regs[event_id].offset;

        if (event_regs[event_id].map_reg == part_ev_reg) {
                if (index == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
                        index = stdev->partition;
                else if (index < 0 || index >= stdev->partition_count)
                        return ERR_PTR(-EINVAL);
        } else if (event_regs[event_id].map_reg == pff_ev_reg) {
                if (index < 0 || index >= stdev->pff_csr_count)
                        return ERR_PTR(-EINVAL);
        }

        return event_regs[event_id].map_reg(stdev, off, index);
}

static int event_ctl(struct switchtec_dev *stdev,
                     struct switchtec_ioctl_event_ctl *ctl)
{
        int i;
        u32 __iomem *reg;
        u32 hdr;

        reg = event_hdr_addr(stdev, ctl->event_id, ctl->index);
        if (IS_ERR(reg))
                return PTR_ERR(reg);

        hdr = ioread32(reg);
        for (i = 0; i < ARRAY_SIZE(ctl->data); i++)
                ctl->data[i] = ioread32(&reg[i + 1]);

        ctl->occurred = hdr & SWITCHTEC_EVENT_OCCURRED;
        ctl->count = (hdr >> 5) & 0xFF;

        if (!(ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_CLEAR))
                hdr &= ~SWITCHTEC_EVENT_CLEAR;
        if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL)
                hdr |= SWITCHTEC_EVENT_EN_IRQ;
        if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_POLL)
                hdr &= ~SWITCHTEC_EVENT_EN_IRQ;
        if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG)
                hdr |= SWITCHTEC_EVENT_EN_LOG;
        if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_LOG)
                hdr &= ~SWITCHTEC_EVENT_EN_LOG;
        if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI)
                hdr |= SWITCHTEC_EVENT_EN_CLI;
        if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_CLI)
                hdr &= ~SWITCHTEC_EVENT_EN_CLI;
        if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL)
                hdr |= SWITCHTEC_EVENT_FATAL;
        if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_FATAL)
                hdr &= ~SWITCHTEC_EVENT_FATAL;

        if (ctl->flags)
                iowrite32(hdr, reg);

        ctl->flags = 0;
        if (hdr & SWITCHTEC_EVENT_EN_IRQ)
                ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL;
        if (hdr & SWITCHTEC_EVENT_EN_LOG)
                ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG;
        if (hdr & SWITCHTEC_EVENT_EN_CLI)
                ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI;
        if (hdr & SWITCHTEC_EVENT_FATAL)
                ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL;

        return 0;
}
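
/*
 * The event header register packs everything event_ctl() needs: the
 * OCCURRED flag, an 8-bit occurrence count starting at bit 5, a CLEAR
 * bit (only left set in the write-back when the caller requested a
 * clear), and the EN_IRQ/EN_LOG/EN_CLI/FATAL enables, which are read
 * back into ctl->flags after the update.
 */
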
static int ioctl_event_ctl(struct switchtec_dev *stdev,
        struct switchtec_ioctl_event_ctl __user *uctl)
{
        int ret;
        int nr_idxs;
        struct switchtec_ioctl_event_ctl ctl;

        if (copy_from_user(&ctl, uctl, sizeof(ctl)))
                return -EFAULT;

        if (ctl.event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
                return -EINVAL;

        if (ctl.flags & SWITCHTEC_IOCTL_EVENT_FLAG_UNUSED)
                return -EINVAL;

        if (ctl.index == SWITCHTEC_IOCTL_EVENT_IDX_ALL) {
                if (event_regs[ctl.event_id].map_reg == global_ev_reg)
                        nr_idxs = 1;
                else if (event_regs[ctl.event_id].map_reg == part_ev_reg)
                        nr_idxs = stdev->partition_count;
                else if (event_regs[ctl.event_id].map_reg == pff_ev_reg)
                        nr_idxs = stdev->pff_csr_count;
                else
                        return -EINVAL;

                for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) {
                        ret = event_ctl(stdev, &ctl);
                        if (ret < 0)
                                return ret;
                }
        } else {
                ret = event_ctl(stdev, &ctl);
                if (ret < 0)
                        return ret;
        }

        if (copy_to_user(uctl, &ctl, sizeof(ctl)))
                return -EFAULT;

        return 0;
}

static int ioctl_pff_to_port(struct switchtec_dev *stdev,
                             struct switchtec_ioctl_pff_port *up)
{
        int i, part;
        u32 reg;
        struct part_cfg_regs *pcfg;
        struct switchtec_ioctl_pff_port p;

        if (copy_from_user(&p, up, sizeof(p)))
                return -EFAULT;

        p.port = -1;
        for (part = 0; part < stdev->partition_count; part++) {
                pcfg = &stdev->mmio_part_cfg_all[part];
                p.partition = part;

                reg = ioread32(&pcfg->usp_pff_inst_id);
                if (reg == p.pff) {
                        p.port = 0;
                        break;
                }

                reg = ioread32(&pcfg->vep_pff_inst_id);
                if (reg == p.pff) {
                        p.port = SWITCHTEC_IOCTL_PFF_VEP;
                        break;
                }

                for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
                        reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
                        if (reg != p.pff)
                                continue;

                        p.port = i + 1;
                        break;
                }

                if (p.port != -1)
                        break;
        }

        if (copy_to_user(up, &p, sizeof(p)))
                return -EFAULT;

        return 0;
}

static int ioctl_port_to_pff(struct switchtec_dev *stdev,
                             struct switchtec_ioctl_pff_port *up)
{
        struct switchtec_ioctl_pff_port p;
        struct part_cfg_regs *pcfg;

        if (copy_from_user(&p, up, sizeof(p)))
                return -EFAULT;

        if (p.partition == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
                pcfg = stdev->mmio_part_cfg;
        else if (p.partition < stdev->partition_count)
                pcfg = &stdev->mmio_part_cfg_all[p.partition];
        else
                return -EINVAL;

        switch (p.port) {
        case 0:
                p.pff = ioread32(&pcfg->usp_pff_inst_id);
                break;
        case SWITCHTEC_IOCTL_PFF_VEP:
                p.pff = ioread32(&pcfg->vep_pff_inst_id);
                break;
        default:
                if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
                        return -EINVAL;
                p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
                break;
        }

        if (copy_to_user(up, &p, sizeof(p)))
                return -EFAULT;

        return 0;
}
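
/*
 * Port numbering convention used by the two ioctls above: port 0 is a
 * partition's upstream port, SWITCHTEC_IOCTL_PFF_VEP its management
 * (VEP) function, and ports 1..N its downstream ports, matching the
 * usp/vep/dsp_pff_inst_id registers they are translated through.
 */
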
static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd,
                                unsigned long arg)
{
        struct switchtec_user *stuser = filp->private_data;
        struct switchtec_dev *stdev = stuser->stdev;
        int rc;
        void __user *argp = (void __user *)arg;

        rc = lock_mutex_and_test_alive(stdev);
        if (rc)
                return rc;

        switch (cmd) {
        case SWITCHTEC_IOCTL_FLASH_INFO:
                rc = ioctl_flash_info(stdev, argp);
                break;
        case SWITCHTEC_IOCTL_FLASH_PART_INFO:
                rc = ioctl_flash_part_info(stdev, argp);
                break;
        case SWITCHTEC_IOCTL_EVENT_SUMMARY:
                rc = ioctl_event_summary(stdev, stuser, argp);
                break;
        case SWITCHTEC_IOCTL_EVENT_CTL:
                rc = ioctl_event_ctl(stdev, argp);
                break;
        case SWITCHTEC_IOCTL_PFF_TO_PORT:
                rc = ioctl_pff_to_port(stdev, argp);
                break;
        case SWITCHTEC_IOCTL_PORT_TO_PFF:
                rc = ioctl_port_to_pff(stdev, argp);
                break;
        default:
                rc = -ENOTTY;
                break;
        }

        mutex_unlock(&stdev->mrpc_mutex);
        return rc;
}

static const struct file_operations switchtec_fops = {
        .owner = THIS_MODULE,
        .open = switchtec_dev_open,
        .release = switchtec_dev_release,
        .write = switchtec_dev_write,
        .read = switchtec_dev_read,
        .poll = switchtec_dev_poll,
        .unlocked_ioctl = switchtec_dev_ioctl,
        .compat_ioctl = switchtec_dev_ioctl,
};
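
/*
 * The native ioctl handler doubles as compat_ioctl, presumably because
 * the payloads in switchtec_ioctl.h are built from fixed-width types
 * and need no 32/64-bit translation layer.
 */
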
static void link_event_work(struct work_struct *work)
{
        struct switchtec_dev *stdev;

        stdev = container_of(work, struct switchtec_dev, link_event_work);

        if (stdev->link_notifier)
                stdev->link_notifier(stdev);
}

static void check_link_state_events(struct switchtec_dev *stdev)
{
        int idx;
        u32 reg;
        int count;
        int occurred = 0;

        for (idx = 0; idx < stdev->pff_csr_count; idx++) {
                reg = ioread32(&stdev->mmio_pff_csr[idx].link_state_hdr);
                dev_dbg(&stdev->dev, "link_state: %d->%08x\n", idx, reg);
                count = (reg >> 5) & 0xFF;

                if (count != stdev->link_event_count[idx]) {
                        occurred = 1;
                        stdev->link_event_count[idx] = count;
                }
        }

        if (occurred)
                schedule_work(&stdev->link_event_work);
}

static void enable_link_state_events(struct switchtec_dev *stdev)
{
        int idx;

        for (idx = 0; idx < stdev->pff_csr_count; idx++) {
                iowrite32(SWITCHTEC_EVENT_CLEAR |
                          SWITCHTEC_EVENT_EN_IRQ,
                          &stdev->mmio_pff_csr[idx].link_state_hdr);
        }
}

static void stdev_release(struct device *dev)
{
        struct switchtec_dev *stdev = to_stdev(dev);

        kfree(stdev);
}

static void stdev_kill(struct switchtec_dev *stdev)
{
        struct switchtec_user *stuser, *tmpuser;

        pci_clear_master(stdev->pdev);

        cancel_delayed_work_sync(&stdev->mrpc_timeout);

        /* Mark the hardware as unavailable and complete all completions */
        mutex_lock(&stdev->mrpc_mutex);
        stdev->alive = false;

        /* Wake up and kill any users waiting on an MRPC request */
        list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
                complete_all(&stuser->comp);
                list_del_init(&stuser->list);
                stuser_put(stuser);
        }

        mutex_unlock(&stdev->mrpc_mutex);

        /* Wake up any users waiting on event_wq */
        wake_up_interruptible(&stdev->event_wq);
}
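
/*
 * Teardown ordering in stdev_kill(): bus mastering is cleared (which
 * also inhibits MSI delivery) and the timeout work cancelled before
 * mrpc_mutex is taken; once alive is false, every queued user is
 * completed and dropped, and later syscalls fail in
 * lock_mutex_and_test_alive() with -ENODEV.
 */
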
static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
{
        struct switchtec_dev *stdev;
        int minor;
        struct device *dev;
        struct cdev *cdev;
        int rc;

        stdev = kzalloc_node(sizeof(*stdev), GFP_KERNEL,
                             dev_to_node(&pdev->dev));
        if (!stdev)
                return ERR_PTR(-ENOMEM);

        stdev->alive = true;
        stdev->pdev = pdev;
        INIT_LIST_HEAD(&stdev->mrpc_queue);
        mutex_init(&stdev->mrpc_mutex);
        stdev->mrpc_busy = 0;
        INIT_WORK(&stdev->mrpc_work, mrpc_event_work);
        INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work);
        INIT_WORK(&stdev->link_event_work, link_event_work);
        init_waitqueue_head(&stdev->event_wq);
        atomic_set(&stdev->event_cnt, 0);

        dev = &stdev->dev;
        device_initialize(dev);
        dev->class = switchtec_class;
        dev->parent = &pdev->dev;
        dev->groups = switchtec_device_groups;
        dev->release = stdev_release;

        minor = ida_simple_get(&switchtec_minor_ida, 0, 0,
                               GFP_KERNEL);
        if (minor < 0) {
                rc = minor;
                goto err_put;
        }

        dev->devt = MKDEV(MAJOR(switchtec_devt), minor);
        dev_set_name(dev, "switchtec%d", minor);

        cdev = &stdev->cdev;
        cdev_init(cdev, &switchtec_fops);
        cdev->owner = THIS_MODULE;

        return stdev;

err_put:
        put_device(&stdev->dev);
        return ERR_PTR(rc);
}

static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
{
        size_t off = event_regs[eid].offset;
        u32 __iomem *hdr_reg;
        u32 hdr;

        hdr_reg = event_regs[eid].map_reg(stdev, off, idx);
        hdr = ioread32(hdr_reg);

        if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
                return 0;

        if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE)
                return 0;

        dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
        hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
        iowrite32(hdr, hdr_reg);

        return 1;
}

static int mask_all_events(struct switchtec_dev *stdev, int eid)
{
        int idx;
        int count = 0;

        if (event_regs[eid].map_reg == part_ev_reg) {
                for (idx = 0; idx < stdev->partition_count; idx++)
                        count += mask_event(stdev, eid, idx);
        } else if (event_regs[eid].map_reg == pff_ev_reg) {
                for (idx = 0; idx < stdev->pff_csr_count; idx++) {
                        if (!stdev->pff_local[idx])
                                continue;
                        count += mask_event(stdev, eid, idx);
                }
        } else {
                count += mask_event(stdev, eid, 0);
        }

        return count;
}

static irqreturn_t switchtec_event_isr(int irq, void *dev)
{
        struct switchtec_dev *stdev = dev;
        u32 reg;
        irqreturn_t ret = IRQ_NONE;
        int eid, event_count = 0;

        reg = ioread32(&stdev->mmio_part_cfg->mrpc_comp_hdr);
        if (reg & SWITCHTEC_EVENT_OCCURRED) {
                dev_dbg(&stdev->dev, "%s: mrpc comp\n", __func__);
                ret = IRQ_HANDLED;
                schedule_work(&stdev->mrpc_work);
                iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
        }

        check_link_state_events(stdev);

        for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++)
                event_count += mask_all_events(stdev, eid);

        if (event_count) {
                atomic_inc(&stdev->event_cnt);
                wake_up_interruptible(&stdev->event_wq);
                dev_dbg(&stdev->dev, "%s: %d events\n", __func__,
                        event_count);
                return IRQ_HANDLED;
        }

        return ret;
}
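
/*
 * The ISR does not dispatch events itself; it masks each occurred,
 * IRQ-enabled event (so it cannot re-fire) and bumps event_cnt,
 * leaving user space to discover what happened via the event-summary
 * and event-ctl ioctls, which can re-enable the interrupt.  Link-state
 * events are deliberately left unmasked in mask_event() so that
 * check_link_state_events() keeps seeing them.
 */
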
static int switchtec_init_isr(struct switchtec_dev *stdev)
{
        int nvecs;
        int event_irq;

        nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, 4,
                                      PCI_IRQ_MSIX | PCI_IRQ_MSI);
        if (nvecs < 0)
                return nvecs;

        event_irq = ioread32(&stdev->mmio_part_cfg->vep_vector_number);
        if (event_irq < 0 || event_irq >= nvecs)
                return -EFAULT;

        event_irq = pci_irq_vector(stdev->pdev, event_irq);
        if (event_irq < 0)
                return event_irq;

        return devm_request_irq(&stdev->pdev->dev, event_irq,
                                switchtec_event_isr, 0,
                                KBUILD_MODNAME, stdev);
}

static void init_pff(struct switchtec_dev *stdev)
{
        int i;
        u32 reg;
        struct part_cfg_regs *pcfg = stdev->mmio_part_cfg;

        for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
                reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
                if (reg != MICROSEMI_VENDOR_ID)
                        break;
        }

        stdev->pff_csr_count = i;

        reg = ioread32(&pcfg->usp_pff_inst_id);
        if (reg < SWITCHTEC_MAX_PFF_CSR)
                stdev->pff_local[reg] = 1;

        reg = ioread32(&pcfg->vep_pff_inst_id);
        if (reg < SWITCHTEC_MAX_PFF_CSR)
                stdev->pff_local[reg] = 1;

        for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
                reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
                if (reg < SWITCHTEC_MAX_PFF_CSR)
                        stdev->pff_local[reg] = 1;
        }
}

static int switchtec_init_pci(struct switchtec_dev *stdev,
                              struct pci_dev *pdev)
{
        int rc;

        rc = pcim_enable_device(pdev);
        if (rc)
                return rc;

        rc = pcim_iomap_regions(pdev, 0x1, KBUILD_MODNAME);
        if (rc)
                return rc;

        pci_set_master(pdev);

        stdev->mmio = pcim_iomap_table(pdev)[0];
        stdev->mmio_mrpc = stdev->mmio + SWITCHTEC_GAS_MRPC_OFFSET;
        stdev->mmio_sw_event = stdev->mmio + SWITCHTEC_GAS_SW_EVENT_OFFSET;
        stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
        stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
        stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;
        stdev->partition = ioread8(&stdev->mmio_sys_info->partition_id);
        stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
        stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
        stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
        stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET;

        if (stdev->partition_count < 1)
                stdev->partition_count = 1;

        init_pff(stdev);

        pci_set_drvdata(pdev, stdev);

        return 0;
}
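
/*
 * BAR 0 exposes the switch's GAS (Global Address Space), carved into
 * fixed-offset regions (MRPC, switch events, system info, flash info,
 * NTB, partition configuration, PFF CSRs); the pointers set up above
 * are all just offsets into that single iomap.
 */
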
static int switchtec_pci_probe(struct pci_dev *pdev,
                               const struct pci_device_id *id)
{
        struct switchtec_dev *stdev;
        int rc;

        if (pdev->class == MICROSEMI_NTB_CLASSCODE)
                request_module_nowait("ntb_hw_switchtec");

        stdev = stdev_create(pdev);
        if (IS_ERR(stdev))
                return PTR_ERR(stdev);

        rc = switchtec_init_pci(stdev, pdev);
        if (rc)
                goto err_put;

        rc = switchtec_init_isr(stdev);
        if (rc) {
                dev_err(&stdev->dev, "failed to init isr.\n");
                goto err_put;
        }

        iowrite32(SWITCHTEC_EVENT_CLEAR |
                  SWITCHTEC_EVENT_EN_IRQ,
                  &stdev->mmio_part_cfg->mrpc_comp_hdr);
        enable_link_state_events(stdev);

        rc = cdev_device_add(&stdev->cdev, &stdev->dev);
        if (rc)
                goto err_devadd;

        dev_info(&stdev->dev, "Management device registered.\n");

        return 0;

err_devadd:
        stdev_kill(stdev);
err_put:
        ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
        put_device(&stdev->dev);
        return rc;
}

static void switchtec_pci_remove(struct pci_dev *pdev)
{
        struct switchtec_dev *stdev = pci_get_drvdata(pdev);

        pci_set_drvdata(pdev, NULL);

        cdev_device_del(&stdev->cdev, &stdev->dev);
        ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
        dev_info(&stdev->dev, "unregistered.\n");

        stdev_kill(stdev);
        put_device(&stdev->dev);
}

#define SWITCHTEC_PCI_DEVICE(device_id) \
        { \
                .vendor     = MICROSEMI_VENDOR_ID, \
                .device     = device_id, \
                .subvendor  = PCI_ANY_ID, \
                .subdevice  = PCI_ANY_ID, \
                .class      = MICROSEMI_MGMT_CLASSCODE, \
                .class_mask = 0xFFFFFFFF, \
        }, \
        { \
                .vendor     = MICROSEMI_VENDOR_ID, \
                .device     = device_id, \
                .subvendor  = PCI_ANY_ID, \
                .subdevice  = PCI_ANY_ID, \
                .class      = MICROSEMI_NTB_CLASSCODE, \
                .class_mask = 0xFFFFFFFF, \
        }
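
/*
 * Each supported device ID is listed twice by this macro: once with
 * the management-endpoint class code and once with the NTB class
 * code, so the driver binds regardless of which personality the
 * switch's management function advertises.
 */
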
static const struct pci_device_id switchtec_pci_tbl[] = {
        SWITCHTEC_PCI_DEVICE(0x8531),  //PFX 24xG3
        SWITCHTEC_PCI_DEVICE(0x8532),  //PFX 32xG3
        SWITCHTEC_PCI_DEVICE(0x8533),  //PFX 48xG3
        SWITCHTEC_PCI_DEVICE(0x8534),  //PFX 64xG3
        SWITCHTEC_PCI_DEVICE(0x8535),  //PFX 80xG3
        SWITCHTEC_PCI_DEVICE(0x8536),  //PFX 96xG3
        SWITCHTEC_PCI_DEVICE(0x8541),  //PSX 24xG3
        SWITCHTEC_PCI_DEVICE(0x8542),  //PSX 32xG3
        SWITCHTEC_PCI_DEVICE(0x8543),  //PSX 48xG3
        SWITCHTEC_PCI_DEVICE(0x8544),  //PSX 64xG3
        SWITCHTEC_PCI_DEVICE(0x8545),  //PSX 80xG3
        SWITCHTEC_PCI_DEVICE(0x8546),  //PSX 96xG3
        SWITCHTEC_PCI_DEVICE(0x8551),  //PAX 24XG3
        SWITCHTEC_PCI_DEVICE(0x8552),  //PAX 32XG3
        SWITCHTEC_PCI_DEVICE(0x8553),  //PAX 48XG3
        SWITCHTEC_PCI_DEVICE(0x8554),  //PAX 64XG3
        SWITCHTEC_PCI_DEVICE(0x8555),  //PAX 80XG3
        SWITCHTEC_PCI_DEVICE(0x8556),  //PAX 96XG3
        SWITCHTEC_PCI_DEVICE(0x8561),  //PFXL 24XG3
        SWITCHTEC_PCI_DEVICE(0x8562),  //PFXL 32XG3
        SWITCHTEC_PCI_DEVICE(0x8563),  //PFXL 48XG3
        SWITCHTEC_PCI_DEVICE(0x8564),  //PFXL 64XG3
        SWITCHTEC_PCI_DEVICE(0x8565),  //PFXL 80XG3
        SWITCHTEC_PCI_DEVICE(0x8566),  //PFXL 96XG3
        SWITCHTEC_PCI_DEVICE(0x8571),  //PFXI 24XG3
        SWITCHTEC_PCI_DEVICE(0x8572),  //PFXI 32XG3
        SWITCHTEC_PCI_DEVICE(0x8573),  //PFXI 48XG3
        SWITCHTEC_PCI_DEVICE(0x8574),  //PFXI 64XG3
        SWITCHTEC_PCI_DEVICE(0x8575),  //PFXI 80XG3
        SWITCHTEC_PCI_DEVICE(0x8576),  //PFXI 96XG3
        {0}
};
MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);

static struct pci_driver switchtec_pci_driver = {
        .name = KBUILD_MODNAME,
        .id_table = switchtec_pci_tbl,
        .probe = switchtec_pci_probe,
        .remove = switchtec_pci_remove,
};

static int __init switchtec_init(void)
{
        int rc;

        rc = alloc_chrdev_region(&switchtec_devt, 0, max_devices,
                                 "switchtec");
        if (rc)
                return rc;

        switchtec_class = class_create(THIS_MODULE, "switchtec");
        if (IS_ERR(switchtec_class)) {
                rc = PTR_ERR(switchtec_class);
                goto err_create_class;
        }

        rc = pci_register_driver(&switchtec_pci_driver);
        if (rc)
                goto err_pci_register;

        pr_info(KBUILD_MODNAME ": loaded.\n");

        return 0;

err_pci_register:
        class_destroy(switchtec_class);

err_create_class:
        unregister_chrdev_region(switchtec_devt, max_devices);

        return rc;
}
module_init(switchtec_init);

static void __exit switchtec_exit(void)
{
        pci_unregister_driver(&switchtec_pci_driver);
        class_destroy(switchtec_class);
        unregister_chrdev_region(switchtec_devt, max_devices);
        ida_destroy(&switchtec_minor_ida);

        pr_info(KBUILD_MODNAME ": unloaded.\n");
}
module_exit(switchtec_exit);