mic_virtio.c 18 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701
  1. /*
  2. * Intel MIC Platform Software Stack (MPSS)
  3. *
  4. * Copyright(c) 2013 Intel Corporation.
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License, version 2, as
  8. * published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. * General Public License for more details.
  14. *
  15. * The full GNU General Public License is included in this distribution in
  16. * the file called "COPYING".
  17. *
  18. * Intel MIC Host driver.
  19. *
  20. */
  21. #include <linux/pci.h>
  22. #include <linux/sched.h>
  23. #include <linux/uaccess.h>
  24. #include <linux/mic_common.h>
  25. #include "../common/mic_dev.h"
  26. #include "mic_device.h"
  27. #include "mic_smpt.h"
  28. #include "mic_virtio.h"
  29. /*
  30. * Initiates the copies across the PCIe bus from card memory to
  31. * a user space buffer.
  32. */
  33. static int mic_virtio_copy_to_user(struct mic_vdev *mvdev,
  34. void __user *ubuf, size_t len, u64 addr)
  35. {
  36. int err;
  37. void __iomem *dbuf = mvdev->mdev->aper.va + addr;
  38. /*
  39. * We are copying from IO below an should ideally use something
  40. * like copy_to_user_fromio(..) if it existed.
  41. */
  42. if (copy_to_user(ubuf, (void __force *)dbuf, len)) {
  43. err = -EFAULT;
  44. dev_err(mic_dev(mvdev), "%s %d err %d\n",
  45. __func__, __LINE__, err);
  46. goto err;
  47. }
  48. mvdev->in_bytes += len;
  49. err = 0;
  50. err:
  51. return err;
  52. }
  53. /*
  54. * Initiates copies across the PCIe bus from a user space
  55. * buffer to card memory.
  56. */
  57. static int mic_virtio_copy_from_user(struct mic_vdev *mvdev,
  58. void __user *ubuf, size_t len, u64 addr)
  59. {
  60. int err;
  61. void __iomem *dbuf = mvdev->mdev->aper.va + addr;
  62. /*
  63. * We are copying to IO below and should ideally use something
  64. * like copy_from_user_toio(..) if it existed.
  65. */
  66. if (copy_from_user((void __force *)dbuf, ubuf, len)) {
  67. err = -EFAULT;
  68. dev_err(mic_dev(mvdev), "%s %d err %d\n",
  69. __func__, __LINE__, err);
  70. goto err;
  71. }
  72. mvdev->out_bytes += len;
  73. err = 0;
  74. err:
  75. return err;
  76. }
  77. #define MIC_VRINGH_READ true
  78. /* The function to call to notify the card about added buffers */
  79. static void mic_notify(struct vringh *vrh)
  80. {
  81. struct mic_vringh *mvrh = container_of(vrh, struct mic_vringh, vrh);
  82. struct mic_vdev *mvdev = mvrh->mvdev;
  83. s8 db = mvdev->dc->h2c_vdev_db;
  84. if (db != -1)
  85. mvdev->mdev->ops->send_intr(mvdev->mdev, db);
  86. }
  87. /* Determine the total number of bytes consumed in a VRINGH KIOV */
  88. static inline u32 mic_vringh_iov_consumed(struct vringh_kiov *iov)
  89. {
  90. int i;
  91. u32 total = iov->consumed;
  92. for (i = 0; i < iov->i; i++)
  93. total += iov->iov[i].iov_len;
  94. return total;
  95. }
/*
 * Traverse the VRINGH KIOV and issue the APIs to trigger the copies.
 * This API is heavily based on the vringh_iov_xfer(..) implementation
 * in vringh.c. The reason we cannot reuse vringh_iov_pull_kern(..)
 * and vringh_iov_push_kern(..) directly is because there is no
 * way to override the VRINGH xfer(..) routines as of v3.10.
 *
 * @iov: kernel IOV describing card-side buffers; mutated in place to
 *       track progress across calls.
 * @ubuf/@len: user-space buffer to copy to (read) or from (write).
 * @read: true -> card memory to user buffer, false -> the reverse.
 * @out_len: set to the number of bytes actually transferred.
 * Returns 0 on success or a negative errno from the copy helpers.
 */
static int mic_vringh_copy(struct mic_vdev *mvdev, struct vringh_kiov *iov,
	void __user *ubuf, size_t len, bool read, size_t *out_len)
{
	int ret = 0;
	size_t partlen, tot_len = 0;

	/* Keep going until the user buffer or the KIOV elements run out. */
	while (len && iov->i < iov->used) {
		/* Never transfer more than the current KIOV element holds. */
		partlen = min(iov->iov[iov->i].iov_len, len);
		if (read)
			ret = mic_virtio_copy_to_user(mvdev,
				ubuf, partlen,
				(u64)iov->iov[iov->i].iov_base);
		else
			ret = mic_virtio_copy_from_user(mvdev,
				ubuf, partlen,
				(u64)iov->iov[iov->i].iov_base);
		if (ret) {
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			break;
		}
		/* Advance the user buffer and the element-local cursor. */
		len -= partlen;
		ubuf += partlen;
		tot_len += partlen;
		iov->consumed += partlen;
		iov->iov[iov->i].iov_len -= partlen;
		iov->iov[iov->i].iov_base += partlen;
		if (!iov->iov[iov->i].iov_len) {
			/*
			 * Fix up old iov element then increment: restore the
			 * element's original len/base (so a later
			 * mic_vringh_iov_consumed()/cleanup sees the full
			 * extent) before moving to the next element.
			 */
			iov->iov[iov->i].iov_len = iov->consumed;
			iov->iov[iov->i].iov_base -= iov->consumed;
			iov->consumed = 0;
			iov->i++;
		}
	}
	*out_len = tot_len;
	return ret;
}
/*
 * Use the standard VRINGH infrastructure in the kernel to fetch new
 * descriptors, initiate the copies and update the used ring.
 *
 * One call services one descriptor chain: read descriptors are drained
 * into the user iovecs first, then the user iovecs fill the write
 * descriptors. copy->out_len accumulates the bytes moved in both
 * directions. The used ring is only updated when the caller asked for
 * it (copy->update_used) and some data actually moved.
 */
static int _mic_virtio_copy(struct mic_vdev *mvdev,
	struct mic_copy_desc *copy)
{
	int ret = 0;
	u32 iovcnt = copy->iovcnt;
	struct iovec iov;
	struct iovec __user *u_iov = copy->iov;
	void __user *ubuf = NULL;
	struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx];
	struct vringh_kiov *riov = &mvr->riov;
	struct vringh_kiov *wiov = &mvr->wiov;
	struct vringh *vrh = &mvr->vrh;
	u16 *head = &mvr->head;
	struct mic_vring *vr = &mvr->vring;
	size_t len = 0, out_len;

	copy->out_len = 0;
	/* Fetch a new IOVEC if all previous elements have been processed */
	if (riov->i == riov->used && wiov->i == wiov->used) {
		ret = vringh_getdesc_kern(vrh, riov, wiov,
			head, GFP_KERNEL);
		/* Check if there are available descriptors */
		if (ret <= 0)
			return ret;
	}
	while (iovcnt) {
		if (!len) {
			/* Copy over a new iovec from user space. */
			ret = copy_from_user(&iov, u_iov, sizeof(*u_iov));
			if (ret) {
				/*
				 * NOTE(review): copy_from_user failure is
				 * reported as -EINVAL here, not -EFAULT —
				 * confirm this is intentional.
				 */
				ret = -EINVAL;
				dev_err(mic_dev(mvdev), "%s %d err %d\n",
					__func__, __LINE__, ret);
				break;
			}
			len = iov.iov_len;
			ubuf = iov.iov_base;
		}
		/* Issue all the read descriptors first */
		ret = mic_vringh_copy(mvdev, riov, ubuf, len,
			MIC_VRINGH_READ, &out_len);
		if (ret) {
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			break;
		}
		len -= out_len;
		ubuf += out_len;
		copy->out_len += out_len;
		/* Issue the write descriptors next */
		ret = mic_vringh_copy(mvdev, wiov, ubuf, len,
			!MIC_VRINGH_READ, &out_len);
		if (ret) {
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			break;
		}
		len -= out_len;
		ubuf += out_len;
		copy->out_len += out_len;
		if (!len) {
			/* One user space iovec is now completed */
			iovcnt--;
			u_iov++;
		}
		/* Exit loop if all elements in KIOVs have been processed. */
		if (riov->i == riov->used && wiov->i == wiov->used)
			break;
	}
	/*
	 * Update the used ring if a descriptor was available and some data was
	 * copied in/out and the user asked for a used ring update.
	 */
	if (*head != USHRT_MAX && copy->out_len && copy->update_used) {
		u32 total = 0;

		/* Determine the total data consumed */
		total += mic_vringh_iov_consumed(riov);
		total += mic_vringh_iov_consumed(wiov);
		vringh_complete_kern(vrh, *head, total);
		/* Mark the chain as retired so it is not completed twice. */
		*head = USHRT_MAX;
		if (vringh_need_notify_kern(vrh) > 0)
			vringh_notify(vrh);
		vringh_kiov_cleanup(riov);
		vringh_kiov_cleanup(wiov);
		/* Update avail idx for user space */
		vr->info->avail_idx = vrh->last_avail_idx;
	}
	return ret;
}
  232. static inline int mic_verify_copy_args(struct mic_vdev *mvdev,
  233. struct mic_copy_desc *copy)
  234. {
  235. if (copy->vr_idx >= mvdev->dd->num_vq) {
  236. dev_err(mic_dev(mvdev), "%s %d err %d\n",
  237. __func__, __LINE__, -EINVAL);
  238. return -EINVAL;
  239. }
  240. return 0;
  241. }
  242. /* Copy a specified number of virtio descriptors in a chain */
  243. int mic_virtio_copy_desc(struct mic_vdev *mvdev,
  244. struct mic_copy_desc *copy)
  245. {
  246. int err;
  247. struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx];
  248. err = mic_verify_copy_args(mvdev, copy);
  249. if (err)
  250. return err;
  251. mutex_lock(&mvr->vr_mutex);
  252. if (!mic_vdevup(mvdev)) {
  253. err = -ENODEV;
  254. dev_err(mic_dev(mvdev), "%s %d err %d\n",
  255. __func__, __LINE__, err);
  256. goto err;
  257. }
  258. err = _mic_virtio_copy(mvdev, copy);
  259. if (err) {
  260. dev_err(mic_dev(mvdev), "%s %d err %d\n",
  261. __func__, __LINE__, err);
  262. }
  263. err:
  264. mutex_unlock(&mvr->vr_mutex);
  265. return err;
  266. }
  267. static void mic_virtio_init_post(struct mic_vdev *mvdev)
  268. {
  269. struct mic_vqconfig *vqconfig = mic_vq_config(mvdev->dd);
  270. int i;
  271. for (i = 0; i < mvdev->dd->num_vq; i++) {
  272. if (!le64_to_cpu(vqconfig[i].used_address)) {
  273. dev_warn(mic_dev(mvdev), "used_address zero??\n");
  274. continue;
  275. }
  276. mvdev->mvr[i].vrh.vring.used =
  277. (void __force *)mvdev->mdev->aper.va +
  278. le64_to_cpu(vqconfig[i].used_address);
  279. }
  280. mvdev->dc->used_address_updated = 0;
  281. dev_dbg(mic_dev(mvdev), "%s: device type %d LINKUP\n",
  282. __func__, mvdev->virtio_id);
  283. }
/*
 * Reset one virtio device: zero its status, acknowledge the reset to
 * the card and rewind every vring back to its initial indices. All
 * vring mutexes are held across the reset so no copy can race with it.
 */
static inline void mic_virtio_device_reset(struct mic_vdev *mvdev)
{
	int i;

	dev_dbg(mic_dev(mvdev), "%s: status %d device type %d RESET\n",
		__func__, mvdev->dd->status, mvdev->virtio_id);
	for (i = 0; i < mvdev->dd->num_vq; i++)
		/*
		 * Avoid lockdep false positive. The + 1 is for the mic
		 * mutex which is held in the reset devices code path.
		 */
		mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1);
	/* 0 status means "reset" */
	mvdev->dd->status = 0;
	mvdev->dc->vdev_reset = 0;
	/* Tell the card the host has completed its side of the reset. */
	mvdev->dc->host_ack = 1;
	for (i = 0; i < mvdev->dd->num_vq; i++) {
		struct vringh *vrh = &mvdev->mvr[i].vrh;

		/* Rewind host and user-space views of the ring to empty. */
		mvdev->mvr[i].vring.info->avail_idx = 0;
		vrh->completed = 0;
		vrh->last_avail_idx = 0;
		vrh->last_used_idx = 0;
	}
	for (i = 0; i < mvdev->dd->num_vq; i++)
		mutex_unlock(&mvdev->mvr[i].vr_mutex);
}
  309. void mic_virtio_reset_devices(struct mic_device *mdev)
  310. {
  311. struct list_head *pos, *tmp;
  312. struct mic_vdev *mvdev;
  313. dev_dbg(mdev->sdev->parent, "%s\n", __func__);
  314. list_for_each_safe(pos, tmp, &mdev->vdev_list) {
  315. mvdev = list_entry(pos, struct mic_vdev, list);
  316. mic_virtio_device_reset(mvdev);
  317. mvdev->poll_wake = 1;
  318. wake_up(&mvdev->waitq);
  319. }
  320. }
  321. void mic_bh_handler(struct work_struct *work)
  322. {
  323. struct mic_vdev *mvdev = container_of(work, struct mic_vdev,
  324. virtio_bh_work);
  325. if (mvdev->dc->used_address_updated)
  326. mic_virtio_init_post(mvdev);
  327. if (mvdev->dc->vdev_reset)
  328. mic_virtio_device_reset(mvdev);
  329. mvdev->poll_wake = 1;
  330. wake_up(&mvdev->waitq);
  331. }
  332. static irqreturn_t mic_virtio_intr_handler(int irq, void *data)
  333. {
  334. struct mic_vdev *mvdev = data;
  335. struct mic_device *mdev = mvdev->mdev;
  336. mdev->ops->intr_workarounds(mdev);
  337. schedule_work(&mvdev->virtio_bh_work);
  338. return IRQ_HANDLED;
  339. }
/*
 * Push a new virtio config space from user space to the card and wait
 * (bounded) for the card to acknowledge the change.
 *
 * @argp: user pointer to config_len bytes of new config space.
 * Returns 0 on success, -EIO if the doorbell/type is not set up,
 * -EFAULT on a bad user pointer.
 */
int mic_virtio_config_change(struct mic_vdev *mvdev,
	void __user *argp)
{
	/*
	 * NOTE(review): nothing ever wakes this on-stack wait queue, so
	 * wait_event_timeout() here acts as a 100 ms poll interval while
	 * checking guest_ack — confirm that is the intent.
	 */
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
	int ret = 0, retry, i;
	struct mic_bootparam *bootparam = mvdev->mdev->dp;
	s8 db = bootparam->h2c_config_db;

	mutex_lock(&mvdev->mdev->mic_mutex);
	/* Nested vring locks sit under mic_mutex; +1 keeps lockdep happy. */
	for (i = 0; i < mvdev->dd->num_vq; i++)
		mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1);
	/* -1 means the doorbell/device type has not been set up yet. */
	if (db == -1 || mvdev->dd->type == -1) {
		ret = -EIO;
		goto exit;
	}
	if (copy_from_user(mic_vq_configspace(mvdev->dd),
		argp, mvdev->dd->config_len)) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -EFAULT);
		ret = -EFAULT;
		goto exit;
	}
	/* Tell the card the config changed and ring its doorbell. */
	mvdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED;
	mvdev->mdev->ops->send_intr(mvdev->mdev, db);
	/* Poll for the card's ack, up to 100 x 100 ms. */
	for (retry = 100; retry--;) {
		ret = wait_event_timeout(wake,
			mvdev->dc->guest_ack, msecs_to_jiffies(100));
		if (ret)
			break;
	}
	dev_dbg(mic_dev(mvdev),
		"%s %d retry: %d\n", __func__, __LINE__, retry);
	/* Clear the handshake state for the next config change. */
	mvdev->dc->config_change = 0;
	mvdev->dc->guest_ack = 0;
exit:
	for (i = 0; i < mvdev->dd->num_vq; i++)
		mutex_unlock(&mvdev->mvr[i].vr_mutex);
	mutex_unlock(&mvdev->mdev->mic_mutex);
	return ret;
}
/*
 * Copy a user supplied device descriptor into the first free slot of
 * the device page shared with the card.
 *
 * @argp: user pointer to a struct mic_device_desc (plus its trailing
 *        vqconfig/features/config space, mic_desc_size() bytes total).
 * @type: out — the descriptor's device type, saved before it is zeroed
 *        in the page (the caller publishes it last, after full init).
 * @devpage: out — the slot in the device page that was filled in.
 * Returns 0 on success, -EFAULT/-EINVAL/-ENOMEM on failure.
 */
static int mic_copy_dp_entry(struct mic_vdev *mvdev,
	void __user *argp,
	__u8 *type,
	struct mic_device_desc **devpage)
{
	struct mic_device *mdev = mvdev->mdev;
	struct mic_device_desc dd, *dd_config, *devp;
	struct mic_vqconfig *vqconfig;
	int ret = 0, i;
	bool slot_found = false;

	/* First copy just the fixed header to learn the full size. */
	if (copy_from_user(&dd, argp, sizeof(dd))) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -EFAULT);
		return -EFAULT;
	}
	/* Reject descriptors that exceed the device page limits. */
	if (mic_aligned_desc_size(&dd) > MIC_MAX_DESC_BLK_SIZE ||
		dd.num_vq > MIC_MAX_VRINGS) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -EINVAL);
		return -EINVAL;
	}
	dd_config = kmalloc(mic_desc_size(&dd), GFP_KERNEL);
	if (dd_config == NULL) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -ENOMEM);
		return -ENOMEM;
	}
	/* Now copy the whole descriptor including trailing config. */
	if (copy_from_user(dd_config, argp, mic_desc_size(&dd))) {
		ret = -EFAULT;
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, ret);
		goto exit;
	}
	/* Validate every requested vring size. */
	vqconfig = mic_vq_config(dd_config);
	for (i = 0; i < dd.num_vq; i++) {
		if (le16_to_cpu(vqconfig[i].num) > MIC_MAX_VRING_ENTRIES) {
			ret = -EINVAL;
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto exit;
		}
	}
	/* Find the first free device page entry */
	for (i = sizeof(struct mic_bootparam);
		i < MIC_DP_SIZE - mic_total_desc_size(dd_config);
		i += mic_total_desc_size(devp)) {
		devp = mdev->dp + i;
		/* type 0 = never used, type -1 = removed device. */
		if (devp->type == 0 || devp->type == -1) {
			slot_found = true;
			break;
		}
	}
	if (!slot_found) {
		ret = -EINVAL;
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, ret);
		goto exit;
	}
	/*
	 * Save off the type before doing the memcpy. Type will be set in the
	 * end after completing all initialization for the new device.
	 */
	*type = dd_config->type;
	dd_config->type = 0;
	memcpy(devp, dd_config, mic_desc_size(dd_config));
	*devpage = devp;
exit:
	kfree(dd_config);
	return ret;
}
  449. static void mic_init_device_ctrl(struct mic_vdev *mvdev,
  450. struct mic_device_desc *devpage)
  451. {
  452. struct mic_device_ctrl *dc;
  453. dc = (void *)devpage + mic_aligned_desc_size(devpage);
  454. dc->config_change = 0;
  455. dc->guest_ack = 0;
  456. dc->vdev_reset = 0;
  457. dc->host_ack = 0;
  458. dc->used_address_updated = 0;
  459. dc->c2h_vdev_db = -1;
  460. dc->h2c_vdev_db = -1;
  461. mvdev->dc = dc;
  462. }
/*
 * Add a new virtio device for this MIC: copy the user supplied
 * descriptor into the device page, allocate and DMA-map a vring plus
 * host vringh per virtqueue, hook up the doorbell interrupt, and only
 * then publish the device type so the card can discover it.
 *
 * @argp: user pointer to the device descriptor (see mic_copy_dp_entry).
 * Returns 0 on success; on failure all vrings allocated so far are
 * unmapped and freed.
 */
int mic_virtio_add_device(struct mic_vdev *mvdev,
	void __user *argp)
{
	struct mic_device *mdev = mvdev->mdev;
	struct mic_device_desc *dd = NULL;
	struct mic_vqconfig *vqconfig;
	int vr_size, i, j, ret;
	u8 type = 0;
	s8 db;
	char irqname[10];
	struct mic_bootparam *bootparam = mdev->dp;
	u16 num;
	dma_addr_t vr_addr;

	mutex_lock(&mdev->mic_mutex);
	/* Stage the descriptor in the device page; type stays 0 for now. */
	ret = mic_copy_dp_entry(mvdev, argp, &type, &dd);
	if (ret) {
		mutex_unlock(&mdev->mic_mutex);
		return ret;
	}
	mic_init_device_ctrl(mvdev, dd);
	mvdev->dd = dd;
	mvdev->virtio_id = type;
	vqconfig = mic_vq_config(dd);
	INIT_WORK(&mvdev->virtio_bh_work, mic_bh_handler);
	for (i = 0; i < dd->num_vq; i++) {
		struct mic_vringh *mvr = &mvdev->mvr[i];
		struct mic_vring *vr = &mvdev->mvr[i].vring;

		num = le16_to_cpu(vqconfig[i].num);
		mutex_init(&mvr->vr_mutex);
		/* Ring memory plus the trailing info area, page aligned. */
		vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) +
			sizeof(struct _mic_vring_info));
		vr->va = (void *)
			__get_free_pages(GFP_KERNEL | __GFP_ZERO,
			get_order(vr_size));
		if (!vr->va) {
			ret = -ENOMEM;
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto err;
		}
		vr->len = vr_size;
		/* The info block lives right after the vring proper. */
		vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN);
		vr->info->magic = cpu_to_le32(MIC_MAGIC + mvdev->virtio_id + i);
		/* DMA-map the ring so the card can reach it. */
		vr_addr = mic_map_single(mdev, vr->va, vr_size);
		if (mic_map_error(vr_addr)) {
			free_pages((unsigned long)vr->va, get_order(vr_size));
			ret = -ENOMEM;
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto err;
		}
		/* Publish the ring's DMA address to the card. */
		vqconfig[i].address = cpu_to_le64(vr_addr);
		vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN);
		ret = vringh_init_kern(&mvr->vrh,
			*(u32 *)mic_vq_features(mvdev->dd), num, false,
			vr->vr.desc, vr->vr.avail, vr->vr.used);
		if (ret) {
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto err;
		}
		vringh_kiov_init(&mvr->riov, NULL, 0);
		vringh_kiov_init(&mvr->wiov, NULL, 0);
		/* USHRT_MAX head means "no descriptor chain in flight". */
		mvr->head = USHRT_MAX;
		mvr->mvdev = mvdev;
		mvr->vrh.notify = mic_notify;
		dev_dbg(mdev->sdev->parent,
			"%s %d index %d va %p info %p vr_size 0x%x\n",
			__func__, __LINE__, i, vr->va, vr->info, vr_size);
	}
	snprintf(irqname, sizeof(irqname), "mic%dvirtio%d", mdev->id,
		mvdev->virtio_id);
	mvdev->virtio_db = mic_next_db(mdev);
	mvdev->virtio_cookie = mic_request_irq(mdev, mic_virtio_intr_handler,
		irqname, mvdev, mvdev->virtio_db, MIC_INTR_DB);
	if (IS_ERR(mvdev->virtio_cookie)) {
		ret = PTR_ERR(mvdev->virtio_cookie);
		dev_dbg(mdev->sdev->parent, "request irq failed\n");
		goto err;
	}
	mvdev->dc->c2h_vdev_db = mvdev->virtio_db;
	list_add_tail(&mvdev->list, &mdev->vdev_list);
	/*
	 * Order the type update with previous stores. This write barrier
	 * is paired with the corresponding read barrier before the uncached
	 * system memory read of the type, on the card while scanning the
	 * device page.
	 */
	smp_wmb();
	dd->type = type;
	dev_dbg(mdev->sdev->parent, "Added virtio device id %d\n", dd->type);
	/* Ring the config doorbell so the card rescans the device page. */
	db = bootparam->h2c_config_db;
	if (db != -1)
		mdev->ops->send_intr(mdev, db);
	mutex_unlock(&mdev->mic_mutex);
	return 0;
err:
	/* Unwind only the vrings that were fully set up (indices < i). */
	vqconfig = mic_vq_config(dd);
	for (j = 0; j < i; j++) {
		struct mic_vringh *mvr = &mvdev->mvr[j];

		mic_unmap_single(mdev, le64_to_cpu(vqconfig[j].address),
			mvr->vring.len);
		free_pages((unsigned long)mvr->vring.va,
			get_order(mvr->vring.len));
	}
	mutex_unlock(&mdev->mic_mutex);
	return ret;
}
/*
 * Remove a virtio device: ask the card to hot-remove it (bounded wait
 * for the ack), tear down the interrupt and per-vring resources, drop
 * it from the device list, and finally mark the device page slot as
 * removed (type = -1) so the card stops scanning it.
 */
void mic_virtio_del_device(struct mic_vdev *mvdev)
{
	struct list_head *pos, *tmp;
	struct mic_vdev *tmp_mvdev;
	struct mic_device *mdev = mvdev->mdev;
	/*
	 * NOTE(review): nothing wakes this on-stack wait queue, so
	 * wait_event_timeout() below acts as a 100 ms poll of guest_ack.
	 */
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
	int i, ret, retry;
	struct mic_vqconfig *vqconfig;
	struct mic_bootparam *bootparam = mdev->dp;
	s8 db;

	mutex_lock(&mdev->mic_mutex);
	db = bootparam->h2c_config_db;
	/* No doorbell set up means the card can't be asked to hot-remove. */
	if (db == -1)
		goto skip_hot_remove;
	dev_dbg(mdev->sdev->parent,
		"Requesting hot remove id %d\n", mvdev->virtio_id);
	mvdev->dc->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE;
	mdev->ops->send_intr(mdev, db);
	/* Poll for the card's ack, up to 100 x 100 ms. */
	for (retry = 100; retry--;) {
		ret = wait_event_timeout(wake,
			mvdev->dc->guest_ack, msecs_to_jiffies(100));
		if (ret)
			break;
	}
	dev_dbg(mdev->sdev->parent,
		"Device id %d config_change %d guest_ack %d retry %d\n",
		mvdev->virtio_id, mvdev->dc->config_change,
		mvdev->dc->guest_ack, retry);
	mvdev->dc->config_change = 0;
	mvdev->dc->guest_ack = 0;
skip_hot_remove:
	/* Stop interrupts first, then drain any pending bottom half. */
	mic_free_irq(mdev, mvdev->virtio_cookie, mvdev);
	flush_work(&mvdev->virtio_bh_work);
	vqconfig = mic_vq_config(mvdev->dd);
	for (i = 0; i < mvdev->dd->num_vq; i++) {
		struct mic_vringh *mvr = &mvdev->mvr[i];

		vringh_kiov_cleanup(&mvr->riov);
		vringh_kiov_cleanup(&mvr->wiov);
		mic_unmap_single(mdev, le64_to_cpu(vqconfig[i].address),
			mvr->vring.len);
		free_pages((unsigned long)mvr->vring.va,
			get_order(mvr->vring.len));
	}
	/* Unlink this device from the per-mdev device list. */
	list_for_each_safe(pos, tmp, &mdev->vdev_list) {
		tmp_mvdev = list_entry(pos, struct mic_vdev, list);
		if (tmp_mvdev == mvdev) {
			list_del(pos);
			dev_dbg(mdev->sdev->parent,
				"Removing virtio device id %d\n",
				mvdev->virtio_id);
			break;
		}
	}
	/*
	 * Order the type update with previous stores. This write barrier
	 * is paired with the corresponding read barrier before the uncached
	 * system memory read of the type, on the card while scanning the
	 * device page.
	 */
	smp_wmb();
	mvdev->dd->type = -1;
	mutex_unlock(&mdev->mic_mutex);
}