/*
 * Virtio SCSI HBA driver
 *
 * Copyright IBM Corp. 2010
 * Copyright Red Hat, Inc. 2011
 *
 * Authors:
 *  Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *  Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_scsi.h>
#include <linux/cpu.h>
#include <linux/blkdev.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
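
/*
 * VIRTIO_SCSI_MEMPOOL_SZ sizes the emergency pool backing the TMF commands
 * allocated in the error handlers, VIRTIO_SCSI_EVENT_LEN is the number of
 * buffers kept posted on the event virtqueue, and VIRTIO_SCSI_VQ_BASE is
 * the index of the first request virtqueue (indexes 0 and 1 are the
 * control and event queues).
 */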
#define VIRTIO_SCSI_MEMPOOL_SZ 64
#define VIRTIO_SCSI_EVENT_LEN 8
#define VIRTIO_SCSI_VQ_BASE 2

/* Command queue element */
struct virtio_scsi_cmd {
        struct scsi_cmnd *sc;
        struct completion *comp;
        union {
                struct virtio_scsi_cmd_req       cmd;
                struct virtio_scsi_cmd_req_pi    cmd_pi;
                struct virtio_scsi_ctrl_tmf_req  tmf;
                struct virtio_scsi_ctrl_an_req   an;
        } req;
        union {
                struct virtio_scsi_cmd_resp      cmd;
                struct virtio_scsi_ctrl_tmf_resp tmf;
                struct virtio_scsi_ctrl_an_resp  an;
                struct virtio_scsi_event         evt;
        } resp;
} ____cacheline_aligned_in_smp;
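
/*
 * One preallocated buffer per slot on the event virtqueue. The completion
 * callback only schedules the work item; the actual event is processed in
 * process context by virtscsi_handle_event().
 */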
struct virtio_scsi_event_node {
        struct virtio_scsi *vscsi;
        struct virtio_scsi_event event;
        struct work_struct work;
};

struct virtio_scsi_vq {
        /* Protects vq */
        spinlock_t vq_lock;

        struct virtqueue *vq;
};

/*
 * Per-target queue state.
 *
 * This struct holds the data needed by the queue steering policy. When a
 * target is sent multiple requests, we need to drive them to the same queue so
 * that FIFO processing order is kept. However, if a target was idle, we can
 * choose a queue arbitrarily. In this case the queue is chosen according to
 * the current VCPU, so the driver expects the number of request queues to be
 * equal to the number of VCPUs. This makes it easy and fast to select the
 * queue, and also lets the driver optimize the IRQ affinity for the virtqueues
 * (each virtqueue's affinity is set to the CPU that "owns" the queue).
 *
 * tgt_lock is held to serialize reading and writing req_vq. Reading req_vq
 * could be done locklessly, but we do not do it yet.
 *
 * Decrements of reqs are never concurrent with writes of req_vq: before the
 * decrement reqs will be != 0; after the decrement the virtqueue completion
 * routine will not use the req_vq so it can be changed by a new request.
 * Thus they can happen outside the tgt_lock, provided of course we make reqs
 * an atomic_t.
 */
struct virtio_scsi_target_state {
        /* This spinlock is never held at the same time as vq_lock. */
        spinlock_t tgt_lock;

        /* Count of outstanding requests. */
        atomic_t reqs;

        /* Currently active virtqueue for requests sent to this target. */
        struct virtio_scsi_vq *req_vq;
};

/* Driver instance state */
struct virtio_scsi {
        struct virtio_device *vdev;

        /* Get some buffers ready for event vq */
        struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];

        u32 num_queues;

        /* If the affinity hint is set for virtqueues */
        bool affinity_hint_set;

        /* CPU hotplug notifier */
        struct notifier_block nb;

        struct virtio_scsi_vq ctrl_vq;
        struct virtio_scsi_vq event_vq;
        struct virtio_scsi_vq req_vqs[];
};

static struct kmem_cache *virtscsi_cmd_cache;
static mempool_t *virtscsi_cmd_pool;

static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
{
        return vdev->priv;
}
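
/*
 * Propagate the device-reported residual count to the SCSI midlayer. For
 * bidirectional commands the device returns a single count, which is
 * credited to the data-in side first and the remainder to data-out.
 */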
static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
{
        if (!resid)
                return;

        if (!scsi_bidi_cmnd(sc)) {
                scsi_set_resid(sc, resid);
                return;
        }

        scsi_in(sc)->resid = min(resid, scsi_in(sc)->length);
        scsi_out(sc)->resid = resid - scsi_in(sc)->resid;
}

/**
 * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done
 *
 * Called with vq_lock held.
 */
static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
{
        struct virtio_scsi_cmd *cmd = buf;
        struct scsi_cmnd *sc = cmd->sc;
        struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;
        struct virtio_scsi_target_state *tgt =
                                scsi_target(sc->device)->hostdata;

        dev_dbg(&sc->device->sdev_gendev,
                "cmd %p response %u status %#02x sense_len %u\n",
                sc, resp->response, resp->status, resp->sense_len);

        sc->result = resp->status;
        virtscsi_compute_resid(sc, resp->resid);
        switch (resp->response) {
        case VIRTIO_SCSI_S_OK:
                set_host_byte(sc, DID_OK);
                break;
        case VIRTIO_SCSI_S_OVERRUN:
                set_host_byte(sc, DID_ERROR);
                break;
        case VIRTIO_SCSI_S_ABORTED:
                set_host_byte(sc, DID_ABORT);
                break;
        case VIRTIO_SCSI_S_BAD_TARGET:
                set_host_byte(sc, DID_BAD_TARGET);
                break;
        case VIRTIO_SCSI_S_RESET:
                set_host_byte(sc, DID_RESET);
                break;
        case VIRTIO_SCSI_S_BUSY:
                set_host_byte(sc, DID_BUS_BUSY);
                break;
        case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
                set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
                break;
        case VIRTIO_SCSI_S_TARGET_FAILURE:
                set_host_byte(sc, DID_TARGET_FAILURE);
                break;
        case VIRTIO_SCSI_S_NEXUS_FAILURE:
                set_host_byte(sc, DID_NEXUS_FAILURE);
                break;
        default:
                scmd_printk(KERN_WARNING, sc, "Unknown response %d",
                            resp->response);
                /* fall through */
        case VIRTIO_SCSI_S_FAILURE:
                set_host_byte(sc, DID_ERROR);
                break;
        }

        WARN_ON(resp->sense_len > VIRTIO_SCSI_SENSE_SIZE);
        if (sc->sense_buffer) {
                memcpy(sc->sense_buffer, resp->sense,
                       min_t(u32, resp->sense_len, VIRTIO_SCSI_SENSE_SIZE));
                if (resp->sense_len)
                        set_driver_byte(sc, DRIVER_SENSE);
        }

        sc->scsi_done(sc);

        atomic_dec(&tgt->reqs);
}
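
/*
 * Drain completed buffers from a virtqueue. Callbacks stay disabled while
 * buffers are popped; the loop re-runs when virtqueue_enable_cb() reports
 * that more buffers arrived in the meantime, closing the race between the
 * final virtqueue_get_buf() and re-enabling the callback.
 */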
static void virtscsi_vq_done(struct virtio_scsi *vscsi,
                             struct virtio_scsi_vq *virtscsi_vq,
                             void (*fn)(struct virtio_scsi *vscsi, void *buf))
{
        void *buf;
        unsigned int len;
        unsigned long flags;
        struct virtqueue *vq = virtscsi_vq->vq;

        spin_lock_irqsave(&virtscsi_vq->vq_lock, flags);
        do {
                virtqueue_disable_cb(vq);
                while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
                        fn(vscsi, buf);

                if (unlikely(virtqueue_is_broken(vq)))
                        break;
        } while (!virtqueue_enable_cb(vq));
        spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
}

static void virtscsi_req_done(struct virtqueue *vq)
{
        struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
        struct virtio_scsi *vscsi = shost_priv(sh);
        int index = vq->index - VIRTIO_SCSI_VQ_BASE;
        struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index];

        virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
}

static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
{
        struct virtio_scsi_cmd *cmd = buf;

        if (cmd->comp)
                complete_all(cmd->comp);
}

static void virtscsi_ctrl_done(struct virtqueue *vq)
{
        struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
        struct virtio_scsi *vscsi = shost_priv(sh);

        virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
}

static void virtscsi_handle_event(struct work_struct *work);
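
/*
 * Post one event buffer on the event virtqueue. Called once per slot at
 * startup and again from the work handler to recycle the node after an
 * event has been processed.
 */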
static int virtscsi_kick_event(struct virtio_scsi *vscsi,
                               struct virtio_scsi_event_node *event_node)
{
        int err;
        struct scatterlist sg;
        unsigned long flags;

        INIT_WORK(&event_node->work, virtscsi_handle_event);
        sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));

        spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);

        err = virtqueue_add_inbuf(vscsi->event_vq.vq, &sg, 1, event_node,
                                  GFP_ATOMIC);
        if (!err)
                virtqueue_kick(vscsi->event_vq.vq);

        spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);

        return err;
}

static int virtscsi_kick_event_all(struct virtio_scsi *vscsi)
{
        int i;

        for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) {
                vscsi->event_list[i].vscsi = vscsi;
                virtscsi_kick_event(vscsi, &vscsi->event_list[i]);
        }

        return 0;
}

static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi)
{
        int i;

        for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++)
                cancel_work_sync(&vscsi->event_list[i].work);
}
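
/*
 * The event's LUN field uses the single-level format: byte 1 is the target
 * and bytes 2-3 carry the LUN, address-method bits included, so LUNs above
 * 255 land in the 16640-32767 range advertised through shost->max_lun.
 */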
static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
                                            struct virtio_scsi_event *event)
{
        struct scsi_device *sdev;
        struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
        unsigned int target = event->lun[1];
        unsigned int lun = (event->lun[2] << 8) | event->lun[3];

        switch (event->reason) {
        case VIRTIO_SCSI_EVT_RESET_RESCAN:
                scsi_add_device(shost, 0, target, lun);
                break;
        case VIRTIO_SCSI_EVT_RESET_REMOVED:
                sdev = scsi_device_lookup(shost, 0, target, lun);
                if (sdev) {
                        scsi_remove_device(sdev);
                        scsi_device_put(sdev);
                } else {
                        pr_err("SCSI device %d 0 %d %d not found\n",
                               shost->host_no, target, lun);
                }
                break;
        default:
                pr_info("Unsupported virtio scsi event reason %x\n",
                        event->reason);
        }
}

static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
                                         struct virtio_scsi_event *event)
{
        struct scsi_device *sdev;
        struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
        unsigned int target = event->lun[1];
        unsigned int lun = (event->lun[2] << 8) | event->lun[3];
        u8 asc = event->reason & 255;
        u8 ascq = event->reason >> 8;

        sdev = scsi_device_lookup(shost, 0, target, lun);
        if (!sdev) {
                pr_err("SCSI device %d 0 %d %d not found\n",
                       shost->host_no, target, lun);
                return;
        }

        /* Handle "Parameters changed", "Mode parameters changed", and
           "Capacity data has changed". */
        if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
                scsi_rescan_device(&sdev->sdev_gendev);

        scsi_device_put(sdev);
}

static void virtscsi_handle_event(struct work_struct *work)
{
        struct virtio_scsi_event_node *event_node =
                container_of(work, struct virtio_scsi_event_node, work);
        struct virtio_scsi *vscsi = event_node->vscsi;
        struct virtio_scsi_event *event = &event_node->event;

        if (event->event & VIRTIO_SCSI_T_EVENTS_MISSED) {
                event->event &= ~VIRTIO_SCSI_T_EVENTS_MISSED;
                scsi_scan_host(virtio_scsi_host(vscsi->vdev));
        }

        switch (event->event) {
        case VIRTIO_SCSI_T_NO_EVENT:
                break;
        case VIRTIO_SCSI_T_TRANSPORT_RESET:
                virtscsi_handle_transport_reset(vscsi, event);
                break;
        case VIRTIO_SCSI_T_PARAM_CHANGE:
                virtscsi_handle_param_change(vscsi, event);
                break;
        default:
                pr_err("Unsupported virtio scsi event %x\n", event->event);
        }

        virtscsi_kick_event(vscsi, event_node);
}

static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
{
        struct virtio_scsi_event_node *event_node = buf;

        schedule_work(&event_node->work);
}

static void virtscsi_event_done(struct virtqueue *vq)
{
        struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
        struct virtio_scsi *vscsi = shost_priv(sh);

        virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event);
}
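
/*
 * Virtio requires all device-readable buffers (request header, data-out
 * payload) to precede the device-writable ones (response header, data-in
 * payload). sgs[6] covers the worst case of a header, a protection entry
 * and a payload entry in each direction.
 */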
/**
 * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue
 * @vq : the struct virtqueue we're talking about
 * @cmd : command structure
 * @req_size : size of the request buffer
 * @resp_size : size of the response buffer
 */
static int virtscsi_add_cmd(struct virtqueue *vq,
                            struct virtio_scsi_cmd *cmd,
                            size_t req_size, size_t resp_size)
{
        struct scsi_cmnd *sc = cmd->sc;
        struct scatterlist *sgs[6], req, resp;
        struct sg_table *out, *in;
        unsigned out_num = 0, in_num = 0;

        out = in = NULL;

        if (sc && sc->sc_data_direction != DMA_NONE) {
                if (sc->sc_data_direction != DMA_FROM_DEVICE)
                        out = &scsi_out(sc)->table;
                if (sc->sc_data_direction != DMA_TO_DEVICE)
                        in = &scsi_in(sc)->table;
        }

        /* Request header. */
        sg_init_one(&req, &cmd->req, req_size);
        sgs[out_num++] = &req;

        /* Data-out buffer. */
        if (out) {
                /* Place WRITE protection SGLs before Data OUT payload */
                if (scsi_prot_sg_count(sc))
                        sgs[out_num++] = scsi_prot_sglist(sc);
                sgs[out_num++] = out->sgl;
        }

        /* Response header. */
        sg_init_one(&resp, &cmd->resp, resp_size);
        sgs[out_num + in_num++] = &resp;

        /* Data-in buffer */
        if (in) {
                /* Place READ protection SGLs before Data IN payload */
                if (scsi_prot_sg_count(sc))
                        sgs[out_num + in_num++] = scsi_prot_sglist(sc);
                sgs[out_num + in_num++] = in->sgl;
        }

        return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
}
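
/*
 * Enqueue a command and notify the device. The expensive
 * virtqueue_notify() (a VM exit) happens outside vq_lock; only the cheap
 * kick-prepare check runs under the lock.
 */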
static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
                             struct virtio_scsi_cmd *cmd,
                             size_t req_size, size_t resp_size)
{
        unsigned long flags;
        int err;
        bool needs_kick = false;

        spin_lock_irqsave(&vq->vq_lock, flags);
        err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
        if (!err)
                needs_kick = virtqueue_kick_prepare(vq->vq);

        spin_unlock_irqrestore(&vq->vq_lock, flags);

        if (needs_kick)
                virtqueue_notify(vq->vq);
        return err;
}
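
/*
 * Fill the common request header. The LUN field uses the virtio-scsi
 * single-level format: byte 0 fixed to 1, byte 1 the target, bytes 2-3 the
 * LUN with the 0x40 flat-addressing flag. The tag is the scsi_cmnd pointer
 * itself, which virtscsi_abort() echoes back to name the task.
 */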
static void virtio_scsi_init_hdr(struct virtio_scsi_cmd_req *cmd,
                                 struct scsi_cmnd *sc)
{
        cmd->lun[0] = 1;
        cmd->lun[1] = sc->device->id;
        cmd->lun[2] = (sc->device->lun >> 8) | 0x40;
        cmd->lun[3] = sc->device->lun & 0xff;
        cmd->tag = (unsigned long)sc;
        cmd->task_attr = VIRTIO_SCSI_S_SIMPLE;
        cmd->prio = 0;
        cmd->crn = 0;
}

static void virtio_scsi_init_hdr_pi(struct virtio_scsi_cmd_req_pi *cmd_pi,
                                    struct scsi_cmnd *sc)
{
        struct request *rq = sc->request;
        struct blk_integrity *bi;

        virtio_scsi_init_hdr((struct virtio_scsi_cmd_req *)cmd_pi, sc);

        if (!rq || !scsi_prot_sg_count(sc))
                return;

        bi = blk_get_integrity(rq->rq_disk);

        if (sc->sc_data_direction == DMA_TO_DEVICE)
                cmd_pi->pi_bytesout = blk_rq_sectors(rq) * bi->tuple_size;
        else if (sc->sc_data_direction == DMA_FROM_DEVICE)
                cmd_pi->pi_bytesin = blk_rq_sectors(rq) * bi->tuple_size;
}

static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
                                 struct virtio_scsi_vq *req_vq,
                                 struct scsi_cmnd *sc)
{
        struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
        struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
        int req_size;

        BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);

        /* TODO: check feature bit and fail if unsupported? */
        BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);

        dev_dbg(&sc->device->sdev_gendev,
                "cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);

        memset(cmd, 0, sizeof(*cmd));
        cmd->sc = sc;

        BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);

        if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) {
                virtio_scsi_init_hdr_pi(&cmd->req.cmd_pi, sc);
                memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len);
                req_size = sizeof(cmd->req.cmd_pi);
        } else {
                virtio_scsi_init_hdr(&cmd->req.cmd, sc);
                memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
                req_size = sizeof(cmd->req.cmd);
        }

        if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0)
                return SCSI_MLQUEUE_HOST_BUSY;
        return 0;
}

static int virtscsi_queuecommand_single(struct Scsi_Host *sh,
                                        struct scsi_cmnd *sc)
{
        struct virtio_scsi *vscsi = shost_priv(sh);
        struct virtio_scsi_target_state *tgt =
                                scsi_target(sc->device)->hostdata;

        atomic_inc(&tgt->reqs);
        return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc);
}
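
/*
 * Queue steering: a target with requests in flight keeps its current
 * virtqueue so FIFO processing order is preserved; an idle target is
 * rebound to the queue of the CPU issuing the request (see the comment
 * above struct virtio_scsi_target_state).
 */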
static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,
                                               struct virtio_scsi_target_state *tgt)
{
        struct virtio_scsi_vq *vq;
        unsigned long flags;
        u32 queue_num;

        spin_lock_irqsave(&tgt->tgt_lock, flags);

        if (atomic_inc_return(&tgt->reqs) > 1)
                vq = tgt->req_vq;
        else {
                queue_num = smp_processor_id();
                while (unlikely(queue_num >= vscsi->num_queues))
                        queue_num -= vscsi->num_queues;

                tgt->req_vq = vq = &vscsi->req_vqs[queue_num];
        }

        spin_unlock_irqrestore(&tgt->tgt_lock, flags);
        return vq;
}

static int virtscsi_queuecommand_multi(struct Scsi_Host *sh,
                                       struct scsi_cmnd *sc)
{
        struct virtio_scsi *vscsi = shost_priv(sh);
        struct virtio_scsi_target_state *tgt =
                                scsi_target(sc->device)->hostdata;
        struct virtio_scsi_vq *req_vq = virtscsi_pick_vq(vscsi, tgt);

        return virtscsi_queuecommand(vscsi, req_vq, sc);
}
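
/*
 * Issue a task management function on the control virtqueue and wait for
 * its completion; virtscsi_complete_free() signals cmd->comp when the
 * device returns the response buffer.
 */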
static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
{
        DECLARE_COMPLETION_ONSTACK(comp);
        int ret = FAILED;

        cmd->comp = &comp;
        if (virtscsi_kick_cmd(&vscsi->ctrl_vq, cmd,
                              sizeof cmd->req.tmf, sizeof cmd->resp.tmf) < 0)
                goto out;

        wait_for_completion(&comp);
        if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK ||
            cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
                ret = SUCCESS;

out:
        mempool_free(cmd, virtscsi_cmd_pool);
        return ret;
}

static int virtscsi_device_reset(struct scsi_cmnd *sc)
{
        struct virtio_scsi *vscsi = shost_priv(sc->device->host);
        struct virtio_scsi_cmd *cmd;

        sdev_printk(KERN_INFO, sc->device, "device reset\n");
        cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
        if (!cmd)
                return FAILED;

        memset(cmd, 0, sizeof(*cmd));
        cmd->sc = sc;
        cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
                .type = VIRTIO_SCSI_T_TMF,
                .subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET,
                .lun[0] = 1,
                .lun[1] = sc->device->id,
                .lun[2] = (sc->device->lun >> 8) | 0x40,
                .lun[3] = sc->device->lun & 0xff,
        };
        return virtscsi_tmf(vscsi, cmd);
}

static int virtscsi_abort(struct scsi_cmnd *sc)
{
        struct virtio_scsi *vscsi = shost_priv(sc->device->host);
        struct virtio_scsi_cmd *cmd;

        scmd_printk(KERN_INFO, sc, "abort\n");
        cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
        if (!cmd)
                return FAILED;

        memset(cmd, 0, sizeof(*cmd));
        cmd->sc = sc;
        cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
                .type = VIRTIO_SCSI_T_TMF,
                .subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
                .lun[0] = 1,
                .lun[1] = sc->device->id,
                .lun[2] = (sc->device->lun >> 8) | 0x40,
                .lun[3] = sc->device->lun & 0xff,
                .tag = (unsigned long)sc,
        };
        return virtscsi_tmf(vscsi, cmd);
}

static int virtscsi_target_alloc(struct scsi_target *starget)
{
        struct virtio_scsi_target_state *tgt =
                                kmalloc(sizeof(*tgt), GFP_KERNEL);
        if (!tgt)
                return -ENOMEM;

        spin_lock_init(&tgt->tgt_lock);
        atomic_set(&tgt->reqs, 0);
        tgt->req_vq = NULL;

        starget->hostdata = tgt;
        return 0;
}

static void virtscsi_target_destroy(struct scsi_target *starget)
{
        struct virtio_scsi_target_state *tgt = starget->hostdata;
        kfree(tgt);
}

static struct scsi_host_template virtscsi_host_template_single = {
        .module = THIS_MODULE,
        .name = "Virtio SCSI HBA",
        .proc_name = "virtio_scsi",
        .this_id = -1,
        .cmd_size = sizeof(struct virtio_scsi_cmd),
        .queuecommand = virtscsi_queuecommand_single,
        .eh_abort_handler = virtscsi_abort,
        .eh_device_reset_handler = virtscsi_device_reset,

        .can_queue = 1024,
        .dma_boundary = UINT_MAX,
        .use_clustering = ENABLE_CLUSTERING,
        .target_alloc = virtscsi_target_alloc,
        .target_destroy = virtscsi_target_destroy,
};

static struct scsi_host_template virtscsi_host_template_multi = {
        .module = THIS_MODULE,
        .name = "Virtio SCSI HBA",
        .proc_name = "virtio_scsi",
        .this_id = -1,
        .cmd_size = sizeof(struct virtio_scsi_cmd),
        .queuecommand = virtscsi_queuecommand_multi,
        .eh_abort_handler = virtscsi_abort,
        .eh_device_reset_handler = virtscsi_device_reset,

        .can_queue = 1024,
        .dma_boundary = UINT_MAX,
        .use_clustering = ENABLE_CLUSTERING,
        .target_alloc = virtscsi_target_alloc,
        .target_destroy = virtscsi_target_destroy,
};
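
/*
 * Typed accessors for struct virtio_scsi_config fields, wrapping
 * virtio_cread()/virtio_cwrite() so the field type is inferred, e.g.:
 *
 *	cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
 */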
#define virtscsi_config_get(vdev, fld) \
	({ \
		typeof(((struct virtio_scsi_config *)0)->fld) __val; \
		virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \
		__val; \
	})

#define virtscsi_config_set(vdev, fld, val) \
	do { \
		typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
		virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \
	} while (0)

static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
{
        int i;
        int cpu;

        /* In multiqueue mode, when the number of CPUs is equal to the
         * number of request queues, we let each queue be private to one
         * CPU by setting the affinity hint to eliminate contention.
         */
        if ((vscsi->num_queues == 1 ||
             vscsi->num_queues != num_online_cpus()) && affinity) {
                if (vscsi->affinity_hint_set)
                        affinity = false;
                else
                        return;
        }

        if (affinity) {
                i = 0;
                for_each_online_cpu(cpu) {
                        virtqueue_set_affinity(vscsi->req_vqs[i].vq, cpu);
                        i++;
                }

                vscsi->affinity_hint_set = true;
        } else {
                for (i = 0; i < vscsi->num_queues; i++) {
                        if (!vscsi->req_vqs[i].vq)
                                continue;

                        virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1);
                }

                vscsi->affinity_hint_set = false;
        }
}

static void virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
{
        get_online_cpus();
        __virtscsi_set_affinity(vscsi, affinity);
        put_online_cpus();
}

static int virtscsi_cpu_callback(struct notifier_block *nfb,
                                 unsigned long action, void *hcpu)
{
        struct virtio_scsi *vscsi = container_of(nfb, struct virtio_scsi, nb);

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                __virtscsi_set_affinity(vscsi, true);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
                             struct virtqueue *vq)
{
        spin_lock_init(&virtscsi_vq->vq_lock);
        virtscsi_vq->vq = vq;
}

static void virtscsi_scan(struct virtio_device *vdev)
{
        struct Scsi_Host *shost = (struct Scsi_Host *)vdev->priv;

        scsi_scan_host(shost);
}

static void virtscsi_remove_vqs(struct virtio_device *vdev)
{
        struct Scsi_Host *sh = virtio_scsi_host(vdev);
        struct virtio_scsi *vscsi = shost_priv(sh);

        virtscsi_set_affinity(vscsi, false);

        /* Stop all the virtqueues. */
        vdev->config->reset(vdev);

        vdev->config->del_vqs(vdev);
}
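
/*
 * Discover all virtqueues and wire up their callbacks: index 0 is the
 * control queue, index 1 the event queue, and VIRTIO_SCSI_VQ_BASE onward
 * the request queues.
 */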
static int virtscsi_init(struct virtio_device *vdev,
                         struct virtio_scsi *vscsi)
{
        int err;
        u32 i;
        u32 num_vqs;
        vq_callback_t **callbacks;
        const char **names;
        struct virtqueue **vqs;

        num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE;
        vqs = kmalloc(num_vqs * sizeof(struct virtqueue *), GFP_KERNEL);
        callbacks = kmalloc(num_vqs * sizeof(vq_callback_t *), GFP_KERNEL);
        names = kmalloc(num_vqs * sizeof(char *), GFP_KERNEL);

        if (!callbacks || !vqs || !names) {
                err = -ENOMEM;
                goto out;
        }

        callbacks[0] = virtscsi_ctrl_done;
        callbacks[1] = virtscsi_event_done;
        names[0] = "control";
        names[1] = "event";
        for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) {
                callbacks[i] = virtscsi_req_done;
                names[i] = "request";
        }

        /* Discover virtqueues and write information to configuration. */
        err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names);
        if (err)
                goto out;

        virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]);
        virtscsi_init_vq(&vscsi->event_vq, vqs[1]);
        for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++)
                virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE],
                                 vqs[i]);

        virtscsi_set_affinity(vscsi, true);

        virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
        virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);

        if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
                virtscsi_kick_event_all(vscsi);

        err = 0;

out:
        kfree(names);
        kfree(callbacks);
        kfree(vqs);
        if (err)
                virtscsi_remove_vqs(vdev);
        return err;
}
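
/*
 * Probe: size the host from the device configuration, allocate the
 * Scsi_Host with the per-queue state appended, set up the virtqueues and
 * the CPU hotplug notifier, then register with the SCSI midlayer.
 * Scanning is deferred to virtscsi_scan().
 */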
static int virtscsi_probe(struct virtio_device *vdev)
{
        struct Scsi_Host *shost;
        struct virtio_scsi *vscsi;
        int err, host_prot;
        u32 sg_elems, num_targets;
        u32 cmd_per_lun;
        u32 num_queues;
        struct scsi_host_template *hostt;

        /* We need to know how many queues before we allocate. */
        num_queues = virtscsi_config_get(vdev, num_queues) ?: 1;

        num_targets = virtscsi_config_get(vdev, max_target) + 1;

        if (num_queues == 1)
                hostt = &virtscsi_host_template_single;
        else
                hostt = &virtscsi_host_template_multi;

        shost = scsi_host_alloc(hostt,
                sizeof(*vscsi) + sizeof(vscsi->req_vqs[0]) * num_queues);
        if (!shost)
                return -ENOMEM;

        sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
        shost->sg_tablesize = sg_elems;
        vscsi = shost_priv(shost);
        vscsi->vdev = vdev;
        vscsi->num_queues = num_queues;
        vdev->priv = shost;

        err = virtscsi_init(vdev, vscsi);
        if (err)
                goto virtscsi_init_failed;

        vscsi->nb.notifier_call = &virtscsi_cpu_callback;
        err = register_hotcpu_notifier(&vscsi->nb);
        if (err) {
                pr_err("registering cpu notifier failed\n");
                goto scsi_add_host_failed;
        }

        cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
        shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
        shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;

        /* LUNs > 256 are reported with format 1, so they go in the range
         * 16640-32767.
         */
        shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1 + 0x4000;
        shost->max_id = num_targets;
        shost->max_channel = 0;
        shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;

        if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
                host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
                            SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
                            SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;

                scsi_host_set_prot(shost, host_prot);
                scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
        }

        err = scsi_add_host(shost, &vdev->dev);
        if (err)
                goto scsi_add_host_failed;

        /*
         * scsi_scan_host() happens in virtscsi_scan() via virtio_driver->scan()
         * after VIRTIO_CONFIG_S_DRIVER_OK has been set.
         */
        return 0;

scsi_add_host_failed:
        vdev->config->del_vqs(vdev);
virtscsi_init_failed:
        scsi_host_put(shost);
        return err;
}

static void virtscsi_remove(struct virtio_device *vdev)
{
        struct Scsi_Host *shost = virtio_scsi_host(vdev);
        struct virtio_scsi *vscsi = shost_priv(shost);

        if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
                virtscsi_cancel_event_work(vscsi);

        scsi_remove_host(shost);

        unregister_hotcpu_notifier(&vscsi->nb);
        virtscsi_remove_vqs(vdev);
        scsi_host_put(shost);
}

#ifdef CONFIG_PM_SLEEP
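/*
 * Across a suspend/resume cycle the device is reset, so all virtqueues are
 * torn down on freeze and rebuilt on restore.
 */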
static int virtscsi_freeze(struct virtio_device *vdev)
{
        struct Scsi_Host *sh = virtio_scsi_host(vdev);
        struct virtio_scsi *vscsi = shost_priv(sh);

        unregister_hotcpu_notifier(&vscsi->nb);
        virtscsi_remove_vqs(vdev);
        return 0;
}

static int virtscsi_restore(struct virtio_device *vdev)
{
        struct Scsi_Host *sh = virtio_scsi_host(vdev);
        struct virtio_scsi *vscsi = shost_priv(sh);
        int err;

        err = virtscsi_init(vdev, vscsi);
        if (err)
                return err;

        err = register_hotcpu_notifier(&vscsi->nb);
        if (err)
                vdev->config->del_vqs(vdev);

        return err;
}
#endif

static struct virtio_device_id id_table[] = {
        { VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static unsigned int features[] = {
        VIRTIO_SCSI_F_HOTPLUG,
        VIRTIO_SCSI_F_CHANGE,
        VIRTIO_SCSI_F_T10_PI,
};

static struct virtio_driver virtio_scsi_driver = {
        .feature_table = features,
        .feature_table_size = ARRAY_SIZE(features),
        .driver.name = KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .id_table = id_table,
        .probe = virtscsi_probe,
        .scan = virtscsi_scan,
#ifdef CONFIG_PM_SLEEP
        .freeze = virtscsi_freeze,
        .restore = virtscsi_restore,
#endif
        .remove = virtscsi_remove,
};

static int __init init(void)
{
        int ret = -ENOMEM;

        virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0);
        if (!virtscsi_cmd_cache) {
                pr_err("kmem_cache_create() for virtscsi_cmd_cache failed\n");
                goto error;
        }

        virtscsi_cmd_pool =
                mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ,
                                         virtscsi_cmd_cache);
        if (!virtscsi_cmd_pool) {
                pr_err("mempool_create() for virtscsi_cmd_pool failed\n");
                goto error;
        }

        ret = register_virtio_driver(&virtio_scsi_driver);
        if (ret < 0)
                goto error;

        return 0;

error:
        if (virtscsi_cmd_pool) {
                mempool_destroy(virtscsi_cmd_pool);
                virtscsi_cmd_pool = NULL;
        }
        if (virtscsi_cmd_cache) {
                kmem_cache_destroy(virtscsi_cmd_cache);
                virtscsi_cmd_cache = NULL;
        }
        return ret;
}

static void __exit fini(void)
{
        unregister_virtio_driver(&virtio_scsi_driver);
        mempool_destroy(virtscsi_cmd_pool);
        kmem_cache_destroy(virtscsi_cmd_cache);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio SCSI HBA driver");
MODULE_LICENSE("GPL");