scm_blk.c

/*
 * Block driver for s390 storage class memory.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#define KMSG_COMPONENT "scm_block"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/eadm.h>
#include "scm_blk.h"

debug_info_t *scm_debug;
static int scm_major;
static mempool_t *aidaw_pool;
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(inactive_requests);
static unsigned int nr_requests = 64;
static unsigned int nr_requests_per_io = 8;
static atomic_t nr_devices = ATOMIC_INIT(0);
module_param(nr_requests, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");
module_param(nr_requests_per_io, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests_per_io, "Number of requests per IO.");
MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("scm:scmdev*");

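/*
 * Free one preallocated request: its AOB page, its cluster-write state,
 * the per-AOB request pointer array, and the surrounding aob_rq_header.
 */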
static void __scm_free_rq(struct scm_request *scmrq)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);

	free_page((unsigned long) scmrq->aob);
	__scm_free_rq_cluster(scmrq);
	kfree(scmrq->request);
	kfree(aobrq);
}

static void scm_free_rqs(void)
{
	struct list_head *iter, *safe;
	struct scm_request *scmrq;

	spin_lock_irq(&list_lock);
	list_for_each_safe(iter, safe, &inactive_requests) {
		scmrq = list_entry(iter, struct scm_request, list);
		list_del(&scmrq->list);
		__scm_free_rq(scmrq);
	}
	spin_unlock_irq(&list_lock);

	mempool_destroy(aidaw_pool);
}

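/*
 * Allocate one request: an aob_rq_header with an embedded scm_request, a
 * zeroed AOB page from GFP_DMA memory, and an array of nr_requests_per_io
 * block layer request pointers. On success the request is put on the
 * inactive list.
 */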
static int __scm_alloc_rq(void)
{
	struct aob_rq_header *aobrq;
	struct scm_request *scmrq;

	aobrq = kzalloc(sizeof(*aobrq) + sizeof(*scmrq), GFP_KERNEL);
	if (!aobrq)
		return -ENOMEM;

	scmrq = (void *) aobrq->data;
	scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
	if (!scmrq->aob)
		goto free;

	scmrq->request = kcalloc(nr_requests_per_io, sizeof(scmrq->request[0]),
				 GFP_KERNEL);
	if (!scmrq->request)
		goto free;

	if (__scm_alloc_rq_cluster(scmrq))
		goto free;

	INIT_LIST_HEAD(&scmrq->list);
	spin_lock_irq(&list_lock);
	list_add(&scmrq->list, &inactive_requests);
	spin_unlock_irq(&list_lock);

	return 0;
free:
	__scm_free_rq(scmrq);
	return -ENOMEM;
}

static int scm_alloc_rqs(unsigned int nrqs)
{
	int ret = 0;

	aidaw_pool = mempool_create_page_pool(max(nrqs/8, 1U), 0);
	if (!aidaw_pool)
		return -ENOMEM;

	while (nrqs-- && !ret)
		ret = __scm_alloc_rq();

	return ret;
}

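/* Take a free scm_request off the inactive list, or return NULL if none is left. */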
static struct scm_request *scm_request_fetch(void)
{
	struct scm_request *scmrq = NULL;

	spin_lock(&list_lock);
	if (list_empty(&inactive_requests))
		goto out;
	scmrq = list_first_entry(&inactive_requests, struct scm_request, list);
	list_del(&scmrq->list);
out:
	spin_unlock(&list_lock);
	return scmrq;
}

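/*
 * Return a request to the inactive list. Aidaw pages taken from the mempool
 * (MSB_FLAG_IDA set and page aligned) are freed first; aidaws placed inside
 * the AOB page itself are left alone.
 */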
static void scm_request_done(struct scm_request *scmrq)
{
	unsigned long flags;
	struct msb *msb;
	u64 aidaw;
	int i;

	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
		msb = &scmrq->aob->msb[i];
		aidaw = msb->data_addr;

		if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
		    IS_ALIGNED(aidaw, PAGE_SIZE))
			mempool_free(virt_to_page(aidaw), aidaw_pool);
	}

	spin_lock_irqsave(&list_lock, flags);
	list_add(&scmrq->list, &inactive_requests);
	spin_unlock_irqrestore(&list_lock, flags);
}

static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
{
	return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
}

static inline struct aidaw *scm_aidaw_alloc(void)
{
	struct page *page = mempool_alloc(aidaw_pool, GFP_ATOMIC);

	return page ? page_address(page) : NULL;
}

static inline unsigned long scm_aidaw_bytes(struct aidaw *aidaw)
{
	unsigned long _aidaw = (unsigned long) aidaw;
	unsigned long bytes = ALIGN(_aidaw, PAGE_SIZE) - _aidaw;

	return (bytes / sizeof(*aidaw)) * PAGE_SIZE;
}

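/*
 * Get aidaw space for a request of the given size: reuse the remainder of
 * the current aidaw page if it can still describe 'bytes' of data,
 * otherwise allocate a fresh page from the mempool.
 */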
struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes)
{
	struct aidaw *aidaw;

	if (scm_aidaw_bytes(scmrq->next_aidaw) >= bytes)
		return scmrq->next_aidaw;

	aidaw = scm_aidaw_alloc();
	if (aidaw)
		memset(aidaw, 0, PAGE_SIZE);
	return aidaw;
}

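/*
 * Fill in the next msb of the AOB for the request at the current position:
 * set block size, operation code and starting SCM address, and build the
 * indirect data address list, one aidaw per bio segment.
 */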
static int scm_request_prepare(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	struct scm_device *scmdev = bdev->gendisk->private_data;
	int pos = scmrq->aob->request.msb_count;
	struct msb *msb = &scmrq->aob->msb[pos];
	struct request *req = scmrq->request[pos];
	struct req_iterator iter;
	struct aidaw *aidaw;
	struct bio_vec bv;

	aidaw = scm_aidaw_fetch(scmrq, blk_rq_bytes(req));
	if (!aidaw)
		return -ENOMEM;

	msb->bs = MSB_BS_4K;
	scmrq->aob->request.msb_count++;
	msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
	msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
	msb->flags |= MSB_FLAG_IDA;
	msb->data_addr = (u64) aidaw;

	rq_for_each_segment(bv, req, iter) {
		WARN_ON(bv.bv_offset);
		msb->blk_count += bv.bv_len >> 12;
		aidaw->data_addr = (u64) page_address(bv.bv_page);
		aidaw++;
	}

	scmrq->next_aidaw = aidaw;
	return 0;
}

static inline void scm_request_set(struct scm_request *scmrq,
				   struct request *req)
{
	scmrq->request[scmrq->aob->request.msb_count] = req;
}

static inline void scm_request_init(struct scm_blk_dev *bdev,
				    struct scm_request *scmrq)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);
	struct aob *aob = scmrq->aob;

	memset(scmrq->request, 0,
	       nr_requests_per_io * sizeof(scmrq->request[0]));
	memset(aob, 0, sizeof(*aob));
	aobrq->scmdev = bdev->scmdev;
	aob->request.cmd_code = ARQB_CMD_MOVE;
	aob->request.data = (u64) aobrq;
	scmrq->bdev = bdev;
	scmrq->retries = 4;
	scmrq->error = 0;
	/* We don't use all msbs - place aidaws at the end of the aob page. */
	scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io];

	scm_request_cluster_init(scmrq);
}

static void scm_ensure_queue_restart(struct scm_blk_dev *bdev)
{
	if (atomic_read(&bdev->queued_reqs)) {
		/* Queue restart is triggered by the next interrupt. */
		return;
	}
	blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY);
}

void scm_request_requeue(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	int i;

	scm_release_cluster(scmrq);
	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
		blk_requeue_request(bdev->rq, scmrq->request[i]);

	atomic_dec(&bdev->queued_reqs);
	scm_request_done(scmrq);
	scm_ensure_queue_restart(bdev);
}

void scm_request_finish(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	int i;

	scm_release_cluster(scmrq);
	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
		blk_end_request_all(scmrq->request[i], scmrq->error);

	atomic_dec(&bdev->queued_reqs);
	scm_request_done(scmrq);
}

static int scm_request_start(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	int ret;

	atomic_inc(&bdev->queued_reqs);
	if (!scmrq->aob->request.msb_count) {
		scm_request_requeue(scmrq);
		return -EINVAL;
	}

	ret = eadm_start_aob(scmrq->aob);
	if (ret) {
		SCM_LOG(5, "no subchannel");
		scm_request_requeue(scmrq);
	}
	return ret;
}

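/*
 * Request function of the block queue: pack up to nr_requests_per_io block
 * layer requests into one AOB and start it via the EADM subchannel.
 * Requests that need special cluster handling are started on their own.
 */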
static void scm_blk_request(struct request_queue *rq)
{
	struct scm_device *scmdev = rq->queuedata;
	struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
	struct scm_request *scmrq = NULL;
	struct request *req;

	while ((req = blk_peek_request(rq))) {
		if (!scm_permit_request(bdev, req))
			goto out;

		if (!scmrq) {
			scmrq = scm_request_fetch();
			if (!scmrq) {
				SCM_LOG(5, "no request");
				goto out;
			}
			scm_request_init(bdev, scmrq);
		}
		scm_request_set(scmrq, req);

		if (!scm_reserve_cluster(scmrq)) {
			SCM_LOG(5, "cluster busy");
			scm_request_set(scmrq, NULL);
			if (scmrq->aob->request.msb_count)
				goto out;

			scm_request_done(scmrq);
			return;
		}

		if (scm_need_cluster_request(scmrq)) {
			if (scmrq->aob->request.msb_count) {
				/* Start cluster requests separately. */
				scm_request_set(scmrq, NULL);
				if (scm_request_start(scmrq))
					return;
			} else {
				atomic_inc(&bdev->queued_reqs);
				blk_start_request(req);
				scm_initiate_cluster_request(scmrq);
			}
			scmrq = NULL;
			continue;
		}

		if (scm_request_prepare(scmrq)) {
			SCM_LOG(5, "aidaw alloc failed");
			scm_request_set(scmrq, NULL);
			goto out;
		}
		blk_start_request(req);

		if (scmrq->aob->request.msb_count < nr_requests_per_io)
			continue;

		if (scm_request_start(scmrq))
			return;

		scmrq = NULL;
	}
out:
	if (scmrq)
		scm_request_start(scmrq);
	else
		scm_ensure_queue_restart(bdev);
}

static void __scmrq_log_error(struct scm_request *scmrq)
{
	struct aob *aob = scmrq->aob;

	if (scmrq->error == -ETIMEDOUT)
		SCM_LOG(1, "Request timeout");
	else {
		SCM_LOG(1, "Request error");
		SCM_LOG_HEX(1, &aob->response, sizeof(aob->response));
	}
	if (scmrq->retries)
		SCM_LOG(1, "Retry request");
	else
		pr_err("An I/O operation to SCM failed with rc=%d\n",
		       scmrq->error);
}

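/*
 * Interrupt callback for a completed AOB: record the error, put the request
 * on the finished list and defer further processing to the tasklet.
 */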
void scm_blk_irq(struct scm_device *scmdev, void *data, int error)
{
	struct scm_request *scmrq = data;
	struct scm_blk_dev *bdev = scmrq->bdev;

	scmrq->error = error;
	if (error)
		__scmrq_log_error(scmrq);

	spin_lock(&bdev->lock);
	list_add_tail(&scmrq->list, &bdev->finished_requests);
	spin_unlock(&bdev->lock);
	tasklet_hi_schedule(&bdev->tasklet);
}

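/*
 * Retry handling: on EQC_WR_PROHIBIT mark the device write-prohibited and
 * requeue the request; otherwise try to restart the AOB, falling back to a
 * requeue if the restart fails as well.
 */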
static void scm_blk_handle_error(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	unsigned long flags;

	if (scmrq->error != -EIO)
		goto restart;

	/* For -EIO the response block is valid. */
	switch (scmrq->aob->response.eqc) {
	case EQC_WR_PROHIBIT:
		spin_lock_irqsave(&bdev->lock, flags);
		if (bdev->state != SCM_WR_PROHIBIT)
			pr_info("%lx: Write access to the SCM increment is suspended\n",
				(unsigned long) bdev->scmdev->address);
		bdev->state = SCM_WR_PROHIBIT;
		spin_unlock_irqrestore(&bdev->lock, flags);
		goto requeue;
	default:
		break;
	}

restart:
	if (!eadm_start_aob(scmrq->aob))
		return;

requeue:
	spin_lock_irqsave(&bdev->rq_lock, flags);
	scm_request_requeue(scmrq);
	spin_unlock_irqrestore(&bdev->rq_lock, flags);
}

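/*
 * Tasklet: walk the list of finished requests, retry failed ones while
 * scmrq->retries is left, hand cluster requests back to the cluster code,
 * complete the rest, then kick the queue for more work.
 */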
static void scm_blk_tasklet(struct scm_blk_dev *bdev)
{
	struct scm_request *scmrq;
	unsigned long flags;

	spin_lock_irqsave(&bdev->lock, flags);
	while (!list_empty(&bdev->finished_requests)) {
		scmrq = list_first_entry(&bdev->finished_requests,
					 struct scm_request, list);
		list_del(&scmrq->list);
		spin_unlock_irqrestore(&bdev->lock, flags);

		if (scmrq->error && scmrq->retries-- > 0) {
			scm_blk_handle_error(scmrq);

			/* Request restarted or requeued, handle next. */
			spin_lock_irqsave(&bdev->lock, flags);
			continue;
		}

		if (scm_test_cluster_request(scmrq)) {
			scm_cluster_request_irq(scmrq);
			spin_lock_irqsave(&bdev->lock, flags);
			continue;
		}

		scm_request_finish(scmrq);
		spin_lock_irqsave(&bdev->lock, flags);
	}
	spin_unlock_irqrestore(&bdev->lock, flags);

	/* Look out for more requests. */
	blk_run_queue(bdev->rq);
}

static const struct block_device_operations scm_blk_devops = {
	.owner = THIS_MODULE,
};

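/*
 * Set up the block device for an SCM increment: initialize the request
 * queue with 4K logical blocks and segment/sector limits derived from the
 * device limit and the number of aidaws that fit into one page, then
 * allocate and register the gendisk. Disks are named scma..scmz followed
 * by scmaa..scmzz.
 */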
int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
{
	struct request_queue *rq;
	int len, ret = -ENOMEM;
	unsigned int devindex, nr_max_blk;

	devindex = atomic_inc_return(&nr_devices) - 1;
	/* scma..scmz + scmaa..scmzz */
	if (devindex > 701) {
		ret = -ENODEV;
		goto out;
	}

	bdev->scmdev = scmdev;
	bdev->state = SCM_OPER;
	spin_lock_init(&bdev->rq_lock);
	spin_lock_init(&bdev->lock);
	INIT_LIST_HEAD(&bdev->finished_requests);
	atomic_set(&bdev->queued_reqs, 0);
	tasklet_init(&bdev->tasklet,
		     (void (*)(unsigned long)) scm_blk_tasklet,
		     (unsigned long) bdev);

	rq = blk_init_queue(scm_blk_request, &bdev->rq_lock);
	if (!rq)
		goto out;

	bdev->rq = rq;
	nr_max_blk = min(scmdev->nr_max_block,
			 (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));

	blk_queue_logical_block_size(rq, 1 << 12);
	blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
	blk_queue_max_segments(rq, nr_max_blk);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, rq);

	scm_blk_dev_cluster_setup(bdev);

	bdev->gendisk = alloc_disk(SCM_NR_PARTS);
	if (!bdev->gendisk)
		goto out_queue;

	rq->queuedata = scmdev;
	bdev->gendisk->private_data = scmdev;
	bdev->gendisk->fops = &scm_blk_devops;
	bdev->gendisk->queue = rq;
	bdev->gendisk->major = scm_major;
	bdev->gendisk->first_minor = devindex * SCM_NR_PARTS;

	len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm");
	if (devindex > 25) {
		len += snprintf(bdev->gendisk->disk_name + len,
				DISK_NAME_LEN - len, "%c",
				'a' + (devindex / 26) - 1);
		devindex = devindex % 26;
	}
	snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c",
		 'a' + devindex);

	/* 512 byte sectors */
	set_capacity(bdev->gendisk, scmdev->size >> 9);
	device_add_disk(&scmdev->dev, bdev->gendisk);
	return 0;

out_queue:
	blk_cleanup_queue(rq);
out:
	atomic_dec(&nr_devices);
	return ret;
}

void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
{
	tasklet_kill(&bdev->tasklet);
	del_gendisk(bdev->gendisk);
	blk_cleanup_queue(bdev->gendisk->queue);
	put_disk(bdev->gendisk);
}

void scm_blk_set_available(struct scm_blk_dev *bdev)
{
	unsigned long flags;

	spin_lock_irqsave(&bdev->lock, flags);
	if (bdev->state == SCM_WR_PROHIBIT)
		pr_info("%lx: Write access to the SCM increment is restored\n",
			(unsigned long) bdev->scmdev->address);
	bdev->state = SCM_OPER;
	spin_unlock_irqrestore(&bdev->lock, flags);
}

static bool __init scm_blk_params_valid(void)
{
	if (!nr_requests_per_io || nr_requests_per_io > 64)
		return false;

	return scm_cluster_size_valid();
}

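/*
 * Module init: validate the module parameters, register the block major,
 * preallocate the request pool, set up the s390 debug feature and register
 * the SCM device driver. Teardown happens in reverse order on failure.
 */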
static int __init scm_blk_init(void)
{
	int ret = -EINVAL;

	if (!scm_blk_params_valid())
		goto out;

	ret = register_blkdev(0, "scm");
	if (ret < 0)
		goto out;

	scm_major = ret;
	ret = scm_alloc_rqs(nr_requests);
	if (ret)
		goto out_free;

	scm_debug = debug_register("scm_log", 16, 1, 16);
	if (!scm_debug) {
		ret = -ENOMEM;
		goto out_free;
	}

	debug_register_view(scm_debug, &debug_hex_ascii_view);
	debug_set_level(scm_debug, 2);

	ret = scm_drv_init();
	if (ret)
		goto out_dbf;

	return ret;

out_dbf:
	debug_unregister(scm_debug);
out_free:
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
out:
	return ret;
}
module_init(scm_blk_init);

static void __exit scm_blk_cleanup(void)
{
	scm_drv_cleanup();
	debug_unregister(scm_debug);
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
}
module_exit(scm_blk_cleanup);