/*******************************************************************************
 * Filename: target_core_rd.c
 *
 * This file contains the Storage Engine <-> Ramdisk transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_backend_configfs.h>

#include "target_core_rd.h"

static inline struct rd_dev *RD_DEV(struct se_device *dev)
{
	return container_of(dev, struct rd_dev, dev);
}

/*	rd_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *
 */
static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct rd_host *rd_host;

	rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
	if (!rd_host) {
		pr_err("Unable to allocate memory for struct rd_host\n");
		return -ENOMEM;
	}

	rd_host->rd_host_id = host_id;

	hba->hba_ptr = rd_host;

	pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);

	return 0;
}

static void rd_detach_hba(struct se_hba *hba)
{
	struct rd_host *rd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

	kfree(rd_host);
	hba->hba_ptr = NULL;
}
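
/*
 * Free every backing page referenced by an array of scatterlist tables and
 * then the tables themselves; returns the number of pages actually freed so
 * the callers below can log how much memory was released.
 */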
static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
				u32 sg_table_count)
{
	struct page *pg;
	struct scatterlist *sg;
	u32 i, j, page_count = 0, sg_per_table;

	for (i = 0; i < sg_table_count; i++) {
		sg = sg_table[i].sg_table;
		sg_per_table = sg_table[i].rd_sg_count;

		for (j = 0; j < sg_per_table; j++) {
			pg = sg_page(&sg[j]);
			if (pg) {
				__free_page(pg);
				page_count++;
			}
		}
		kfree(sg);
	}

	kfree(sg_table);
	return page_count;
}

static void rd_release_device_space(struct rd_dev *rd_dev)
{
	u32 page_count;

	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
		return;

	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
					  rd_dev->sg_table_count);

	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

	rd_dev->sg_table_array = NULL;
	rd_dev->sg_table_count = 0;
}

/*	rd_build_device_space():
 *
 *
 */
static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
				 u32 total_sg_needed, unsigned char init_payload)
{
	u32 i = 0, j, page_offset = 0, sg_per_table;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	struct page *pg;
	struct scatterlist *sg;
	unsigned char *p;

	while (total_sg_needed) {
		sg_per_table = (total_sg_needed > max_sg_per_table) ?
			max_sg_per_table : total_sg_needed;

		sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
				GFP_KERNEL);
		if (!sg) {
			pr_err("Unable to allocate scatterlist array"
				" for struct rd_dev\n");
			return -ENOMEM;
		}

		sg_init_table(sg, sg_per_table);

		sg_table[i].sg_table = sg;
		sg_table[i].rd_sg_count = sg_per_table;
		sg_table[i].page_start_offset = page_offset;
		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
						- 1;

		for (j = 0; j < sg_per_table; j++) {
			pg = alloc_pages(GFP_KERNEL, 0);
			if (!pg) {
				pr_err("Unable to allocate scatterlist"
					" pages for struct rd_dev_sg_table\n");
				return -ENOMEM;
			}
			sg_assign_page(&sg[j], pg);
			sg[j].length = PAGE_SIZE;

			p = kmap(pg);
			memset(p, init_payload, PAGE_SIZE);
			kunmap(pg);
		}

		page_offset += sg_per_table;
		total_sg_needed -= sg_per_table;
	}

	return 0;
}
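
/*
 * Size the backing store from the user-supplied rd_page_count: each table
 * holds at most RD_MAX_ALLOCATION_SIZE / sizeof(struct scatterlist) entries,
 * so the page count is split across (count / max_sg_per_table) + 1 tables,
 * which rd_allocate_sgl_table() then fills with zero-initialized pages.
 */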
static int rd_build_device_space(struct rd_dev *rd_dev)
{
	struct rd_dev_sg_table *sg_table;
	u32 sg_tables, total_sg_needed;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	int rc;

	if (rd_dev->rd_page_count <= 0) {
		pr_err("Illegal page count: %u for Ramdisk device\n",
		       rd_dev->rd_page_count);
		return -EINVAL;
	}

	/* Don't need backing pages for NULLIO */
	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;

	total_sg_needed = rd_dev->rd_page_count;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk"
		       " scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_table_array = sg_table;
	rd_dev->sg_table_count = sg_tables;

	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
	if (rc)
		return rc;

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		 rd_dev->rd_dev_id, rd_dev->rd_page_count,
		 rd_dev->sg_table_count);

	return 0;
}

static void rd_release_prot_space(struct rd_dev *rd_dev)
{
	u32 page_count;

	if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count)
		return;

	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,
					  rd_dev->sg_prot_count);

	pr_debug("CORE_RD[%u] - Released protection space for Ramdisk"
		 " Device ID: %u, pages %u in %u tables total bytes %lu\n",
		 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		 rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

	rd_dev->sg_prot_array = NULL;
	rd_dev->sg_prot_count = 0;
}

static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size)
{
	struct rd_dev_sg_table *sg_table;
	u32 total_sg_needed, sg_tables;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	int rc;

	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;
	/*
	 * prot_length=8byte dif data
	 * tot sg needed = rd_page_count * (PGSZ/block_size) *
	 *		   (prot_length/block_size) + pad
	 * PGSZ canceled each other.
	 */
	total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk protection"
		       " scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_prot_array = sg_table;
	rd_dev->sg_prot_count = sg_tables;

	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff);
	if (rc)
		return rc;

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of"
		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		 rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);

	return 0;
}
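
/*
 * Device lifecycle: rd_alloc_device() only allocates the rd_dev container;
 * the backing pages are built later in rd_configure_device(), once the
 * mandatory rd_pages= parameter has been supplied through configfs.
 */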
static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
{
	struct rd_dev *rd_dev;
	struct rd_host *rd_host = hba->hba_ptr;

	rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
	if (!rd_dev) {
		pr_err("Unable to allocate memory for struct rd_dev\n");
		return NULL;
	}

	rd_dev->rd_host = rd_host;

	return &rd_dev->dev;
}

static int rd_configure_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	struct rd_host *rd_host = dev->se_hba->hba_ptr;
	int ret;

	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
		pr_debug("Missing rd_pages= parameter\n");
		return -EINVAL;
	}

	ret = rd_build_device_space(rd_dev);
	if (ret < 0)
		goto fail;

	dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
	dev->dev_attrib.hw_max_sectors = UINT_MAX;
	dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;

	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;

	pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
		 " %u pages in %u tables, %lu total bytes\n",
		 rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
		 rd_dev->sg_table_count,
		 (unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

	return 0;

fail:
	rd_release_device_space(rd_dev);
	return ret;
}

static void rd_free_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_device_space(rd_dev);
	kfree(rd_dev);
}
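
/*
 * Map a ramdisk page index to the scatterlist table that holds it: the table
 * index is page / max_sg_per_table, and the [page_start_offset, page_end_offset]
 * range recorded at allocation time is used as a sanity check.
 */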
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_table_count) {
		sg_table = &rd_dev->sg_table_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
	       page);

	return NULL;
}

static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_prot_count) {
		sg_table = &rd_dev->sg_prot_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct prot rd_dev_sg_table for page: %u\n",
	       page);

	return NULL;
}
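
/*
 * Main I/O path: translate the command's LBA into a starting ramdisk page and
 * byte offset (LBA * block_size split by PAGE_SIZE), then walk the command's
 * scatterlist with a sg_mapping_iter and memcpy data to or from the backing
 * pages.  When T10-PI is enabled, WRITE data is verified before the copy and
 * READ data is verified after it via the sbc_dif_verify_* helpers.
 */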
static sense_reason_t
rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
{
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = RD_DEV(se_dev);
	struct rd_dev_sg_table *table;
	struct scatterlist *rd_sg;
	struct sg_mapping_iter m;
	u32 rd_offset;
	u32 rd_size;
	u32 rd_page;
	u32 src_len;
	u64 tmp;
	sense_reason_t rc;

	if (dev->rd_flags & RDF_NULLIO) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
	rd_offset = do_div(tmp, PAGE_SIZE);
	rd_page = tmp;
	rd_size = cmd->data_length;

	table = rd_get_sg_table(dev, rd_page);
	if (!table)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	rd_sg = &table->sg_table[rd_page - table->page_start_offset];

	pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
			dev->rd_dev_id,
			data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
			cmd->t_task_lba, rd_size, rd_page, rd_offset);

	if (cmd->prot_type && data_direction == DMA_TO_DEVICE) {
		struct rd_dev_sg_table *prot_table;
		struct scatterlist *prot_sg;
		u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
		u32 prot_offset, prot_page;

		tmp = cmd->t_task_lba * se_dev->prot_length;
		prot_offset = do_div(tmp, PAGE_SIZE);
		prot_page = tmp;

		prot_table = rd_get_prot_table(dev, prot_page);
		if (!prot_table)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

		prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset];

		rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors, 0,
					  prot_sg, prot_offset);
		if (rc)
			return rc;
	}

	src_len = PAGE_SIZE - rd_offset;
	sg_miter_start(&m, sgl, sgl_nents,
			data_direction == DMA_FROM_DEVICE ?
				SG_MITER_TO_SG : SG_MITER_FROM_SG);

	while (rd_size) {
		u32 len;
		void *rd_addr;

		sg_miter_next(&m);
		if (!(u32)m.length) {
			pr_debug("RD[%u]: invalid sgl %p len %zu\n",
				 dev->rd_dev_id, m.addr, m.length);
			sg_miter_stop(&m);
			return TCM_INCORRECT_AMOUNT_OF_DATA;
		}
		len = min((u32)m.length, src_len);
		if (len > rd_size) {
			pr_debug("RD[%u]: size underrun page %d offset %d "
				 "size %d\n", dev->rd_dev_id,
				 rd_page, rd_offset, rd_size);
			len = rd_size;
		}
		m.consumed = len;

		rd_addr = sg_virt(rd_sg) + rd_offset;

		if (data_direction == DMA_FROM_DEVICE)
			memcpy(m.addr, rd_addr, len);
		else
			memcpy(rd_addr, m.addr, len);

		rd_size -= len;
		if (!rd_size)
			continue;

		src_len -= len;
		if (src_len) {
			rd_offset += len;
			continue;
		}

		/* rd page completed, next one please */
		rd_page++;
		rd_offset = 0;
		src_len = PAGE_SIZE;
		if (rd_page <= table->page_end_offset) {
			rd_sg++;
			continue;
		}

		table = rd_get_sg_table(dev, rd_page);
		if (!table) {
			sg_miter_stop(&m);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		/* since we increment, the first sg entry is correct */
		rd_sg = table->sg_table;
	}
	sg_miter_stop(&m);

	if (cmd->prot_type && data_direction == DMA_FROM_DEVICE) {
		struct rd_dev_sg_table *prot_table;
		struct scatterlist *prot_sg;
		u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
		u32 prot_offset, prot_page;

		tmp = cmd->t_task_lba * se_dev->prot_length;
		prot_offset = do_div(tmp, PAGE_SIZE);
		prot_page = tmp;

		prot_table = rd_get_prot_table(dev, prot_page);
		if (!prot_table)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

		prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset];

		rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
					 prot_sg, prot_offset);
		if (rc)
			return rc;
	}

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}
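
/*
 * Device control parameters accepted through configfs, parsed below from a
 * comma/newline separated string, e.g. (page count illustrative):
 *	"rd_pages=65536,rd_nullio=1"
 */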
enum {
	Opt_rd_pages, Opt_rd_nullio, Opt_err
};

static match_table_t tokens = {
	{Opt_rd_pages, "rd_pages=%d"},
	{Opt_rd_nullio, "rd_nullio=%d"},
	{Opt_err, NULL}
};

static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_rd_pages:
			match_int(args, &arg);
			rd_dev->rd_page_count = arg;
			pr_debug("RAMDISK: Referencing Page"
				" Count: %u\n", rd_dev->rd_page_count);
			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
			break;
		case Opt_rd_nullio:
			match_int(args, &arg);
			if (arg != 1)
				break;

			pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
			rd_dev->rd_flags |= RDF_NULLIO;
			break;
		default:
			break;
		}
	}

	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n",
			rd_dev->rd_dev_id);
	bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
			" SG_table_count: %u nullio: %d\n", rd_dev->rd_page_count,
			PAGE_SIZE, rd_dev->sg_table_count,
			!!(rd_dev->rd_flags & RDF_NULLIO));
	return bl;
}
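
/*
 * Capacity reported to the SCSI layer: total bytes (rd_page_count * PAGE_SIZE)
 * divided by the logical block size, minus one because the returned value is
 * the last addressable LBA rather than the block count.
 */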
static sector_t rd_get_blocks(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
			dev->dev_attrib.block_size) - 1;

	return blocks_long;
}

static int rd_init_prot(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	if (!dev->dev_attrib.pi_prot_type)
		return 0;

	return rd_build_prot_space(rd_dev, dev->prot_length,
				   dev->dev_attrib.block_size);
}

static void rd_free_prot(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_prot_space(rd_dev);
}
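
/*
 * CDB parsing is delegated to the common SBC library; rd_sbc_ops only has to
 * provide the execute_rw callback that sbc_parse_cdb() wires up for READ and
 * WRITE commands.
 */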
static struct sbc_ops rd_sbc_ops = {
	.execute_rw		= rd_execute_rw,
};

static sense_reason_t
rd_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &rd_sbc_ops);
}
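
/*
 * DEF_TB_DEFAULT_ATTRIBS() (from target_core_backend_configfs.h) emits the
 * rd_mcp_dev_attrib_* configfs attribute definitions referenced in the array
 * below, which is hooked up as the backend's device attribute group at
 * module init time.
 */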
DEF_TB_DEFAULT_ATTRIBS(rd_mcp);

static struct configfs_attribute *rd_mcp_backend_dev_attrs[] = {
	&rd_mcp_dev_attrib_emulate_model_alias.attr,
	&rd_mcp_dev_attrib_emulate_dpo.attr,
	&rd_mcp_dev_attrib_emulate_fua_write.attr,
	&rd_mcp_dev_attrib_emulate_fua_read.attr,
	&rd_mcp_dev_attrib_emulate_write_cache.attr,
	&rd_mcp_dev_attrib_emulate_ua_intlck_ctrl.attr,
	&rd_mcp_dev_attrib_emulate_tas.attr,
	&rd_mcp_dev_attrib_emulate_tpu.attr,
	&rd_mcp_dev_attrib_emulate_tpws.attr,
	&rd_mcp_dev_attrib_emulate_caw.attr,
	&rd_mcp_dev_attrib_emulate_3pc.attr,
	&rd_mcp_dev_attrib_pi_prot_type.attr,
	&rd_mcp_dev_attrib_hw_pi_prot_type.attr,
	&rd_mcp_dev_attrib_pi_prot_format.attr,
	&rd_mcp_dev_attrib_enforce_pr_isids.attr,
	&rd_mcp_dev_attrib_is_nonrot.attr,
	&rd_mcp_dev_attrib_emulate_rest_reord.attr,
	&rd_mcp_dev_attrib_force_pr_aptpl.attr,
	&rd_mcp_dev_attrib_hw_block_size.attr,
	&rd_mcp_dev_attrib_block_size.attr,
	&rd_mcp_dev_attrib_hw_max_sectors.attr,
	&rd_mcp_dev_attrib_optimal_sectors.attr,
	&rd_mcp_dev_attrib_hw_queue_depth.attr,
	&rd_mcp_dev_attrib_queue_depth.attr,
	&rd_mcp_dev_attrib_max_unmap_lba_count.attr,
	&rd_mcp_dev_attrib_max_unmap_block_desc_count.attr,
	&rd_mcp_dev_attrib_unmap_granularity.attr,
	&rd_mcp_dev_attrib_unmap_granularity_alignment.attr,
	&rd_mcp_dev_attrib_max_write_same_len.attr,
	NULL,
};

static struct se_subsystem_api rd_mcp_template = {
	.name			= "rd_mcp",
	.inquiry_prod		= "RAMDISK-MCP",
	.inquiry_rev		= RD_MCP_VERSION,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
	.attach_hba		= rd_attach_hba,
	.detach_hba		= rd_detach_hba,
	.alloc_device		= rd_alloc_device,
	.configure_device	= rd_configure_device,
	.free_device		= rd_free_device,
	.parse_cdb		= rd_parse_cdb,
	.set_configfs_dev_params = rd_set_configfs_dev_params,
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= rd_get_blocks,
	.init_prot		= rd_init_prot,
	.free_prot		= rd_free_prot,
};
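
/*
 * Hook the default device attribute list into the template's configfs item
 * types and register the "rd_mcp" backend with the generic target core.
 */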
int __init rd_module_init(void)
{
	struct target_backend_cits *tbc = &rd_mcp_template.tb_cits;
	int ret;

	target_core_setup_sub_cits(&rd_mcp_template);
	tbc->tb_dev_attrib_cit.ct_attrs = rd_mcp_backend_dev_attrs;

	ret = transport_subsystem_register(&rd_mcp_template);
	if (ret < 0) {
		return ret;
	}

	return 0;
}

void rd_module_exit(void)
{
	transport_subsystem_release(&rd_mcp_template);
}