/*******************************************************************************
 * Filename: target_core_xcopy.c
 *
 * This file contains support for SPC-4 Extended-Copy offload with generic
 * TCM backends.
 *
 * Copyright (c) 2011-2013 Datera, Inc. All rights reserved.
 *
 * Author:
 * Nicholas A. Bellinger <nab@daterainc.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 ******************************************************************************/
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/configfs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <asm/unaligned.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>

#include "target_core_internal.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
#include "target_core_xcopy.h"

static struct workqueue_struct *xcopy_wq;
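
/*
 * Generate the same NAA IEEE Registered Extended designator that
 * spc_emulate_evpd_83() reports in INQUIRY VPD page 0x83 (OpenFabrics
 * IEEE Company ID 00 14 05 plus the vendor specific unit serial), so
 * that the WWN carried in an incoming 0xe4 target descriptor can be
 * compared byte-for-byte against local se_device WWNs.
 */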
static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
{
	int off = 0;

	buf[off++] = (0x6 << 4);
	buf[off++] = 0x01;
	buf[off++] = 0x40;
	buf[off] = (0x5 << 4);

	spc_parse_naa_6h_vendor_specific(dev, &buf[off]);
	return 0;
}
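
/*
 * Walk the global device list for a backend whose generated NAA WWN
 * matches the non-local side of the copy: when the EXTENDED_COPY was
 * received on the source device, locate the destination, and vice
 * versa. A configfs dependency is taken on the located device so it
 * cannot be removed while the copy is in flight; it is dropped again
 * in xcopy_pt_undepend_remotedev().
 */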
static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
					bool src)
{
	struct se_device *se_dev;
	struct configfs_subsystem *subsys = target_core_subsystem[0];
	unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn;
	int rc;

	if (src)
		dev_wwn = &xop->dst_tid_wwn[0];
	else
		dev_wwn = &xop->src_tid_wwn[0];

	mutex_lock(&g_device_mutex);
	list_for_each_entry(se_dev, &g_device_list, g_dev_node) {

		if (!se_dev->dev_attrib.emulate_3pc)
			continue;

		memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
		target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);

		rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
		if (rc != 0)
			continue;

		if (src) {
			xop->dst_dev = se_dev;
			pr_debug("XCOPY 0xe4: Setting xop->dst_dev: %p from located"
				" se_dev\n", xop->dst_dev);
		} else {
			xop->src_dev = se_dev;
			pr_debug("XCOPY 0xe4: Setting xop->src_dev: %p from located"
				" se_dev\n", xop->src_dev);
		}

		rc = configfs_depend_item(subsys,
				&se_dev->dev_group.cg_item);
		if (rc != 0) {
			pr_err("configfs_depend_item attempt failed:"
				" %d for se_dev: %p\n", rc, se_dev);
			mutex_unlock(&g_device_mutex);
			return rc;
		}

		pr_debug("Called configfs_depend_item for subsys: %p se_dev: %p"
			" se_dev->se_dev_group: %p\n", subsys, se_dev,
			&se_dev->dev_group);

		mutex_unlock(&g_device_mutex);
		return 0;
	}
	mutex_unlock(&g_device_mutex);

	pr_err("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
	return -EINVAL;
}
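
/*
 * Parse a single identification descriptor target descriptor (type
 * code 0xe4). The fields checked below sit at the SPC-4 defined
 * offsets:
 *
 *   bytes 2-3:  RELATIVE INITIATOR PORT IDENTIFIER
 *   byte 4:     CODE SET (low nibble, binary expected)
 *   byte 5:     ASSOCIATION (bits 5:4), DESIGNATOR TYPE (bits 3:0)
 *   byte 7:     DESIGNATOR LENGTH (16 expected)
 *   bytes 8-23: NAA IEEE Registered Extended WWN
 */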
static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
				unsigned char *p, bool src)
{
	unsigned char *desc = p;
	unsigned short ript;
	u8 desig_len;
	/*
	 * Extract RELATIVE INITIATOR PORT IDENTIFIER
	 */
	ript = get_unaligned_be16(&desc[2]);
	pr_debug("XCOPY 0xe4: RELATIVE INITIATOR PORT IDENTIFIER: %hu\n", ript);
	/*
	 * Check for supported code set, association, and designator type
	 */
	if ((desc[4] & 0x0f) != 0x1) {
		pr_err("XCOPY 0xe4: code set of non binary type not supported\n");
		return -EINVAL;
	}
	if ((desc[5] & 0x30) != 0x00) {
		pr_err("XCOPY 0xe4: association other than LUN not supported\n");
		return -EINVAL;
	}
	if ((desc[5] & 0x0f) != 0x3) {
		pr_err("XCOPY 0xe4: designator type unsupported: 0x%02x\n",
				(desc[5] & 0x0f));
		return -EINVAL;
	}
	/*
	 * Check for matching 16 byte length for NAA IEEE Registered Extended
	 * Assigned designator
	 */
	desig_len = desc[7];
	if (desig_len != 16) {
		pr_err("XCOPY 0xe4: invalid desig_len: %d\n", (int)desig_len);
		return -EINVAL;
	}
	pr_debug("XCOPY 0xe4: desig_len: %d\n", (int)desig_len);
	/*
	 * Check for NAA IEEE Registered Extended Assigned header..
	 */
	if ((desc[8] & 0xf0) != 0x60) {
		pr_err("XCOPY 0xe4: Unsupported DESIGNATOR TYPE: 0x%02x\n",
				(desc[8] & 0xf0));
		return -EINVAL;
	}

	if (src) {
		memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
		/*
		 * Determine if the source designator matches the local device
		 */
		if (!memcmp(&xop->local_dev_wwn[0], &xop->src_tid_wwn[0],
				XCOPY_NAA_IEEE_REGEX_LEN)) {
			xop->op_origin = XCOL_SOURCE_RECV_OP;
			xop->src_dev = se_cmd->se_dev;
			pr_debug("XCOPY 0xe4: Set xop->src_dev %p from source"
					" received xop\n", xop->src_dev);
		}
	} else {
		memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
		/*
		 * Determine if the destination designator matches the local device
		 */
		if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0],
				XCOPY_NAA_IEEE_REGEX_LEN)) {
			xop->op_origin = XCOL_DEST_RECV_OP;
			xop->dst_dev = se_cmd->se_dev;
			pr_debug("XCOPY 0xe4: Set xop->dst_dev: %p from destination"
				" received xop\n", xop->dst_dev);
		}
	}

	return 0;
}
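
/*
 * Parse the target descriptor list. Only 0xe4 identification
 * descriptors are supported, each XCOPY_TARGET_DESC_LEN bytes long,
 * and at most two are accepted (source followed by destination),
 * hence the tdll > 64 check below.
 */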
static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
				struct xcopy_op *xop, unsigned char *p,
				unsigned short tdll)
{
	struct se_device *local_dev = se_cmd->se_dev;
	unsigned char *desc = p;
	int offset = tdll % XCOPY_TARGET_DESC_LEN, rc, ret = 0;
	unsigned short start = 0;
	bool src = true;

	if (offset != 0) {
		pr_err("XCOPY target descriptor list length is not"
			" multiple of %d\n", XCOPY_TARGET_DESC_LEN);
		return -EINVAL;
	}
	if (tdll > 64) {
		pr_err("XCOPY target descriptor supports a maximum of two"
			" src/dest descriptors, tdll: %hu too large..\n", tdll);
		return -EINVAL;
	}
	/*
	 * Generate an IEEE Registered Extended designator based upon the
	 * se_device the XCOPY was received upon..
	 */
	memset(&xop->local_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
	target_xcopy_gen_naa_ieee(local_dev, &xop->local_dev_wwn[0]);

	while (start < tdll) {
		/*
		 * Check for 0xe4 target descriptor identification, and compare
		 * the NAA WWN against the VPD 0x83 based designator above..
		 */
		switch (desc[0]) {
		case 0xe4:
			rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop,
							&desc[0], src);
			if (rc != 0)
				goto out;
			/*
			 * Assume target descriptors are in source -> destination order..
			 */
			src = !src;
			start += XCOPY_TARGET_DESC_LEN;
			desc += XCOPY_TARGET_DESC_LEN;
			ret++;
			break;
		default:
			pr_err("XCOPY unsupported descriptor type code:"
					" 0x%02x\n", desc[0]);
			goto out;
		}
	}

	if (xop->op_origin == XCOL_SOURCE_RECV_OP)
		rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true);
	else
		rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false);

	if (rc < 0)
		goto out;

	pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n",
		 xop->src_dev, &xop->src_tid_wwn[0]);
	pr_debug("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n",
		 xop->dst_dev, &xop->dst_tid_wwn[0]);

	return ret;

out:
	return -EINVAL;
}
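
/*
 * Parse a block -> block segment descriptor (type code 0x02). The
 * fields consumed below sit at the following SPC-4 defined offsets:
 *
 *   byte 1 (bit 1): DC bit
 *   bytes 2-3:      DESCRIPTOR LENGTH (0x18 expected)
 *   bytes 4-5:      SOURCE TARGET DESCRIPTOR INDEX
 *   bytes 6-7:      DESTINATION TARGET DESCRIPTOR INDEX
 *   bytes 10-11:    NUMBER OF BLOCKS
 *   bytes 12-19:    SOURCE BLOCK DEVICE LBA
 *   bytes 20-27:    DESTINATION BLOCK DEVICE LBA
 */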
static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op *xop,
					unsigned char *p)
{
	unsigned char *desc = p;
	int dc = (desc[1] & 0x02);
	unsigned short desc_len;

	desc_len = get_unaligned_be16(&desc[2]);
	if (desc_len != 0x18) {
		pr_err("XCOPY segment desc 0x02: Illegal desc_len:"
				" %hu\n", desc_len);
		return -EINVAL;
	}

	xop->stdi = get_unaligned_be16(&desc[4]);
	xop->dtdi = get_unaligned_be16(&desc[6]);
	pr_debug("XCOPY seg desc 0x02: desc_len: %hu stdi: %hu dtdi: %hu, DC: %d\n",
		desc_len, xop->stdi, xop->dtdi, dc);

	xop->nolb = get_unaligned_be16(&desc[10]);
	xop->src_lba = get_unaligned_be64(&desc[12]);
	xop->dst_lba = get_unaligned_be64(&desc[20]);
	pr_debug("XCOPY seg desc 0x02: nolb: %hu src_lba: %llu dst_lba: %llu\n",
		xop->nolb, (unsigned long long)xop->src_lba,
		(unsigned long long)xop->dst_lba);

	if (dc != 0) {
		xop->dbl = (desc[29] & 0xff) << 16;
		xop->dbl |= (desc[30] & 0xff) << 8;
		xop->dbl |= desc[31] & 0xff;

		pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl);
	}
	return 0;
}

static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
				struct xcopy_op *xop, unsigned char *p,
				unsigned int sdll)
{
	unsigned char *desc = p;
	unsigned int start = 0;
	int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0;

	if (offset != 0) {
		pr_err("XCOPY segment descriptor list length is not"
			" multiple of %d\n", XCOPY_SEGMENT_DESC_LEN);
		return -EINVAL;
	}

	while (start < sdll) {
		/*
		 * Check segment descriptor type code for block -> block
		 */
		switch (desc[0]) {
		case 0x02:
			rc = target_xcopy_parse_segdesc_02(se_cmd, xop, desc);
			if (rc < 0)
				goto out;

			ret++;
			start += XCOPY_SEGMENT_DESC_LEN;
			desc += XCOPY_SEGMENT_DESC_LEN;
			break;
		default:
			pr_err("XCOPY unsupported segment descriptor"
				" type: 0x%02x\n", desc[0]);
			goto out;
		}
	}

	return ret;

out:
	return -EINVAL;
}

/*
 * Start xcopy_pt ops
 */

struct xcopy_pt_cmd {
	bool remote_port;
	struct se_cmd se_cmd;
	struct xcopy_op *xcopy_op;
	struct completion xpt_passthrough_sem;
	unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
};
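
/*
 * A minimal, statically allocated fabric stack (port, TPG, session and
 * node ACL) used to submit the internal READ/WRITE passthrough commands
 * without a real fabric driver behind them.
 */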
static struct se_port xcopy_pt_port;
static struct se_portal_group xcopy_pt_tpg;
static struct se_session xcopy_pt_sess;
static struct se_node_acl xcopy_pt_nacl;

static char *xcopy_pt_get_fabric_name(void)
{
	return "xcopy-pt";
}

static u32 xcopy_pt_get_tag(struct se_cmd *se_cmd)
{
	return 0;
}

static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
{
	struct configfs_subsystem *subsys = target_core_subsystem[0];
	struct se_device *remote_dev;

	if (xop->op_origin == XCOL_SOURCE_RECV_OP)
		remote_dev = xop->dst_dev;
	else
		remote_dev = xop->src_dev;

	pr_debug("Calling configfs_undepend_item for subsys: %p"
		  " remote_dev: %p remote_dev->dev_group: %p\n",
		  subsys, remote_dev, &remote_dev->dev_group.cg_item);

	configfs_undepend_item(subsys, &remote_dev->dev_group.cg_item);
}

static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
{
	struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
				struct xcopy_pt_cmd, se_cmd);

	kfree(xpt_cmd);
}
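
/*
 * Called on passthrough command completion; wakes up the waiter in
 * target_xcopy_issue_pt_cmd() via the per-command completion.
 */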
static int xcopy_pt_check_stop_free(struct se_cmd *se_cmd)
{
	struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
				struct xcopy_pt_cmd, se_cmd);

	complete(&xpt_cmd->xpt_passthrough_sem);
	return 0;
}

static int xcopy_pt_write_pending(struct se_cmd *se_cmd)
{
	return 0;
}

static int xcopy_pt_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

static int xcopy_pt_queue_data_in(struct se_cmd *se_cmd)
{
	return 0;
}

static int xcopy_pt_queue_status(struct se_cmd *se_cmd)
{
	return 0;
}

static const struct target_core_fabric_ops xcopy_pt_tfo = {
	.get_fabric_name	= xcopy_pt_get_fabric_name,
	.get_task_tag		= xcopy_pt_get_tag,
	.get_cmd_state		= xcopy_pt_get_cmd_state,
	.release_cmd		= xcopy_pt_release_cmd,
	.check_stop_free	= xcopy_pt_check_stop_free,
	.write_pending		= xcopy_pt_write_pending,
	.write_pending_status	= xcopy_pt_write_pending_status,
	.queue_data_in		= xcopy_pt_queue_data_in,
	.queue_status		= xcopy_pt_queue_status,
};

/*
 * End xcopy_pt_ops
 */

int target_xcopy_setup_pt(void)
{
	xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
	if (!xcopy_wq) {
		pr_err("Unable to allocate xcopy_wq\n");
		return -ENOMEM;
	}

	memset(&xcopy_pt_port, 0, sizeof(struct se_port));
	INIT_LIST_HEAD(&xcopy_pt_port.sep_alua_list);
	INIT_LIST_HEAD(&xcopy_pt_port.sep_list);
	mutex_init(&xcopy_pt_port.sep_tg_pt_md_mutex);

	memset(&xcopy_pt_tpg, 0, sizeof(struct se_portal_group));
	INIT_LIST_HEAD(&xcopy_pt_tpg.se_tpg_node);
	INIT_LIST_HEAD(&xcopy_pt_tpg.acl_node_list);
	INIT_LIST_HEAD(&xcopy_pt_tpg.tpg_sess_list);

	xcopy_pt_port.sep_tpg = &xcopy_pt_tpg;
	xcopy_pt_tpg.se_tpg_tfo = &xcopy_pt_tfo;

	memset(&xcopy_pt_nacl, 0, sizeof(struct se_node_acl));
	INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
	INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
	memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
	INIT_LIST_HEAD(&xcopy_pt_sess.sess_list);
	INIT_LIST_HEAD(&xcopy_pt_sess.sess_acl_list);

	xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
	xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;

	xcopy_pt_sess.se_tpg = &xcopy_pt_tpg;
	xcopy_pt_sess.se_node_acl = &xcopy_pt_nacl;

	return 0;
}

void target_xcopy_release_pt(void)
{
	if (xcopy_wq)
		destroy_workqueue(xcopy_wq);
}

static void target_xcopy_setup_pt_port(
	struct xcopy_pt_cmd *xpt_cmd,
	struct xcopy_op *xop,
	bool remote_port)
{
	struct se_cmd *ec_cmd = xop->xop_se_cmd;
	struct se_cmd *pt_cmd = &xpt_cmd->se_cmd;

	if (xop->op_origin == XCOL_SOURCE_RECV_OP) {
		/*
		 * Honor destination port reservations for X-COPY PUSH emulation
		 * when the CDB is received on the local source port: blocks are
		 * READ locally and WRITTEN to the remote destination port.
		 */
		if (remote_port) {
			xpt_cmd->remote_port = remote_port;
			pt_cmd->se_lun->lun_sep = &xcopy_pt_port;
			pr_debug("Setup emulated remote DEST xcopy_pt_port: %p to"
				" cmd->se_lun->lun_sep for X-COPY data PUSH\n",
				pt_cmd->se_lun->lun_sep);
		} else {
			pt_cmd->se_lun = ec_cmd->se_lun;
			pt_cmd->se_dev = ec_cmd->se_dev;

			pr_debug("Honoring local SRC port from ec_cmd->se_dev:"
				" %p\n", pt_cmd->se_dev);
			pr_debug("Honoring local SRC port from ec_cmd->se_lun: %p\n",
				pt_cmd->se_lun);
		}
	} else {
		/*
		 * Honor source port reservations for X-COPY PULL emulation
		 * when the CDB is received on the local destination port: blocks
		 * are READ from the remote source port and WRITTEN to the local
		 * destination port.
		 */
		if (remote_port) {
			xpt_cmd->remote_port = remote_port;
			pt_cmd->se_lun->lun_sep = &xcopy_pt_port;
			pr_debug("Setup emulated remote SRC xcopy_pt_port: %p to"
				" cmd->se_lun->lun_sep for X-COPY data PULL\n",
				pt_cmd->se_lun->lun_sep);
		} else {
			pt_cmd->se_lun = ec_cmd->se_lun;
			pt_cmd->se_dev = ec_cmd->se_dev;

			pr_debug("Honoring local DST port from ec_cmd->se_dev:"
				" %p\n", pt_cmd->se_dev);
			pr_debug("Honoring local DST port from ec_cmd->se_lun: %p\n",
				pt_cmd->se_lun);
		}
	}
}

static void target_xcopy_init_pt_lun(struct se_device *se_dev,
		struct se_cmd *pt_cmd, bool remote_port)
{
	/*
	 * Don't allocate + init a pt_cmd->se_lun if honoring local port for
	 * reservations. The pt_cmd->se_lun pointer will be setup from within
	 * target_xcopy_setup_pt_port()
	 */
	if (remote_port) {
		pt_cmd->se_lun = &se_dev->xcopy_lun;
		pt_cmd->se_dev = se_dev;
		pr_debug("Setup emulated se_dev: %p from se_dev\n",
			pt_cmd->se_dev);
	}

	pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}
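
/*
 * When alloc_mem is true (the internal READ), allocate a fresh SGL for
 * the data payload; otherwise (the internal WRITE), map the SGL saved
 * from the prior READ onto this command zero-copy.
 */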
static int target_xcopy_setup_pt_cmd(
	struct xcopy_pt_cmd *xpt_cmd,
	struct xcopy_op *xop,
	struct se_device *se_dev,
	unsigned char *cdb,
	bool remote_port,
	bool alloc_mem)
{
	struct se_cmd *cmd = &xpt_cmd->se_cmd;
	sense_reason_t sense_rc;
	int ret = 0, rc;
	/*
	 * Setup LUN+port to honor reservations based upon xop->op_origin for
	 * X-COPY PUSH or X-COPY PULL based upon where the CDB was received.
	 */
	target_xcopy_init_pt_lun(se_dev, cmd, remote_port);

	xpt_cmd->xcopy_op = xop;
	target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port);

	sense_rc = target_setup_cmd_from_cdb(cmd, cdb);
	if (sense_rc) {
		ret = -EINVAL;
		goto out;
	}

	if (alloc_mem) {
		rc = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
				      cmd->data_length, false);
		if (rc < 0) {
			ret = rc;
			goto out;
		}
		/*
		 * Set this bit so that transport_free_pages() allows the
		 * caller to release the SGLs + physical memory allocated by
		 * target_alloc_sgl()..
		 */
		cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	} else {
		/*
		 * Here the previously allocated SGLs for the internal READ
		 * are mapped zero-copy to the internal WRITE.
		 */
		sense_rc = transport_generic_map_mem_to_cmd(cmd,
					xop->xop_data_sg, xop->xop_data_nents,
					NULL, 0);
		if (sense_rc) {
			ret = -EINVAL;
			goto out;
		}

		pr_debug("Setup PASSTHROUGH_NOALLOC t_data_sg: %p t_data_nents:"
			 " %u\n", cmd->t_data_sg, cmd->t_data_nents);
	}

	return 0;

out:
	return ret;
}

static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd)
{
	struct se_cmd *se_cmd = &xpt_cmd->se_cmd;
	sense_reason_t sense_rc;

	sense_rc = transport_generic_new_cmd(se_cmd);
	if (sense_rc)
		return -EINVAL;

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		target_execute_cmd(se_cmd);

	wait_for_completion_interruptible(&xpt_cmd->xpt_passthrough_sem);

	pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n",
			se_cmd->scsi_status);

	return (se_cmd->scsi_status) ? -EINVAL : 0;
}
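
/*
 * Build and submit the internal READ_16 for one extent of the copy.
 * The allocated SGL is saved in the xcopy_op so the subsequent WRITE
 * can reuse it zero-copy.
 */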
static int target_xcopy_read_source(
	struct se_cmd *ec_cmd,
	struct xcopy_op *xop,
	struct se_device *src_dev,
	sector_t src_lba,
	u32 src_sectors)
{
	struct xcopy_pt_cmd *xpt_cmd;
	struct se_cmd *se_cmd;
	u32 length = (src_sectors * src_dev->dev_attrib.block_size);
	int rc;
	unsigned char cdb[16];
	bool remote_port = (xop->op_origin == XCOL_DEST_RECV_OP);

	xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
	if (!xpt_cmd) {
		pr_err("Unable to allocate xcopy_pt_cmd\n");
		return -ENOMEM;
	}
	init_completion(&xpt_cmd->xpt_passthrough_sem);
	se_cmd = &xpt_cmd->se_cmd;

	memset(&cdb[0], 0, 16);
	cdb[0] = READ_16;
	put_unaligned_be64(src_lba, &cdb[2]);
	put_unaligned_be32(src_sectors, &cdb[10]);
	pr_debug("XCOPY: Built READ_16: LBA: %llu Sectors: %u Length: %u\n",
		(unsigned long long)src_lba, src_sectors, length);

	transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
			      DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
	xop->src_pt_cmd = xpt_cmd;

	rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
				remote_port, true);
	if (rc < 0) {
		transport_generic_free_cmd(se_cmd, 0);
		return rc;
	}

	xop->xop_data_sg = se_cmd->t_data_sg;
	xop->xop_data_nents = se_cmd->t_data_nents;
	pr_debug("XCOPY-READ: Saved xop->xop_data_sg: %p, num: %u for READ"
		" memory\n", xop->xop_data_sg, xop->xop_data_nents);

	rc = target_xcopy_issue_pt_cmd(xpt_cmd);
	if (rc < 0) {
		transport_generic_free_cmd(se_cmd, 0);
		return rc;
	}
	/*
	 * Clear off the allocated t_data_sg, that has been saved for
	 * zero-copy WRITE submission reuse in struct xcopy_op..
	 */
	se_cmd->t_data_sg = NULL;
	se_cmd->t_data_nents = 0;

	return 0;
}
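
/*
 * Build and submit the internal WRITE_16 for the same extent, mapping
 * the SGL populated by target_xcopy_read_source() onto the new command.
 */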
static int target_xcopy_write_destination(
	struct se_cmd *ec_cmd,
	struct xcopy_op *xop,
	struct se_device *dst_dev,
	sector_t dst_lba,
	u32 dst_sectors)
{
	struct xcopy_pt_cmd *xpt_cmd;
	struct se_cmd *se_cmd;
	u32 length = (dst_sectors * dst_dev->dev_attrib.block_size);
	int rc;
	unsigned char cdb[16];
	bool remote_port = (xop->op_origin == XCOL_SOURCE_RECV_OP);

	xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
	if (!xpt_cmd) {
		pr_err("Unable to allocate xcopy_pt_cmd\n");
		return -ENOMEM;
	}
	init_completion(&xpt_cmd->xpt_passthrough_sem);
	se_cmd = &xpt_cmd->se_cmd;

	memset(&cdb[0], 0, 16);
	cdb[0] = WRITE_16;
	put_unaligned_be64(dst_lba, &cdb[2]);
	put_unaligned_be32(dst_sectors, &cdb[10]);
	pr_debug("XCOPY: Built WRITE_16: LBA: %llu Sectors: %u Length: %u\n",
		(unsigned long long)dst_lba, dst_sectors, length);

	transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
			      DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
	xop->dst_pt_cmd = xpt_cmd;

	rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0],
				remote_port, false);
	if (rc < 0) {
		struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd;
		/*
		 * If the failure happened before the t_data_sg hand-off in
		 * target_xcopy_setup_pt_cmd(), reset the memory + clear the
		 * flag so that the core releases this memory on error during
		 * X-COPY WRITE I/O.
		 */
		src_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
		src_cmd->t_data_sg = xop->xop_data_sg;
		src_cmd->t_data_nents = xop->xop_data_nents;

		transport_generic_free_cmd(se_cmd, 0);
		return rc;
	}

	rc = target_xcopy_issue_pt_cmd(xpt_cmd);
	if (rc < 0) {
		se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
		transport_generic_free_cmd(se_cmd, 0);
		return rc;
	}

	return 0;
}
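
/*
 * Process one block -> block copy segment from workqueue context:
 * loop over the extent in chunks bounded by both devices'
 * hw_max_sectors, issuing a backend READ from the source followed by
 * a WRITE to the destination for each chunk, then complete the
 * originating EXTENDED_COPY command.
 */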
static void target_xcopy_do_work(struct work_struct *work)
{
	struct xcopy_op *xop = container_of(work, struct xcopy_op, xop_work);
	struct se_device *src_dev = xop->src_dev, *dst_dev = xop->dst_dev;
	struct se_cmd *ec_cmd = xop->xop_se_cmd;
	sector_t src_lba = xop->src_lba, dst_lba = xop->dst_lba, end_lba;
	unsigned int max_sectors;
	int rc;
	unsigned short nolb = xop->nolb, cur_nolb, max_nolb, copied_nolb = 0;

	end_lba = src_lba + nolb;
	/*
	 * Break up XCOPY I/O into chunks based on the smaller of src_dev's
	 * and dst_dev's hw_max_sectors, capped at XCOPY_MAX_SECTORS.
	 */
	max_sectors = min(src_dev->dev_attrib.hw_max_sectors,
			  dst_dev->dev_attrib.hw_max_sectors);
	max_sectors = min_t(u32, max_sectors, XCOPY_MAX_SECTORS);

	max_nolb = min_t(u16, max_sectors, ((u16)(~0U)));

	pr_debug("target_xcopy_do_work: nolb: %hu, max_nolb: %hu end_lba: %llu\n",
			nolb, max_nolb, (unsigned long long)end_lba);
	pr_debug("target_xcopy_do_work: Starting src_lba: %llu, dst_lba: %llu\n",
			(unsigned long long)src_lba, (unsigned long long)dst_lba);

	while (src_lba < end_lba) {
		cur_nolb = min(nolb, max_nolb);

		pr_debug("target_xcopy_do_work: Calling read src_dev: %p src_lba: %llu,"
			" cur_nolb: %hu\n", src_dev, (unsigned long long)src_lba, cur_nolb);

		rc = target_xcopy_read_source(ec_cmd, xop, src_dev, src_lba, cur_nolb);
		if (rc < 0)
			goto out;

		src_lba += cur_nolb;
		pr_debug("target_xcopy_do_work: Incremented READ src_lba to %llu\n",
				(unsigned long long)src_lba);

		pr_debug("target_xcopy_do_work: Calling write dst_dev: %p dst_lba: %llu,"
			" cur_nolb: %hu\n", dst_dev, (unsigned long long)dst_lba, cur_nolb);

		rc = target_xcopy_write_destination(ec_cmd, xop, dst_dev,
						dst_lba, cur_nolb);
		if (rc < 0) {
			transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
			goto out;
		}

		dst_lba += cur_nolb;
		pr_debug("target_xcopy_do_work: Incremented WRITE dst_lba to %llu\n",
				(unsigned long long)dst_lba);

		copied_nolb += cur_nolb;
		nolb -= cur_nolb;

		transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
		xop->dst_pt_cmd->se_cmd.se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;

		transport_generic_free_cmd(&xop->dst_pt_cmd->se_cmd, 0);
	}

	xcopy_pt_undepend_remotedev(xop);
	kfree(xop);

	pr_debug("target_xcopy_do_work: Final src_lba: %llu, dst_lba: %llu\n",
		(unsigned long long)src_lba, (unsigned long long)dst_lba);
	pr_debug("target_xcopy_do_work: Blocks copied: %hu, Bytes Copied: %u\n",
		copied_nolb, copied_nolb * dst_dev->dev_attrib.block_size);

	pr_debug("target_xcopy_do_work: Setting X-COPY GOOD status -> sending response\n");
	target_complete_cmd(ec_cmd, SAM_STAT_GOOD);
	return;

out:
	xcopy_pt_undepend_remotedev(xop);
	kfree(xop);

	pr_warn("target_xcopy_do_work: Setting X-COPY CHECK_CONDITION -> sending response\n");
	ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
	target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
}
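
/*
 * Entry point for EXTENDED_COPY(LID1). The parameter list header fields
 * consumed below are:
 *
 *   byte 0:      LIST IDENTIFIER
 *   byte 1:      LIST ID USAGE (bits 4:3)
 *   bytes 2-3:   TARGET DESCRIPTOR LIST LENGTH
 *   bytes 8-11:  SEGMENT DESCRIPTOR LIST LENGTH
 *   bytes 12-15: INLINE DATA LENGTH (must be zero here)
 *
 * Target descriptors start at byte 16, immediately followed by the
 * segment descriptors.
 */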
sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
{
	struct se_device *dev = se_cmd->se_dev;
	struct xcopy_op *xop = NULL;
	unsigned char *p = NULL, *seg_desc;
	unsigned int list_id, list_id_usage, sdll, inline_dl, sa;
	sense_reason_t ret = TCM_INVALID_PARAMETER_LIST;
	int rc;
	unsigned short tdll;

	if (!dev->dev_attrib.emulate_3pc) {
		pr_err("EXTENDED_COPY operation explicitly disabled\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	sa = se_cmd->t_task_cdb[1] & 0x1f;
	if (sa != 0x00) {
		pr_err("EXTENDED_COPY(LID4) not supported\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
	if (!xop) {
		pr_err("Unable to allocate xcopy_op\n");
		return TCM_OUT_OF_RESOURCES;
	}
	xop->xop_se_cmd = se_cmd;

	p = transport_kmap_data_sg(se_cmd);
	if (!p) {
		pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n");
		kfree(xop);
		return TCM_OUT_OF_RESOURCES;
	}

	list_id = p[0];
	list_id_usage = (p[1] & 0x18) >> 3;

	/*
	 * Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH
	 */
	tdll = get_unaligned_be16(&p[2]);
	sdll = get_unaligned_be32(&p[8]);

	inline_dl = get_unaligned_be32(&p[12]);
	if (inline_dl != 0) {
		pr_err("XCOPY with non zero inline data length\n");
		goto out;
	}

	pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x"
		" tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
		tdll, sdll, inline_dl);

	rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll);
	if (rc <= 0)
		goto out;

	if (xop->src_dev->dev_attrib.block_size !=
	    xop->dst_dev->dev_attrib.block_size) {
		pr_err("XCOPY: Non matching src_dev block_size: %u + dst_dev"
		       " block_size: %u currently unsupported\n",
			xop->src_dev->dev_attrib.block_size,
			xop->dst_dev->dev_attrib.block_size);
		xcopy_pt_undepend_remotedev(xop);
		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		goto out;
	}

	pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
				rc * XCOPY_TARGET_DESC_LEN);
	seg_desc = &p[16];
	seg_desc += (rc * XCOPY_TARGET_DESC_LEN);

	rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, sdll);
	if (rc <= 0) {
		xcopy_pt_undepend_remotedev(xop);
		goto out;
	}
	transport_kunmap_data_sg(se_cmd);

	pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc,
				rc * XCOPY_SEGMENT_DESC_LEN);

	INIT_WORK(&xop->xop_work, target_xcopy_do_work);
	queue_work(xcopy_wq, &xop->xop_work);
	return TCM_NO_SENSE;

out:
	if (p)
		transport_kunmap_data_sg(se_cmd);
	kfree(xop);
	return ret;
}
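
/*
 * RECEIVE COPY RESULTS / OPERATING PARAMETERS: report the fixed limits
 * of this copy manager, with SNLID=1 indicating that copies are
 * processed without a held list identifier (matching the rejection of
 * non-zero list IDs in target_do_receive_copy_results() below).
 */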
static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
{
	unsigned char *p;

	p = transport_kmap_data_sg(se_cmd);
	if (!p) {
		pr_err("transport_kmap_data_sg failed in"
		       " target_rcr_operating_parameters\n");
		return TCM_OUT_OF_RESOURCES;
	}

	if (se_cmd->data_length < 54) {
		pr_err("Receive Copy Results Op Parameters length"
		       " too small: %u\n", se_cmd->data_length);
		transport_kunmap_data_sg(se_cmd);
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Set SNLID=1 (Supports no List ID)
	 */
	p[4] = 0x1;
	/*
	 * MAXIMUM TARGET DESCRIPTOR COUNT
	 */
	put_unaligned_be16(RCR_OP_MAX_TARGET_DESC_COUNT, &p[8]);
	/*
	 * MAXIMUM SEGMENT DESCRIPTOR COUNT
	 */
	put_unaligned_be16(RCR_OP_MAX_SG_DESC_COUNT, &p[10]);
	/*
	 * MAXIMUM DESCRIPTOR LIST LENGTH
	 */
	put_unaligned_be32(RCR_OP_MAX_DESC_LIST_LEN, &p[12]);
	/*
	 * MAXIMUM SEGMENT LENGTH
	 */
	put_unaligned_be32(RCR_OP_MAX_SEGMENT_LEN, &p[16]);
	/*
	 * MAXIMUM INLINE DATA LENGTH for SA 0x04 (NOT SUPPORTED)
	 */
	put_unaligned_be32(0x0, &p[20]);
	/*
	 * HELD DATA LIMIT
	 */
	put_unaligned_be32(0x0, &p[24]);
	/*
	 * MAXIMUM STREAM DEVICE TRANSFER SIZE
	 */
	put_unaligned_be32(0x0, &p[28]);
	/*
	 * TOTAL CONCURRENT COPIES
	 */
	put_unaligned_be16(RCR_OP_TOTAL_CONCURR_COPIES, &p[34]);
	/*
	 * MAXIMUM CONCURRENT COPIES
	 */
	p[36] = RCR_OP_MAX_CONCURR_COPIES;
	/*
	 * DATA SEGMENT GRANULARITY (log 2)
	 */
	p[37] = RCR_OP_DATA_SEG_GRAN_LOG2;
	/*
	 * INLINE DATA GRANULARITY (log 2)
	 */
	p[38] = RCR_OP_INLINE_DATA_GRAN_LOG2;
	/*
	 * HELD DATA GRANULARITY
	 */
	p[39] = RCR_OP_HELD_DATA_GRAN_LOG2;
	/*
	 * IMPLEMENTED DESCRIPTOR LIST LENGTH
	 */
	p[43] = 0x2;
	/*
	 * List of implemented descriptor type codes (ordered)
	 */
	p[44] = 0x02; /* Copy Block to Block device */
	p[45] = 0xe4; /* Identification descriptor target descriptor */

	/*
	 * AVAILABLE DATA (n-3)
	 */
	put_unaligned_be32(42, &p[0]);

	transport_kunmap_data_sg(se_cmd);
	target_complete_cmd(se_cmd, GOOD);

	return TCM_NO_SENSE;
}

sense_reason_t target_do_receive_copy_results(struct se_cmd *se_cmd)
{
	unsigned char *cdb = &se_cmd->t_task_cdb[0];
	int sa = (cdb[1] & 0x1f), list_id = cdb[2];
	sense_reason_t rc = TCM_NO_SENSE;

	pr_debug("Entering target_do_receive_copy_results: SA: 0x%02x, List ID:"
		" 0x%02x, AL: %u\n", sa, list_id, se_cmd->data_length);

	if (list_id != 0) {
		pr_err("Receive Copy Results with non zero list identifier"
		       " not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}

	switch (sa) {
	case RCR_SA_OPERATING_PARAMETERS:
		rc = target_rcr_operating_parameters(se_cmd);
		break;
	case RCR_SA_COPY_STATUS:
	case RCR_SA_RECEIVE_DATA:
	case RCR_SA_FAILED_SEGMENT_DETAILS:
	default:
		pr_err("Unsupported SA for receive copy results: 0x%02x\n", sa);
		return TCM_INVALID_CDB_FIELD;
	}

	return rc;
}