iscsi_target_util.c

/*******************************************************************************
 * This file contains the iSCSI Target specific utility functions.
 *
 * (c) Copyright 2007-2013 Datera, Inc.
 *
 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 ******************************************************************************/

#include <linux/list.h>
#include <linux/percpu_ida.h>
#include <net/ipv6.h>         /* ipv6_addr_equal() */
#include <scsi/scsi_tcq.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_parameters.h"
#include "iscsi_target_seq_pdu_list.h"
#include "iscsi_target_datain_values.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_erl1.h"
#include "iscsi_target_erl2.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"
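
/*
 * Debug helper: hex-dump @len bytes of @buff via pr_debug(), 16 bytes per row.
 */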
#define PRINT_BUFF(buff, len)						\
{									\
	int zzz;							\
									\
	pr_debug("%d:\n", __LINE__);					\
	for (zzz = 0; zzz < len; zzz++) {				\
		if (zzz % 16 == 0) {					\
			if (zzz)					\
				pr_debug("\n");				\
			pr_debug("%4i: ", zzz);				\
		}							\
		pr_debug("%02x ", (unsigned char) (buff)[zzz]);		\
	}								\
	if ((len + 1) % 16)						\
		pr_debug("\n");						\
}

extern struct list_head g_tiqn_list;
extern spinlock_t tiqn_lock;

/*
 * Called with cmd->r2t_lock held.
 */
int iscsit_add_r2t_to_list(
	struct iscsi_cmd *cmd,
	u32 offset,
	u32 xfer_len,
	int recovery,
	u32 r2t_sn)
{
	struct iscsi_r2t *r2t;

	r2t = kmem_cache_zalloc(lio_r2t_cache, GFP_ATOMIC);
	if (!r2t) {
		pr_err("Unable to allocate memory for struct iscsi_r2t.\n");
		return -1;
	}
	INIT_LIST_HEAD(&r2t->r2t_list);

	r2t->recovery_r2t = recovery;
	r2t->r2t_sn = (!r2t_sn) ? cmd->r2t_sn++ : r2t_sn;
	r2t->offset = offset;
	r2t->xfer_len = xfer_len;
	list_add_tail(&r2t->r2t_list, &cmd->cmd_r2t_list);
	spin_unlock_bh(&cmd->r2t_lock);

	iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T);

	spin_lock_bh(&cmd->r2t_lock);
	return 0;
}

struct iscsi_r2t *iscsit_get_r2t_for_eos(
	struct iscsi_cmd *cmd,
	u32 offset,
	u32 length)
{
	struct iscsi_r2t *r2t;

	spin_lock_bh(&cmd->r2t_lock);
	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
		if ((r2t->offset <= offset) &&
		    (r2t->offset + r2t->xfer_len) >= (offset + length)) {
			spin_unlock_bh(&cmd->r2t_lock);
			return r2t;
		}
	}
	spin_unlock_bh(&cmd->r2t_lock);

	pr_err("Unable to locate R2T for Offset: %u, Length:"
			" %u\n", offset, length);
	return NULL;
}

struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *cmd)
{
	struct iscsi_r2t *r2t;

	spin_lock_bh(&cmd->r2t_lock);
	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
		if (!r2t->sent_r2t) {
			spin_unlock_bh(&cmd->r2t_lock);
			return r2t;
		}
	}
	spin_unlock_bh(&cmd->r2t_lock);

	pr_err("Unable to locate next R2T to send for ITT:"
			" 0x%08x.\n", cmd->init_task_tag);
	return NULL;
}

/*
 * Called with cmd->r2t_lock held.
 */
void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsi_cmd *cmd)
{
	list_del(&r2t->r2t_list);
	kmem_cache_free(lio_r2t_cache, r2t);
}

void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
{
	struct iscsi_r2t *r2t, *r2t_tmp;

	spin_lock_bh(&cmd->r2t_lock);
	list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list)
		iscsit_free_r2t(r2t, cmd);
	spin_unlock_bh(&cmd->r2t_lock);
}

/*
 * May be called from software interrupt (timer) context for allocating
 * iSCSI NopINs.
 */
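/*
 * Commands are carved out of the pre-allocated per-session tag pool:
 * se_sess->sess_cmd_map holds an array of iscsi_cmd descriptors, each
 * followed by conn->conn_transport->priv_size bytes of transport private
 * data, indexed by the percpu_ida tag returned below.
 */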
struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state)
{
	struct iscsi_cmd *cmd;
	struct se_session *se_sess = conn->sess->se_sess;
	int size, tag;

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state);
	if (tag < 0)
		return NULL;

	size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size;
	cmd = (struct iscsi_cmd *)(se_sess->sess_cmd_map + (tag * size));
	memset(cmd, 0, size);

	cmd->se_cmd.map_tag = tag;
	cmd->conn = conn;
	cmd->data_direction = DMA_NONE;
	INIT_LIST_HEAD(&cmd->i_conn_node);
	INIT_LIST_HEAD(&cmd->datain_list);
	INIT_LIST_HEAD(&cmd->cmd_r2t_list);
	spin_lock_init(&cmd->datain_lock);
	spin_lock_init(&cmd->dataout_timeout_lock);
	spin_lock_init(&cmd->istate_lock);
	spin_lock_init(&cmd->error_lock);
	spin_lock_init(&cmd->r2t_lock);
	timer_setup(&cmd->dataout_timer, iscsit_handle_dataout_timeout, 0);

	return cmd;
}
EXPORT_SYMBOL(iscsit_allocate_cmd);

struct iscsi_seq *iscsit_get_seq_holder_for_datain(
	struct iscsi_cmd *cmd,
	u32 seq_send_order)
{
	u32 i;

	for (i = 0; i < cmd->seq_count; i++)
		if (cmd->seq_list[i].seq_send_order == seq_send_order)
			return &cmd->seq_list[i];

	return NULL;
}

struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *cmd)
{
	u32 i;

	if (!cmd->seq_list) {
		pr_err("struct iscsi_cmd->seq_list is NULL!\n");
		return NULL;
	}

	for (i = 0; i < cmd->seq_count; i++) {
		if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
			continue;
		if (cmd->seq_list[i].seq_send_order == cmd->seq_send_order) {
			cmd->seq_send_order++;
			return &cmd->seq_list[i];
		}
	}

	return NULL;
}

struct iscsi_r2t *iscsit_get_holder_for_r2tsn(
	struct iscsi_cmd *cmd,
	u32 r2t_sn)
{
	struct iscsi_r2t *r2t;

	spin_lock_bh(&cmd->r2t_lock);
	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
		if (r2t->r2t_sn == r2t_sn) {
			spin_unlock_bh(&cmd->r2t_lock);
			return r2t;
		}
	}
	spin_unlock_bh(&cmd->r2t_lock);

	return NULL;
}
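
/*
 * Note: iscsi_sna_gt() compares CmdSNs using RFC 1982 serial number
 * arithmetic, so the window check below still behaves when the 32-bit
 * counters wrap, e.g. with ExpCmdSN = 0xfffffffe and MaxCmdSN = 0x00000003
 * a received CmdSN of 0x00000001 is treated as inside the window rather
 * than rejected.
 */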
static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cmdsn)
{
	u32 max_cmdsn;
	int ret;

	/*
	 * This is the proper method of checking received CmdSN against
	 * ExpCmdSN and MaxCmdSN values, as well as accounting for out
	 * of order CmdSNs due to multiple connection sessions and/or
	 * CRC failures.
	 */
	max_cmdsn = atomic_read(&sess->max_cmd_sn);
	if (iscsi_sna_gt(cmdsn, max_cmdsn)) {
		pr_err("Received CmdSN: 0x%08x is greater than"
		       " MaxCmdSN: 0x%08x, ignoring.\n", cmdsn, max_cmdsn);
		ret = CMDSN_MAXCMDSN_OVERRUN;

	} else if (cmdsn == sess->exp_cmd_sn) {
		sess->exp_cmd_sn++;
		pr_debug("Received CmdSN matches ExpCmdSN,"
			 " incremented ExpCmdSN to: 0x%08x\n",
			 sess->exp_cmd_sn);
		ret = CMDSN_NORMAL_OPERATION;

	} else if (iscsi_sna_gt(cmdsn, sess->exp_cmd_sn)) {
		pr_debug("Received CmdSN: 0x%08x is greater"
			 " than ExpCmdSN: 0x%08x, not acknowledging.\n",
			 cmdsn, sess->exp_cmd_sn);
		ret = CMDSN_HIGHER_THAN_EXP;

	} else {
		pr_err("Received CmdSN: 0x%08x is less than"
		       " ExpCmdSN: 0x%08x, ignoring.\n", cmdsn,
		       sess->exp_cmd_sn);
		ret = CMDSN_LOWER_THAN_EXP;
	}

	return ret;
}

/*
 * Commands may be received out of order if MC/S is in use.
 * Ensure they are executed in CmdSN order.
 */
int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
			unsigned char *buf, __be32 cmdsn)
{
	int ret, cmdsn_ret;
	bool reject = false;
	u8 reason = ISCSI_REASON_BOOKMARK_NO_RESOURCES;

	mutex_lock(&conn->sess->cmdsn_mutex);

	cmdsn_ret = iscsit_check_received_cmdsn(conn->sess, be32_to_cpu(cmdsn));
	switch (cmdsn_ret) {
	case CMDSN_NORMAL_OPERATION:
		ret = iscsit_execute_cmd(cmd, 0);
		if ((ret >= 0) && !list_empty(&conn->sess->sess_ooo_cmdsn_list))
			iscsit_execute_ooo_cmdsns(conn->sess);
		else if (ret < 0) {
			reject = true;
			ret = CMDSN_ERROR_CANNOT_RECOVER;
		}
		break;
	case CMDSN_HIGHER_THAN_EXP:
		ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, be32_to_cpu(cmdsn));
		if (ret < 0) {
			reject = true;
			ret = CMDSN_ERROR_CANNOT_RECOVER;
			break;
		}
		ret = CMDSN_HIGHER_THAN_EXP;
		break;
	case CMDSN_LOWER_THAN_EXP:
	case CMDSN_MAXCMDSN_OVERRUN:
	default:
		cmd->i_state = ISTATE_REMOVE;
		iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
		/*
		 * Existing callers for iscsit_sequence_cmd() will silently
		 * ignore commands with CMDSN_LOWER_THAN_EXP, so force this
		 * return for CMDSN_MAXCMDSN_OVERRUN as well.
		 */
		ret = CMDSN_LOWER_THAN_EXP;
		break;
	}
	mutex_unlock(&conn->sess->cmdsn_mutex);

	if (reject)
		iscsit_reject_cmd(cmd, reason, buf);

	return ret;
}
EXPORT_SYMBOL(iscsit_sequence_cmd);

int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf)
{
	struct iscsi_conn *conn = cmd->conn;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct iscsi_data *hdr = (struct iscsi_data *) buf;
	u32 payload_length = ntoh24(hdr->dlength);

	if (conn->sess->sess_ops->InitialR2T) {
		pr_err("Received unexpected unsolicited data"
			" while InitialR2T=Yes, protocol error.\n");
		transport_send_check_condition_and_sense(se_cmd,
				TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
		return -1;
	}

	if ((cmd->first_burst_len + payload_length) >
	     conn->sess->sess_ops->FirstBurstLength) {
		pr_err("Total %u bytes exceeds FirstBurstLength: %u"
			" for this Unsolicited DataOut Burst.\n",
			(cmd->first_burst_len + payload_length),
			conn->sess->sess_ops->FirstBurstLength);
		transport_send_check_condition_and_sense(se_cmd,
				TCM_INCORRECT_AMOUNT_OF_DATA, 0);
		return -1;
	}

	if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))
		return 0;

	if (((cmd->first_burst_len + payload_length) != cmd->se_cmd.data_length) &&
	    ((cmd->first_burst_len + payload_length) !=
	     conn->sess->sess_ops->FirstBurstLength)) {
		pr_err("Unsolicited non-immediate data received %u"
			" does not equal FirstBurstLength: %u, and does"
			" not equal ExpXferLen %u.\n",
			(cmd->first_burst_len + payload_length),
			conn->sess->sess_ops->FirstBurstLength, cmd->se_cmd.data_length);
		transport_send_check_condition_and_sense(se_cmd,
				TCM_INCORRECT_AMOUNT_OF_DATA, 0);
		return -1;
	}
	return 0;
}

struct iscsi_cmd *iscsit_find_cmd_from_itt(
	struct iscsi_conn *conn,
	itt_t init_task_tag)
{
	struct iscsi_cmd *cmd;

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
		if (cmd->init_task_tag == init_task_tag) {
			spin_unlock_bh(&conn->cmd_lock);
			return cmd;
		}
	}
	spin_unlock_bh(&conn->cmd_lock);

	pr_err("Unable to locate ITT: 0x%08x on CID: %hu\n",
			init_task_tag, conn->cid);

	return NULL;
}
EXPORT_SYMBOL(iscsit_find_cmd_from_itt);

struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
	struct iscsi_conn *conn,
	itt_t init_task_tag,
	u32 length)
{
	struct iscsi_cmd *cmd;

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
		if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT)
			continue;
		if (cmd->init_task_tag == init_task_tag) {
			spin_unlock_bh(&conn->cmd_lock);
			return cmd;
		}
	}
	spin_unlock_bh(&conn->cmd_lock);

	pr_err("Unable to locate ITT: 0x%08x on CID: %hu,"
			" dumping payload\n", init_task_tag, conn->cid);
	if (length)
		iscsit_dump_data_payload(conn, length, 1);

	return NULL;
}
EXPORT_SYMBOL(iscsit_find_cmd_from_itt_or_dump);

struct iscsi_cmd *iscsit_find_cmd_from_ttt(
	struct iscsi_conn *conn,
	u32 targ_xfer_tag)
{
	struct iscsi_cmd *cmd = NULL;

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
		if (cmd->targ_xfer_tag == targ_xfer_tag) {
			spin_unlock_bh(&conn->cmd_lock);
			return cmd;
		}
	}
	spin_unlock_bh(&conn->cmd_lock);

	pr_err("Unable to locate TTT: 0x%08x on CID: %hu\n",
			targ_xfer_tag, conn->cid);
	return NULL;
}
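
/*
 * Returns -2 (and sets *cr_ptr/*cmd_ptr) when the command is found on an
 * inactive connection recovery entry, 0 when it is found on an active
 * entry, and -1 when the ITT cannot be located at all.
 */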
int iscsit_find_cmd_for_recovery(
	struct iscsi_session *sess,
	struct iscsi_cmd **cmd_ptr,
	struct iscsi_conn_recovery **cr_ptr,
	itt_t init_task_tag)
{
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_conn_recovery *cr;
	/*
	 * Scan through the inactive connection recovery list's command list.
	 * If init_task_tag matches, the command is still associated with an
	 * inactive connection recovery entry.
	 */
	spin_lock(&sess->cr_i_lock);
	list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
		spin_lock(&cr->conn_recovery_cmd_lock);
		list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
			if (cmd->init_task_tag == init_task_tag) {
				spin_unlock(&cr->conn_recovery_cmd_lock);
				spin_unlock(&sess->cr_i_lock);

				*cr_ptr = cr;
				*cmd_ptr = cmd;
				return -2;
			}
		}
		spin_unlock(&cr->conn_recovery_cmd_lock);
	}
	spin_unlock(&sess->cr_i_lock);
	/*
	 * Scan through the active connection recovery list's command list.
	 * If init_task_tag matches, the command is ready to be reassigned.
	 */
	spin_lock(&sess->cr_a_lock);
	list_for_each_entry(cr, &sess->cr_active_list, cr_list) {
		spin_lock(&cr->conn_recovery_cmd_lock);
		list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
			if (cmd->init_task_tag == init_task_tag) {
				spin_unlock(&cr->conn_recovery_cmd_lock);
				spin_unlock(&sess->cr_a_lock);

				*cr_ptr = cr;
				*cmd_ptr = cmd;
				return 0;
			}
		}
		spin_unlock(&cr->conn_recovery_cmd_lock);
	}
	spin_unlock(&sess->cr_a_lock);

	return -1;
}
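
/*
 * Queue a state change for @cmd on the connection's immediate queue and
 * wake up any thread waiting on conn->queues_wq.
 */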
void iscsit_add_cmd_to_immediate_queue(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn,
	u8 state)
{
	struct iscsi_queue_req *qr;

	qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
	if (!qr) {
		pr_err("Unable to allocate memory for"
			" struct iscsi_queue_req\n");
		return;
	}
	INIT_LIST_HEAD(&qr->qr_list);
	qr->cmd = cmd;
	qr->state = state;

	spin_lock_bh(&conn->immed_queue_lock);
	list_add_tail(&qr->qr_list, &conn->immed_queue_list);
	atomic_inc(&cmd->immed_queue_count);
	atomic_set(&conn->check_immediate_queue, 1);
	spin_unlock_bh(&conn->immed_queue_lock);

	wake_up(&conn->queues_wq);
}
EXPORT_SYMBOL(iscsit_add_cmd_to_immediate_queue);

struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr;

	spin_lock_bh(&conn->immed_queue_lock);
	if (list_empty(&conn->immed_queue_list)) {
		spin_unlock_bh(&conn->immed_queue_lock);
		return NULL;
	}
	qr = list_first_entry(&conn->immed_queue_list,
			      struct iscsi_queue_req, qr_list);

	list_del(&qr->qr_list);
	if (qr->cmd)
		atomic_dec(&qr->cmd->immed_queue_count);
	spin_unlock_bh(&conn->immed_queue_lock);

	return qr;
}

static void iscsit_remove_cmd_from_immediate_queue(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr, *qr_tmp;

	spin_lock_bh(&conn->immed_queue_lock);
	if (!atomic_read(&cmd->immed_queue_count)) {
		spin_unlock_bh(&conn->immed_queue_lock);
		return;
	}

	list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
		if (qr->cmd != cmd)
			continue;

		atomic_dec(&qr->cmd->immed_queue_count);
		list_del(&qr->qr_list);
		kmem_cache_free(lio_qr_cache, qr);
	}
	spin_unlock_bh(&conn->immed_queue_lock);

	if (atomic_read(&cmd->immed_queue_count)) {
		pr_err("ITT: 0x%08x immed_queue_count: %d\n",
			cmd->init_task_tag,
			atomic_read(&cmd->immed_queue_count));
	}
}

int iscsit_add_cmd_to_response_queue(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn,
	u8 state)
{
	struct iscsi_queue_req *qr;

	qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
	if (!qr) {
		pr_err("Unable to allocate memory for"
			" struct iscsi_queue_req\n");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&qr->qr_list);
	qr->cmd = cmd;
	qr->state = state;

	spin_lock_bh(&conn->response_queue_lock);
	list_add_tail(&qr->qr_list, &conn->response_queue_list);
	atomic_inc(&cmd->response_queue_count);
	spin_unlock_bh(&conn->response_queue_lock);

	wake_up(&conn->queues_wq);
	return 0;
}

struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr;

	spin_lock_bh(&conn->response_queue_lock);
	if (list_empty(&conn->response_queue_list)) {
		spin_unlock_bh(&conn->response_queue_lock);
		return NULL;
	}

	qr = list_first_entry(&conn->response_queue_list,
			      struct iscsi_queue_req, qr_list);

	list_del(&qr->qr_list);
	if (qr->cmd)
		atomic_dec(&qr->cmd->response_queue_count);
	spin_unlock_bh(&conn->response_queue_lock);

	return qr;
}

static void iscsit_remove_cmd_from_response_queue(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr, *qr_tmp;

	spin_lock_bh(&conn->response_queue_lock);
	if (!atomic_read(&cmd->response_queue_count)) {
		spin_unlock_bh(&conn->response_queue_lock);
		return;
	}

	list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
				qr_list) {
		if (qr->cmd != cmd)
			continue;

		atomic_dec(&qr->cmd->response_queue_count);
		list_del(&qr->qr_list);
		kmem_cache_free(lio_qr_cache, qr);
	}
	spin_unlock_bh(&conn->response_queue_lock);

	if (atomic_read(&cmd->response_queue_count)) {
		pr_err("ITT: 0x%08x response_queue_count: %d\n",
			cmd->init_task_tag,
			atomic_read(&cmd->response_queue_count));
	}
}

bool iscsit_conn_all_queues_empty(struct iscsi_conn *conn)
{
	bool empty;

	spin_lock_bh(&conn->immed_queue_lock);
	empty = list_empty(&conn->immed_queue_list);
	spin_unlock_bh(&conn->immed_queue_lock);

	if (!empty)
		return empty;

	spin_lock_bh(&conn->response_queue_lock);
	empty = list_empty(&conn->response_queue_list);
	spin_unlock_bh(&conn->response_queue_lock);

	return empty;
}

void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
{
	struct iscsi_queue_req *qr, *qr_tmp;

	spin_lock_bh(&conn->immed_queue_lock);
	list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
		list_del(&qr->qr_list);
		if (qr->cmd)
			atomic_dec(&qr->cmd->immed_queue_count);

		kmem_cache_free(lio_qr_cache, qr);
	}
	spin_unlock_bh(&conn->immed_queue_lock);

	spin_lock_bh(&conn->response_queue_lock);
	list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
			qr_list) {
		list_del(&qr->qr_list);
		if (qr->cmd)
			atomic_dec(&qr->cmd->response_queue_count);

		kmem_cache_free(lio_qr_cache, qr);
	}
	spin_unlock_bh(&conn->response_queue_lock);
}

void iscsit_release_cmd(struct iscsi_cmd *cmd)
{
	struct iscsi_session *sess;
	struct se_cmd *se_cmd = &cmd->se_cmd;

	WARN_ON(!list_empty(&cmd->i_conn_node));

	if (cmd->conn)
		sess = cmd->conn->sess;
	else
		sess = cmd->sess;

	BUG_ON(!sess || !sess->se_sess);

	kfree(cmd->buf_ptr);
	kfree(cmd->pdu_list);
	kfree(cmd->seq_list);
	kfree(cmd->tmr_req);
	kfree(cmd->iov_data);
	kfree(cmd->text_in_ptr);

	percpu_ida_free(&sess->se_sess->sess_tag_pool, se_cmd->map_tag);
}
EXPORT_SYMBOL(iscsit_release_cmd);

void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool check_queues)
{
	struct iscsi_conn *conn = cmd->conn;

	WARN_ON(!list_empty(&cmd->i_conn_node));

	if (cmd->data_direction == DMA_TO_DEVICE) {
		iscsit_stop_dataout_timer(cmd);
		iscsit_free_r2ts_from_list(cmd);
	}
	if (cmd->data_direction == DMA_FROM_DEVICE)
		iscsit_free_all_datain_reqs(cmd);

	if (conn && check_queues) {
		iscsit_remove_cmd_from_immediate_queue(cmd, conn);
		iscsit_remove_cmd_from_response_queue(cmd, conn);
	}

	if (conn && conn->conn_transport->iscsit_release_cmd)
		conn->conn_transport->iscsit_release_cmd(conn, cmd);
}

void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
{
	struct se_cmd *se_cmd = cmd->se_cmd.se_tfo ? &cmd->se_cmd : NULL;
	int rc;

	__iscsit_free_cmd(cmd, shutdown);
	if (se_cmd) {
		rc = transport_generic_free_cmd(se_cmd, shutdown);
		if (!rc && shutdown && se_cmd->se_sess) {
			__iscsit_free_cmd(cmd, shutdown);
			target_put_sess_cmd(se_cmd);
		}
	} else {
		iscsit_release_cmd(cmd);
	}
}
EXPORT_SYMBOL(iscsit_free_cmd);
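
/*
 * The session/connection usage counters below implement a simple
 * wait-for-idle scheme: the check routines block (except from interrupt
 * context in the session case) until the matching decrement drops the
 * count to zero and completes *_waiting_on_uc_comp.
 */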
int iscsit_check_session_usage_count(struct iscsi_session *sess)
{
	spin_lock_bh(&sess->session_usage_lock);
	if (sess->session_usage_count != 0) {
		sess->session_waiting_on_uc = 1;
		spin_unlock_bh(&sess->session_usage_lock);
		if (in_interrupt())
			return 2;

		wait_for_completion(&sess->session_waiting_on_uc_comp);
		return 1;
	}
	spin_unlock_bh(&sess->session_usage_lock);

	return 0;
}

void iscsit_dec_session_usage_count(struct iscsi_session *sess)
{
	spin_lock_bh(&sess->session_usage_lock);
	sess->session_usage_count--;

	if (!sess->session_usage_count && sess->session_waiting_on_uc)
		complete(&sess->session_waiting_on_uc_comp);

	spin_unlock_bh(&sess->session_usage_lock);
}

void iscsit_inc_session_usage_count(struct iscsi_session *sess)
{
	spin_lock_bh(&sess->session_usage_lock);
	sess->session_usage_count++;
	spin_unlock_bh(&sess->session_usage_lock);
}

struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *sess, u16 cid)
{
	struct iscsi_conn *conn;

	spin_lock_bh(&sess->conn_lock);
	list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
		if ((conn->cid == cid) &&
		    (conn->conn_state == TARG_CONN_STATE_LOGGED_IN)) {
			iscsit_inc_conn_usage_count(conn);
			spin_unlock_bh(&sess->conn_lock);
			return conn;
		}
	}
	spin_unlock_bh(&sess->conn_lock);

	return NULL;
}

struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *sess, u16 cid)
{
	struct iscsi_conn *conn;

	spin_lock_bh(&sess->conn_lock);
	list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
		if (conn->cid == cid) {
			iscsit_inc_conn_usage_count(conn);
			spin_lock(&conn->state_lock);
			atomic_set(&conn->connection_wait_rcfr, 1);
			spin_unlock(&conn->state_lock);
			spin_unlock_bh(&sess->conn_lock);
			return conn;
		}
	}
	spin_unlock_bh(&sess->conn_lock);

	return NULL;
}

void iscsit_check_conn_usage_count(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->conn_usage_lock);
	if (conn->conn_usage_count != 0) {
		conn->conn_waiting_on_uc = 1;
		spin_unlock_bh(&conn->conn_usage_lock);

		wait_for_completion(&conn->conn_waiting_on_uc_comp);
		return;
	}
	spin_unlock_bh(&conn->conn_usage_lock);
}

void iscsit_dec_conn_usage_count(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->conn_usage_lock);
	conn->conn_usage_count--;

	if (!conn->conn_usage_count && conn->conn_waiting_on_uc)
		complete(&conn->conn_waiting_on_uc_comp);

	spin_unlock_bh(&conn->conn_usage_lock);
}

void iscsit_inc_conn_usage_count(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->conn_usage_lock);
	conn->conn_usage_count++;
	spin_unlock_bh(&conn->conn_usage_lock);
}
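
/*
 * NOPIN keepalive handling: the nopin_timer periodically queues a NopIN
 * to the initiator (iscsit_handle_nopin_timeout() -> iscsit_add_nopin()),
 * while the nopin_response_timer fails the connection (connection
 * reinstatement) if the initiator never answers a NopIN that requested
 * a response.
 */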
static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
{
	u8 state;
	struct iscsi_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, TASK_RUNNING);
	if (!cmd)
		return -1;

	cmd->iscsi_opcode = ISCSI_OP_NOOP_IN;
	state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
				ISTATE_SEND_NOPIN_NO_RESPONSE;

	cmd->init_task_tag = RESERVED_ITT;
	cmd->targ_xfer_tag = (want_response) ?
			     session_get_next_ttt(conn->sess) : 0xFFFFFFFF;

	spin_lock_bh(&conn->cmd_lock);
	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
	spin_unlock_bh(&conn->cmd_lock);

	if (want_response)
		iscsit_start_nopin_response_timer(conn);
	iscsit_add_cmd_to_immediate_queue(cmd, conn, state);

	return 0;
}

void iscsit_handle_nopin_response_timeout(struct timer_list *t)
{
	struct iscsi_conn *conn = from_timer(conn, t, nopin_response_timer);

	iscsit_inc_conn_usage_count(conn);

	spin_lock_bh(&conn->nopin_timer_lock);
	if (conn->nopin_response_timer_flags & ISCSI_TF_STOP) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		iscsit_dec_conn_usage_count(conn);
		return;
	}

	pr_debug("Did not receive response to NOPIN on CID: %hu on"
		" SID: %u, failing connection.\n", conn->cid,
			conn->sess->sid);
	conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
	spin_unlock_bh(&conn->nopin_timer_lock);

	{
	struct iscsi_portal_group *tpg = conn->sess->tpg;
	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;

	if (tiqn) {
		spin_lock_bh(&tiqn->sess_err_stats.lock);
		strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
				conn->sess->sess_ops->InitiatorName);
		tiqn->sess_err_stats.last_sess_failure_type =
				ISCSI_SESS_ERR_CXN_TIMEOUT;
		tiqn->sess_err_stats.cxn_timeout_errors++;
		atomic_long_inc(&conn->sess->conn_timeout_errors);
		spin_unlock_bh(&tiqn->sess_err_stats.lock);
	}
	}

	iscsit_cause_connection_reinstatement(conn, 0);
	iscsit_dec_conn_usage_count(conn);
}

void iscsit_mod_nopin_response_timer(struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;
	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);

	spin_lock_bh(&conn->nopin_timer_lock);
	if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}

	mod_timer(&conn->nopin_response_timer,
		(get_jiffies_64() + na->nopin_response_timeout * HZ));
	spin_unlock_bh(&conn->nopin_timer_lock);
}

void iscsit_start_nopin_response_timer(struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;
	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);

	spin_lock_bh(&conn->nopin_timer_lock);
	if (conn->nopin_response_timer_flags & ISCSI_TF_RUNNING) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}

	conn->nopin_response_timer_flags &= ~ISCSI_TF_STOP;
	conn->nopin_response_timer_flags |= ISCSI_TF_RUNNING;
	mod_timer(&conn->nopin_response_timer,
		  jiffies + na->nopin_response_timeout * HZ);

	pr_debug("Started NOPIN Response Timer on CID: %d to %u"
		" seconds\n", conn->cid, na->nopin_response_timeout);
	spin_unlock_bh(&conn->nopin_timer_lock);
}

void iscsit_stop_nopin_response_timer(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->nopin_timer_lock);
	if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}
	conn->nopin_response_timer_flags |= ISCSI_TF_STOP;
	spin_unlock_bh(&conn->nopin_timer_lock);

	del_timer_sync(&conn->nopin_response_timer);

	spin_lock_bh(&conn->nopin_timer_lock);
	conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
	spin_unlock_bh(&conn->nopin_timer_lock);
}

void iscsit_handle_nopin_timeout(struct timer_list *t)
{
	struct iscsi_conn *conn = from_timer(conn, t, nopin_timer);

	iscsit_inc_conn_usage_count(conn);

	spin_lock_bh(&conn->nopin_timer_lock);
	if (conn->nopin_timer_flags & ISCSI_TF_STOP) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		iscsit_dec_conn_usage_count(conn);
		return;
	}
	conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
	spin_unlock_bh(&conn->nopin_timer_lock);

	iscsit_add_nopin(conn, 1);
	iscsit_dec_conn_usage_count(conn);
}

/*
 * Called with conn->nopin_timer_lock held.
 */
void __iscsit_start_nopin_timer(struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;
	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
	/*
	 * NOPIN timeout is disabled.
	 */
	if (!na->nopin_timeout)
		return;

	if (conn->nopin_timer_flags & ISCSI_TF_RUNNING)
		return;

	conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
	conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
	mod_timer(&conn->nopin_timer, jiffies + na->nopin_timeout * HZ);

	pr_debug("Started NOPIN Timer on CID: %d at %u second"
		" interval\n", conn->cid, na->nopin_timeout);
}

void iscsit_start_nopin_timer(struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;
	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
	/*
	 * NOPIN timeout is disabled.
	 */
	if (!na->nopin_timeout)
		return;

	spin_lock_bh(&conn->nopin_timer_lock);
	if (conn->nopin_timer_flags & ISCSI_TF_RUNNING) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}

	conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
	conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
	mod_timer(&conn->nopin_timer, jiffies + na->nopin_timeout * HZ);

	pr_debug("Started NOPIN Timer on CID: %d at %u second"
		" interval\n", conn->cid, na->nopin_timeout);
	spin_unlock_bh(&conn->nopin_timer_lock);
}

void iscsit_stop_nopin_timer(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->nopin_timer_lock);
	if (!(conn->nopin_timer_flags & ISCSI_TF_RUNNING)) {
		spin_unlock_bh(&conn->nopin_timer_lock);
		return;
	}
	conn->nopin_timer_flags |= ISCSI_TF_STOP;
	spin_unlock_bh(&conn->nopin_timer_lock);

	del_timer_sync(&conn->nopin_timer);

	spin_lock_bh(&conn->nopin_timer_lock);
	conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
	spin_unlock_bh(&conn->nopin_timer_lock);
}

int iscsit_send_tx_data(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn,
	int use_misc)
{
	int tx_sent, tx_size;
	u32 iov_count;
	struct kvec *iov;

send_data:
	tx_size = cmd->tx_size;

	if (!use_misc) {
		iov = &cmd->iov_data[0];
		iov_count = cmd->iov_data_count;
	} else {
		iov = &cmd->iov_misc[0];
		iov_count = cmd->iov_misc_count;
	}

	tx_sent = tx_data(conn, &iov[0], iov_count, tx_size);
	if (tx_size != tx_sent) {
		if (tx_sent == -EAGAIN) {
			pr_err("tx_data() returned -EAGAIN\n");
			goto send_data;
		} else
			return -1;
	}
	cmd->tx_size = 0;

	return 0;
}
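
/*
 * Transmit a PDU whose payload still lives in the command's scatterlist:
 * the header (plus optional header digest) goes out via tx_data(), the
 * data pages via sock->ops->sendpage(), and any padding and data digest
 * from the tail entries of cmd->iov_data[].
 */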
int iscsit_fe_sendpage_sg(
	struct iscsi_cmd *cmd,
	struct iscsi_conn *conn)
{
	struct scatterlist *sg = cmd->first_data_sg;
	struct kvec iov;
	u32 tx_hdr_size, data_len;
	u32 offset = cmd->first_data_sg_off;
	int tx_sent, iov_off;

send_hdr:
	tx_hdr_size = ISCSI_HDR_LEN;
	if (conn->conn_ops->HeaderDigest)
		tx_hdr_size += ISCSI_CRC_LEN;

	iov.iov_base = cmd->pdu;
	iov.iov_len = tx_hdr_size;

	tx_sent = tx_data(conn, &iov, 1, tx_hdr_size);
	if (tx_hdr_size != tx_sent) {
		if (tx_sent == -EAGAIN) {
			pr_err("tx_data() returned -EAGAIN\n");
			goto send_hdr;
		}
		return -1;
	}

	data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
	/*
	 * Set iov_off used by padding and data digest tx_data() calls below
	 * in order to determine proper offset into cmd->iov_data[]
	 */
	if (conn->conn_ops->DataDigest) {
		data_len -= ISCSI_CRC_LEN;
		if (cmd->padding)
			iov_off = (cmd->iov_data_count - 2);
		else
			iov_off = (cmd->iov_data_count - 1);
	} else {
		iov_off = (cmd->iov_data_count - 1);
	}
	/*
	 * Perform sendpage() for each page in the scatterlist
	 */
	while (data_len) {
		u32 space = (sg->length - offset);
		u32 sub_len = min_t(u32, data_len, space);
send_pg:
		tx_sent = conn->sock->ops->sendpage(conn->sock,
				sg_page(sg), sg->offset + offset, sub_len, 0);
		if (tx_sent != sub_len) {
			if (tx_sent == -EAGAIN) {
				pr_err("tcp_sendpage() returned"
					" -EAGAIN\n");
				goto send_pg;
			}

			pr_err("tcp_sendpage() failure: %d\n",
				tx_sent);
			return -1;
		}

		data_len -= sub_len;
		offset = 0;
		sg = sg_next(sg);
	}

send_padding:
	if (cmd->padding) {
		struct kvec *iov_p = &cmd->iov_data[iov_off++];

		tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
		if (cmd->padding != tx_sent) {
			if (tx_sent == -EAGAIN) {
				pr_err("tx_data() returned -EAGAIN\n");
				goto send_padding;
			}
			return -1;
		}
	}

send_datacrc:
	if (conn->conn_ops->DataDigest) {
		struct kvec *iov_d = &cmd->iov_data[iov_off];

		tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
		if (ISCSI_CRC_LEN != tx_sent) {
			if (tx_sent == -EAGAIN) {
				pr_err("tx_data() returned -EAGAIN\n");
				goto send_datacrc;
			}
			return -1;
		}
	}

	return 0;
}

/*
 * This function is mainly used for sending an ISCSI_TARG_LOGIN_RSP PDU
 * back to the Initiator when an exception condition occurs with the
 * errors set in status_class and status_detail.
 *
 * Parameters:	iSCSI Connection, Status Class, Status Detail.
 * Returns:	0 on success, -1 on error.
 */
int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_detail)
{
	struct iscsi_login_rsp *hdr;
	struct iscsi_login *login = conn->conn_login;

	login->login_failed = 1;
	iscsit_collect_login_stats(conn, status_class, status_detail);

	memset(&login->rsp[0], 0, ISCSI_HDR_LEN);

	hdr = (struct iscsi_login_rsp *)&login->rsp[0];
	hdr->opcode = ISCSI_OP_LOGIN_RSP;
	hdr->status_class = status_class;
	hdr->status_detail = status_detail;
	hdr->itt = conn->login_itt;

	return conn->conn_transport->iscsit_put_login_tx(conn, login, 0);
}

void iscsit_print_session_params(struct iscsi_session *sess)
{
	struct iscsi_conn *conn;

	pr_debug("-----------------------------[Session Params for"
		" SID: %u]-----------------------------\n", sess->sid);
	spin_lock_bh(&sess->conn_lock);
	list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
		iscsi_dump_conn_ops(conn->conn_ops);
	spin_unlock_bh(&sess->conn_lock);

	iscsi_dump_sess_ops(sess->sess_ops);
}
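
/*
 * Low-level socket I/O helpers: iscsit_do_rx_data()/rx_data() loop over
 * sock_recvmsg() with MSG_WAITALL until the requested kvec payload has
 * been received, and tx_data() does the same on the send side with
 * sock_sendmsg(). Both return the number of bytes transferred on success,
 * or a value <= 0 on error.
 */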
static int iscsit_do_rx_data(
	struct iscsi_conn *conn,
	struct iscsi_data_count *count)
{
	int data = count->data_length, rx_loop = 0, total_rx = 0;
	struct msghdr msg;

	if (!conn || !conn->sock || !conn->conn_ops)
		return -1;

	memset(&msg, 0, sizeof(struct msghdr));
	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC,
		      count->iov, count->iov_count, data);

	while (msg_data_left(&msg)) {
		rx_loop = sock_recvmsg(conn->sock, &msg, MSG_WAITALL);
		if (rx_loop <= 0) {
			pr_debug("rx_loop: %d total_rx: %d\n",
				rx_loop, total_rx);
			return rx_loop;
		}
		total_rx += rx_loop;
		pr_debug("rx_loop: %d, total_rx: %d, data: %d\n",
				rx_loop, total_rx, data);
	}

	return total_rx;
}

int rx_data(
	struct iscsi_conn *conn,
	struct kvec *iov,
	int iov_count,
	int data)
{
	struct iscsi_data_count c;

	if (!conn || !conn->sock || !conn->conn_ops)
		return -1;

	memset(&c, 0, sizeof(struct iscsi_data_count));
	c.iov = iov;
	c.iov_count = iov_count;
	c.data_length = data;
	c.type = ISCSI_RX_DATA;

	return iscsit_do_rx_data(conn, &c);
}

int tx_data(
	struct iscsi_conn *conn,
	struct kvec *iov,
	int iov_count,
	int data)
{
	struct msghdr msg;
	int total_tx = 0;

	if (!conn || !conn->sock || !conn->conn_ops)
		return -1;

	if (data <= 0) {
		pr_err("Data length is: %d\n", data);
		return -1;
	}

	memset(&msg, 0, sizeof(struct msghdr));

	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC,
		      iov, iov_count, data);

	while (msg_data_left(&msg)) {
		int tx_loop = sock_sendmsg(conn->sock, &msg);

		if (tx_loop <= 0) {
			pr_debug("tx_loop: %d total_tx %d\n",
				tx_loop, total_tx);
			return tx_loop;
		}
		total_tx += tx_loop;
		pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
				tx_loop, total_tx, data);
	}

	return total_tx;
}

void iscsit_collect_login_stats(
	struct iscsi_conn *conn,
	u8 status_class,
	u8 status_detail)
{
	struct iscsi_param *intrname = NULL;
	struct iscsi_tiqn *tiqn;
	struct iscsi_login_stats *ls;

	tiqn = iscsit_snmp_get_tiqn(conn);
	if (!tiqn)
		return;

	ls = &tiqn->login_stats;

	spin_lock(&ls->lock);
	if (status_class == ISCSI_STATUS_CLS_SUCCESS)
		ls->accepts++;
	else if (status_class == ISCSI_STATUS_CLS_REDIRECT) {
		ls->redirects++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_REDIRECT;
	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
		   (status_detail == ISCSI_LOGIN_STATUS_AUTH_FAILED)) {
		ls->authenticate_fails++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHENTICATE;
	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
		   (status_detail == ISCSI_LOGIN_STATUS_TGT_FORBIDDEN)) {
		ls->authorize_fails++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHORIZE;
	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
		   (status_detail == ISCSI_LOGIN_STATUS_INIT_ERR)) {
		ls->negotiate_fails++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_NEGOTIATE;
	} else {
		ls->other_fails++;
		ls->last_fail_type = ISCSI_LOGIN_FAIL_OTHER;
	}

	/* Save initiator name, ip address and time, if it is a failed login */
	if (status_class != ISCSI_STATUS_CLS_SUCCESS) {
		if (conn->param_list)
			intrname = iscsi_find_param_from_key(INITIATORNAME,
							     conn->param_list);
		strlcpy(ls->last_intr_fail_name,
			(intrname ? intrname->value : "Unknown"),
			sizeof(ls->last_intr_fail_name));

		ls->last_intr_fail_ip_family = conn->login_family;

		ls->last_intr_fail_sockaddr = conn->login_sockaddr;
		ls->last_fail_time = get_jiffies_64();
	}

	spin_unlock(&ls->lock);
}

struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn)
{
	struct iscsi_portal_group *tpg;

	if (!conn)
		return NULL;

	tpg = conn->tpg;
	if (!tpg)
		return NULL;

	if (!tpg->tpg_tiqn)
		return NULL;

	return tpg->tpg_tiqn;
}