target_core_transport.c

/*******************************************************************************
 * Filename:  target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_common.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

#define CREATE_TRACE_POINTS
#include <trace/events/target.h>

static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_lba_map_cache;
struct kmem_cache *t10_alua_lba_map_mem_cache;

static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev);
static int transport_put_cmd(struct se_cmd *cmd);
static void target_complete_ok_work(struct work_struct *work);
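
/*
 * Build the slab caches and the completion workqueue used throughout the
 * target core. On any allocation failure, the caches created so far are
 * torn down in reverse order via the chained error labels below before
 * returning -ENOMEM.
 */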
int init_se_kmem_caches(void)
{
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session"
				" failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out_free_sess_cache;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration"
				" failed\n");
		goto out_free_ua_cache;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
				" failed\n");
		goto out_free_pr_reg_cache;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
				"cache failed\n");
		goto out_free_lu_gp_cache;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"cache failed\n");
		goto out_free_lu_gp_mem_cache;
	}
	t10_alua_lba_map_cache = kmem_cache_create(
			"t10_alua_lba_map_cache",
			sizeof(struct t10_alua_lba_map),
			__alignof__(struct t10_alua_lba_map), 0, NULL);
	if (!t10_alua_lba_map_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_"
				"cache failed\n");
		goto out_free_tg_pt_gp_cache;
	}
	t10_alua_lba_map_mem_cache = kmem_cache_create(
			"t10_alua_lba_map_mem_cache",
			sizeof(struct t10_alua_lba_map_member),
			__alignof__(struct t10_alua_lba_map_member), 0, NULL);
	if (!t10_alua_lba_map_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
				"cache failed\n");
		goto out_free_lba_map_cache;
	}

	target_completion_wq = alloc_workqueue("target_completion",
					       WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
		goto out_free_lba_map_mem_cache;

	return 0;

out_free_lba_map_mem_cache:
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
out_free_lba_map_cache:
	kmem_cache_destroy(t10_alua_lba_map_cache);
out_free_tg_pt_gp_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
	kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
	kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
	kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
	kmem_cache_destroy(se_sess_cache);
out:
	return -ENOMEM;
}

void release_se_kmem_caches(void)
{
	destroy_workqueue(target_completion_wq);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_lba_map_cache);
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
}

/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}
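
/*
 * Demand-load the default backend driver modules the first time the
 * transport subsystem is used. A module that fails to load is logged
 * but is not fatal, since a fabric may only need a subset of backends.
 */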
void transport_subsystem_check_init(void)
{
	int ret;
	static int sub_api_initialized;

	if (sub_api_initialized)
		return;

	ret = request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	ret = request_module("target_core_user");
	if (ret != 0)
		pr_err("Unable to load target_core_user\n");

	sub_api_initialized = 1;
}

struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from"
				" se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);
	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
	INIT_LIST_HEAD(&se_sess->sess_wait_list);
	spin_lock_init(&se_sess->sess_cmd_lock);
	kref_init(&se_sess->sess_kref);
	se_sess->sup_prot_ops = sup_prot_ops;

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session);

int transport_alloc_session_tags(struct se_session *se_sess,
		unsigned int tag_num, unsigned int tag_size)
{
	int rc;

	se_sess->sess_cmd_map = kzalloc(tag_num * tag_size,
					GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!se_sess->sess_cmd_map) {
		se_sess->sess_cmd_map = vzalloc(tag_num * tag_size);
		if (!se_sess->sess_cmd_map) {
			pr_err("Unable to allocate se_sess->sess_cmd_map\n");
			return -ENOMEM;
		}
	}

	rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num);
	if (rc < 0) {
		pr_err("Unable to init se_sess->sess_tag_pool,"
			" tag_num: %u\n", tag_num);
		kvfree(se_sess->sess_cmd_map);
		se_sess->sess_cmd_map = NULL;
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(transport_alloc_session_tags);
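
/*
 * Usage sketch (hypothetical fabric driver, not from this file): a
 * fabric sizing its tag pool from a hardware queue depth would call
 *
 *	sess = transport_init_session_tags(queue_depth,
 *			sizeof(struct my_fabric_cmd), TARGET_PROT_NORMAL);
 *
 * where "struct my_fabric_cmd" is the fabric's own per-command
 * descriptor, carved out of the se_sess->sess_cmd_map allocation as
 * tags are claimed from the percpu-ida pool.
 */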
struct se_session *transport_init_session_tags(unsigned int tag_num,
					       unsigned int tag_size,
					       enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;
	int rc;

	if (tag_num != 0 && !tag_size) {
		pr_err("init_session_tags called with percpu-ida tag_num:"
			" %u, but zero tag_size\n", tag_num);
		return ERR_PTR(-EINVAL);
	}
	if (!tag_num && tag_size) {
		pr_err("init_session_tags called with percpu-ida tag_size:"
			" %u, but zero tag_num\n", tag_size);
		return ERR_PTR(-EINVAL);
	}

	se_sess = transport_init_session(sup_prot_ops);
	if (IS_ERR(se_sess))
		return se_sess;

	rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
	if (rc < 0) {
		transport_free_session(se_sess);
		return ERR_PTR(-ENOMEM);
	}

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session_tags);

/*
 * Called with spin_lock_irqsave(&se_tpg->session_lock) held.
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	unsigned char buf[PR_REG_ISID_LEN];

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate the active
	 * struct se_session.
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * Determine if fabric allows for T10-PI feature bits exposed to
		 * initiators for device backends with !dev->dev_attrib.pi_prot_type.
		 *
		 * If so, then always save prot_type on a per se_node_acl node
		 * basis and re-instate the previous sess_prot_type to avoid
		 * disabling PI from below any previously initiator side
		 * registered LUNs.
		 */
		if (se_nacl->saved_prot_type)
			se_sess->sess_prot_type = se_nacl->saved_prot_type;
		else if (tfo->tpg_check_prot_fabric_only)
			se_sess->sess_prot_type = se_nacl->saved_prot_type =
					tfo->tpg_check_prot_fabric_only(se_tpg);
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}

		spin_lock_irq(&se_nacl->nacl_sess_lock);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irq(&se_nacl->nacl_sess_lock);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned long flags;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
}
EXPORT_SYMBOL(transport_register_session);
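
/*
 * One-stop session constructor for fabric drivers: allocates the
 * se_session (with an optional percpu-ida tag pool when tag_num != 0),
 * resolves the initiator node ACL, runs the fabric's optional setup
 * callback, and finally registers the session with the TPG. Any
 * failure along the way unwinds via transport_free_session().
 */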
struct se_session *
target_alloc_session(struct se_portal_group *tpg,
		     unsigned int tag_num, unsigned int tag_size,
		     enum target_prot_op prot_op,
		     const char *initiatorname, void *private,
		     int (*callback)(struct se_portal_group *,
				     struct se_session *, void *))
{
	struct se_session *sess;

	/*
	 * If the fabric driver is using percpu-ida based pre allocation
	 * of I/O descriptor tags, go ahead and perform that setup now..
	 */
	if (tag_num != 0)
		sess = transport_init_session_tags(tag_num, tag_size, prot_op);
	else
		sess = transport_init_session(prot_op);

	if (IS_ERR(sess))
		return sess;

	sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
					(unsigned char *)initiatorname);
	if (!sess->se_node_acl) {
		transport_free_session(sess);
		return ERR_PTR(-EACCES);
	}
	/*
	 * Go ahead and perform any remaining fabric setup that is
	 * required before transport_register_session().
	 */
	if (callback != NULL) {
		int rc = callback(tpg, sess, private);

		if (rc) {
			transport_free_session(sess);
			return ERR_PTR(rc);
		}
	}

	transport_register_session(tpg, sess->se_node_acl, sess, private);
	return sess;
}
EXPORT_SYMBOL(target_alloc_session);

static void target_release_session(struct kref *kref)
{
	struct se_session *se_sess = container_of(kref,
			struct se_session, sess_kref);
	struct se_portal_group *se_tpg = se_sess->se_tpg;

	se_tpg->se_tpg_tfo->close_session(se_sess);
}

int target_get_session(struct se_session *se_sess)
{
	return kref_get_unless_zero(&se_sess->sess_kref);
}
EXPORT_SYMBOL(target_get_session);

void target_put_session(struct se_session *se_sess)
{
	kref_put(&se_sess->sess_kref, target_release_session);
}
EXPORT_SYMBOL(target_put_session);
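
/*
 * configfs attribute helper: emit one line per dynamically generated
 * node ACL that currently has an active session, stopping before the
 * output would overflow the single PAGE_SIZE configfs page.
 */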
ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
{
	struct se_session *se_sess;
	ssize_t len = 0;

	spin_lock_bh(&se_tpg->session_lock);
	list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
		if (!se_sess->se_node_acl)
			continue;
		if (!se_sess->se_node_acl->dynamic_node_acl)
			continue;
		if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE)
			break;

		len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
				se_sess->se_node_acl->initiatorname);
		len += 1; /* Include NULL terminator */
	}
	spin_unlock_bh(&se_tpg->session_lock);

	return len;
}
EXPORT_SYMBOL(target_show_dynamic_sessions);

static void target_complete_nacl(struct kref *kref)
{
	struct se_node_acl *nacl = container_of(kref,
				struct se_node_acl, acl_kref);

	complete(&nacl->acl_free_comp);
}

void target_put_nacl(struct se_node_acl *nacl)
{
	kref_put(&nacl->acl_kref, target_complete_nacl);
}
EXPORT_SYMBOL(target_put_nacl);

void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		if (se_nacl->acl_stop == 0)
			list_del(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl = se_sess->se_node_acl;
	/*
	 * Drop the se_node_acl->nacl_kref obtained from within
	 * core_tpg_get_initiator_node_acl().
	 */
	if (se_nacl) {
		se_sess->se_node_acl = NULL;
		target_put_nacl(se_nacl);
	}
	if (se_sess->sess_cmd_map) {
		percpu_ida_destroy(&se_sess->sess_tag_pool);
		kvfree(se_sess->sess_cmd_map);
	}
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	const struct target_core_fabric_ops *se_tfo;
	struct se_node_acl *se_nacl;
	unsigned long flags;
	bool drop_nacl = false;

	if (!se_tpg) {
		transport_free_session(se_sess);
		return;
	}
	se_tfo = se_tpg->se_tpg_tfo;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);

	/*
	 * Determine if we need to do extra work for this initiator node's
	 * struct se_node_acl if it had been previously dynamically generated.
	 */
	se_nacl = se_sess->se_node_acl;

	mutex_lock(&se_tpg->acl_node_mutex);
	if (se_nacl && se_nacl->dynamic_node_acl) {
		if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
			list_del(&se_nacl->acl_list);
			drop_nacl = true;
		}
	}
	mutex_unlock(&se_tpg->acl_node_mutex);

	if (drop_nacl) {
		core_tpg_wait_for_nacl_pr_ref(se_nacl);
		core_free_device_list_for_node(se_nacl, se_tpg);

		se_sess->se_node_acl = NULL;
		kfree(se_nacl);
	}
	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
		se_tpg->se_tpg_tfo->get_fabric_name());
	/*
	 * If last kref is dropping now for an explicit NodeACL, awake sleeping
	 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
	 * removal context from within transport_free_session() code.
	 */
	transport_free_session(se_sess);
}
EXPORT_SYMBOL(transport_deregister_session);

static void target_remove_from_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	if (!dev)
		return;

	if (cmd->transport_state & CMD_T_BUSY)
		return;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (cmd->state_active) {
		list_del(&cmd->state_list);
		cmd->state_active = false;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
				    bool write_pending)
{
	unsigned long flags;

	if (remove_from_lists) {
		target_remove_from_state_list(cmd);

		/*
		 * Clear struct se_cmd->se_lun before the handoff to FE.
		 */
		cmd->se_lun = NULL;
	}

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (write_pending)
		cmd->t_state = TRANSPORT_WRITE_PENDING;

	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
			__func__, __LINE__, cmd->tag);

		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete_all(&cmd->t_transport_stop_comp);
		return 1;
	}

	cmd->transport_state &= ~CMD_T_ACTIVE;
	if (remove_from_lists) {
		/*
		 * Some fabric modules like tcm_loop can release
		 * their internally allocated I/O reference now and
		 * struct se_cmd now.
		 *
		 * Fabric modules are expected to return '1' here if the
		 * se_cmd being passed is released at this point,
		 * or zero if not being released.
		 */
		if (cmd->se_tfo->check_stop_free != NULL) {
			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
			return cmd->se_tfo->check_stop_free(cmd);
		}
	}

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	return 0;
}

static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	return transport_cmd_check_stop(cmd, true, false);
}

static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;

	if (!lun)
		return;

	if (cmpxchg(&cmd->lun_ref_active, true, false))
		percpu_ref_put(&lun->lun_ref);
}

void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
	bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);

	if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
		transport_lun_remove_cmd(cmd);
	/*
	 * Allow the fabric driver to unmap any resources before
	 * releasing the descriptor via TFO->release_cmd()
	 */
	if (remove)
		cmd->se_tfo->aborted_task(cmd);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return;
	if (remove && ack_kref)
		transport_put_cmd(cmd);
}

static void target_complete_failure_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	transport_generic_request_failure(cmd,
			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
}

/*
 * Used when asking transport to copy Sense Data from the underlying
 * Linux/SCSI struct scsi_cmnd
 */
static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	WARN_ON(!cmd->se_lun);

	if (!dev)
		return NULL;

	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
		return NULL;

	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;

	pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
		dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
	return cmd->sense_buffer;
}
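
/*
 * Backend completion entry point, callable from interrupt context: the
 * backend reports the SAM status here, and the actual completion work
 * (good path or exception path) is deferred to target_completion_wq.
 */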
void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
{
	struct se_device *dev = cmd->se_dev;
	int success = scsi_status == GOOD;
	unsigned long flags;

	cmd->scsi_status = scsi_status;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state &= ~CMD_T_BUSY;

	if (dev && dev->transport->transport_complete) {
		dev->transport->transport_complete(cmd,
				cmd->t_data_sg,
				transport_get_sense_buffer(cmd));
		if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
			success = 1;
	}

	/*
	 * Check for case where an explicit ABORT_TASK has been received
	 * and transport_wait_for_tasks() will be waiting for completion..
	 */
	if (cmd->transport_state & CMD_T_ABORTED ||
	    cmd->transport_state & CMD_T_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete_all(&cmd->t_transport_stop_comp);
		return;
	} else if (!success) {
		INIT_WORK(&cmd->work, target_complete_failure_work);
	} else {
		INIT_WORK(&cmd->work, target_complete_ok_work);
	}

	cmd->t_state = TRANSPORT_COMPLETE;
	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (cmd->se_cmd_flags & SCF_USE_CPUID)
		queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
	else
		queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(target_complete_cmd);

void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
{
	if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
		if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
			cmd->residual_count += cmd->data_length - length;
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = cmd->data_length - length;
		}

		cmd->data_length = length;
	}

	target_complete_cmd(cmd, scsi_status);
}
EXPORT_SYMBOL(target_complete_cmd_with_length);

static void target_add_to_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (!cmd->state_active) {
		list_add_tail(&cmd->state_list, &dev->state_list);
		cmd->state_active = true;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*
 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
 */
static void transport_write_pending_qf(struct se_cmd *cmd);
static void transport_complete_qf(struct se_cmd *cmd);

void target_qf_do_work(struct work_struct *work)
{
	struct se_device *dev = container_of(work, struct se_device,
					qf_work_queue);
	LIST_HEAD(qf_cmd_list);
	struct se_cmd *cmd, *cmd_tmp;

	spin_lock_irq(&dev->qf_cmd_lock);
	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
	spin_unlock_irq(&dev->qf_cmd_lock);

	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
		list_del(&cmd->se_qf_node);
		atomic_dec_mb(&dev->dev_qf_count);

		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
			: "UNKNOWN");

		if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
			transport_write_pending_qf(cmd);
		else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK)
			transport_complete_qf(cmd);
	}
}

unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
	switch (cmd->data_direction) {
	case DMA_NONE:
		return "NONE";
	case DMA_FROM_DEVICE:
		return "READ";
	case DMA_TO_DEVICE:
		return "WRITE";
	case DMA_BIDIRECTIONAL:
		return "BIDI";
	default:
		break;
	}

	return "UNKNOWN";
}

void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
	if (dev->export_count)
		*bl += sprintf(b + *bl, "ACTIVATED");
	else
		*bl += sprintf(b + *bl, "DEACTIVATED");

	*bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth);
	*bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n",
		dev->dev_attrib.block_size,
		dev->dev_attrib.hw_max_sectors);
	*bl += sprintf(b + *bl, " ");
}

void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf+len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf+len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf+len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf+len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf+len, "SCSI Remote Direct Memory Access"
				" Protocol\n");
		break;
	case 0x50:
		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf+len, "Automation/Drive Interface Transport"
				" Protocol\n");
		break;
	case 0x80:
		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n",
				vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set..
	 *
	 * from spc3r23.pdf section 7.5.1
	 */
	if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);

int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 297
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
				vpd->device_identifier_type);
		ret = -EINVAL;
		break;
	}

	if (p_buf) {
		if (p_buf_len < strlen(buf)+1)
			return -EINVAL;
		strncpy(p_buf, buf, p_buf_len);
	} else {
		pr_debug("%s", buf);
	}

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 298
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		snprintf(buf, sizeof(buf),
			"T10 VPD Binary Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		snprintf(buf, sizeof(buf),
			"T10 VPD ASCII Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		snprintf(buf, sizeof(buf),
			"T10 VPD UTF-8 Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
			" 0x%02x", vpd->device_identifier_code_set);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding)
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
					hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
					hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);

static sense_reason_t
target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
			       unsigned int size)
{
	u32 mtl;

	if (!cmd->se_tfo->max_data_sg_nents)
		return TCM_NO_SENSE;
	/*
	 * Check if fabric enforced maximum SGL entries per I/O descriptor
	 * exceeds se_cmd->data_length.  If true, set SCF_UNDERFLOW_BIT +
	 * residual_count and reduce original cmd->data_length to maximum
	 * length based on single PAGE_SIZE entry scatter-lists.
	 */
	mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE);
	if (cmd->data_length > mtl) {
		/*
		 * If an existing CDB overflow is present, calculate new residual
		 * based on CDB size minus fabric maximum transfer length.
		 *
		 * If an existing CDB underflow is present, calculate new residual
		 * based on original cmd->data_length minus fabric maximum transfer
		 * length.
		 *
		 * Otherwise, set the underflow residual based on cmd->data_length
		 * minus fabric maximum transfer length.
		 */
		if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
			cmd->residual_count = (size - mtl);
		} else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
			u32 orig_dl = size + cmd->residual_count;

			cmd->residual_count = (orig_dl - mtl);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - mtl);
		}
		cmd->data_length = mtl;
		/*
		 * Reset sbc_check_prot() calculated protection payload
		 * length based upon the new smaller MTL.
		 */
		if (cmd->prot_length) {
			u32 sectors = (mtl / dev->dev_attrib.block_size);

			cmd->prot_length = dev->prot_length * sectors;
		}
	}
	return TCM_NO_SENSE;
}
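
/*
 * Reconcile the CDB-derived transfer length (@size) with the length the
 * fabric advertised in cmd->data_length. For example, a READ whose CDB
 * implies 4096 bytes against a fabric-provided 2048-byte buffer is
 * flagged SCF_OVERFLOW_BIT with residual_count == 2048; the reverse
 * case is flagged SCF_UNDERFLOW_BIT and data_length is shrunk to the
 * smaller CDB-derived size. Mismatched WRITE payloads are rejected
 * outright.
 */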
sense_reason_t
target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
	struct se_device *dev = cmd->se_dev;

	if (cmd->unknown_data_length) {
		cmd->data_length = size;
	} else if (size != cmd->data_length) {
		pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
				cmd->data_length, size, cmd->t_task_cdb[0]);

		if (cmd->data_direction == DMA_TO_DEVICE &&
		    cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
			pr_err("Rejecting underflow/overflow WRITE data\n");
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * Reject READ_* or WRITE_* with overflow/underflow for
		 * type SCF_SCSI_DATA_CDB.
		 */
		if (dev->dev_attrib.block_size != 512) {
			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
				" CDB on non 512-byte sector setup subsystem"
				" plugin: %s\n", dev->transport->name);
			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * For the overflow case keep the existing fabric provided
		 * ->data_length.  Otherwise for the underflow case, reset
		 * ->data_length to the smaller SCSI expected data transfer
		 * length.
		 */
		if (size > cmd->data_length) {
			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
			cmd->residual_count = (size - cmd->data_length);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - size);
			cmd->data_length = size;
		}
	}

	return target_check_max_data_sg_nents(cmd, dev, size);
}

/*
 * Used by fabric modules containing a local struct se_cmd within their
 * fabric dependent per I/O descriptor.
 *
 * Preserves the value of @cmd->tag.
 */
void transport_init_se_cmd(
	struct se_cmd *cmd,
	const struct target_core_fabric_ops *tfo,
	struct se_session *se_sess,
	u32 data_length,
	int data_direction,
	int task_attr,
	unsigned char *sense_buffer)
{
	INIT_LIST_HEAD(&cmd->se_delayed_node);
	INIT_LIST_HEAD(&cmd->se_qf_node);
	INIT_LIST_HEAD(&cmd->se_cmd_list);
	INIT_LIST_HEAD(&cmd->state_list);
	init_completion(&cmd->t_transport_stop_comp);
	init_completion(&cmd->cmd_wait_comp);
	spin_lock_init(&cmd->t_state_lock);
	kref_init(&cmd->cmd_kref);
	cmd->transport_state = CMD_T_DEV_ACTIVE;

	cmd->se_tfo = tfo;
	cmd->se_sess = se_sess;
	cmd->data_length = data_length;
	cmd->data_direction = data_direction;
	cmd->sam_task_attr = task_attr;
	cmd->sense_buffer = sense_buffer;

	cmd->state_active = false;
}
EXPORT_SYMBOL(transport_init_se_cmd);

static sense_reason_t
transport_check_alloc_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return 0;

	if (cmd->sam_task_attr == TCM_ACA_TAG) {
		pr_debug("SAM Task Attribute ACA"
			" emulation is not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}

	return 0;
}

sense_reason_t
target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;

	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		pr_err("Received SCSI CDB with command_size: %d that"
			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
	 * allocate the additional extended CDB buffer now..  Otherwise
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
						GFP_KERNEL);
		if (!cmd->t_task_cdb) {
			pr_err("Unable to allocate cmd->t_task_cdb"
				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
				scsi_command_size(cdb),
				(unsigned long)sizeof(cmd->__t_task_cdb));
			return TCM_OUT_OF_RESOURCES;
		}
	} else
		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
	/*
	 * Copy the original CDB into cmd->
	 */
	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));

	trace_target_sequencer_start(cmd);

	/*
	 * Check for an existing UNIT ATTENTION condition
	 */
	ret = target_scsi3_ua_check(cmd);
	if (ret)
		return ret;

	ret = target_alua_state_check(cmd);
	if (ret)
		return ret;

	ret = target_check_reservation(cmd);
	if (ret) {
		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
		return ret;
	}

	ret = dev->transport->parse_cdb(cmd);
	if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
		pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
				    cmd->se_tfo->get_fabric_name(),
				    cmd->se_sess->se_node_acl->initiatorname,
				    cmd->t_task_cdb[0]);
	if (ret)
		return ret;

	ret = transport_check_alloc_task_attr(cmd);
	if (ret)
		return ret;

	cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
	atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
	return 0;
}
EXPORT_SYMBOL(target_setup_cmd_from_cdb);

/*
 * Used by fabric module frontends to queue tasks directly.
 * May only be used from process context.
 */
int transport_handle_cdb_direct(
	struct se_cmd *cmd)
{
	sense_reason_t ret;

	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}
	if (in_interrupt()) {
		dump_stack();
		pr_err("transport_generic_handle_cdb cannot be called"
				" from interrupt context\n");
		return -EINVAL;
	}
	/*
	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
	 * outstanding descriptors are handled correctly during shutdown via
	 * transport_wait_for_tasks()
	 *
	 * Also, we don't take cmd->t_state_lock here as we only expect
	 * this to be called for initial descriptor submission.
	 */
	cmd->t_state = TRANSPORT_NEW_CMD;
	cmd->transport_state |= CMD_T_ACTIVE;

	/*
	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
	 * so follow TRANSPORT_NEW_CMD processing thread context usage
	 * and call transport_generic_request_failure() if necessary..
	 */
	ret = transport_generic_new_cmd(cmd);
	if (ret)
		transport_generic_request_failure(cmd, ret);
	return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);
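
/*
 * Attach caller-provided scatterlists to the command instead of having
 * target-core allocate payload memory; used by fabrics (such as
 * tcm_loop or vhost-scsi) that already own page-backed buffers.
 * Overflowed commands are rejected here because the passed-in SGLs
 * were sized for the original expected transfer length.
 */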
sense_reason_t
transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
		u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
{
	if (!sgl || !sgl_count)
		return 0;

	/*
	 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
	 * scatterlists already have been set to follow what the fabric
	 * passes for the original expected data transfer length.
	 */
	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		pr_warn("Rejecting SCSI DATA overflow for fabric using"
			" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
		return TCM_INVALID_CDB_FIELD;
	}

	cmd->t_data_sg = sgl;
	cmd->t_data_nents = sgl_count;
	cmd->t_bidi_data_sg = sgl_bidi;
	cmd->t_bidi_data_nents = sgl_bidi_count;

	cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	return 0;
}
/*
 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
 *                              se_cmd + use pre-allocated SGL memory.
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_table
 * @sgl: struct scatterlist memory for unidirectional mapping
 * @sgl_count: scatterlist count for unidirectional mapping
 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
 * @sgl_prot: struct scatterlist memory protection information
 * @sgl_prot_count: scatterlist count for protection information
 *
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
 * Returns non zero to signal active I/O shutdown failure.  All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 */
int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
                unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
                u32 data_length, int task_attr, int data_dir, int flags,
                struct scatterlist *sgl, u32 sgl_count,
                struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
                struct scatterlist *sgl_prot, u32 sgl_prot_count)
{
        struct se_portal_group *se_tpg;
        sense_reason_t rc;
        int ret;

        se_tpg = se_sess->se_tpg;
        BUG_ON(!se_tpg);
        BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
        BUG_ON(in_interrupt());
        /*
         * Initialize se_cmd for target operation.  From this point
         * exceptions are handled by sending exception status via
         * target_core_fabric_ops->queue_status() callback
         */
        transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
                              data_length, data_dir, task_attr, sense);

        if (flags & TARGET_SCF_USE_CPUID)
                se_cmd->se_cmd_flags |= SCF_USE_CPUID;
        else
                se_cmd->cpuid = WORK_CPU_UNBOUND;

        if (flags & TARGET_SCF_UNKNOWN_SIZE)
                se_cmd->unknown_data_length = 1;
        /*
         * Obtain struct se_cmd->cmd_kref reference and add new cmd to
         * se_sess->sess_cmd_list.  A second kref_get here is necessary
         * for fabrics using TARGET_SCF_ACK_KREF that expect a second
         * kref_put() to happen during fabric packet acknowledgement.
         */
        ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
        if (ret)
                return ret;
        /*
         * Signal bidirectional data payloads to target-core
         */
        if (flags & TARGET_SCF_BIDI_OP)
                se_cmd->se_cmd_flags |= SCF_BIDI;
        /*
         * Locate se_lun pointer and attach it to struct se_cmd
         */
        rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
        if (rc) {
                transport_send_check_condition_and_sense(se_cmd, rc, 0);
                target_put_sess_cmd(se_cmd);
                return 0;
        }

        rc = target_setup_cmd_from_cdb(se_cmd, cdb);
        if (rc != 0) {
                transport_generic_request_failure(se_cmd, rc);
                return 0;
        }

        /*
         * Save pointers for SGLs containing protection information,
         * if present.
         */
        if (sgl_prot_count) {
                se_cmd->t_prot_sg = sgl_prot;
                se_cmd->t_prot_nents = sgl_prot_count;
                se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
        }

        /*
         * When a non zero sgl_count has been passed perform SGL passthrough
         * mapping for pre-allocated fabric memory instead of having target
         * core perform an internal SGL allocation.
         */
        if (sgl_count != 0) {
                BUG_ON(!sgl);

                /*
                 * A work-around for tcm_loop as some userspace code via
                 * scsi-generic does not memset its associated read buffers,
                 * so go ahead and do that here for type non-data CDBs.  Also
                 * note that this is currently guaranteed to be a single SGL
                 * for this case by target core in target_setup_cmd_from_cdb()
                 * -> transport_generic_cmd_sequencer().
                 */
                if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
                     se_cmd->data_direction == DMA_FROM_DEVICE) {
                        unsigned char *buf = NULL;

                        if (sgl)
                                buf = kmap(sg_page(sgl)) + sgl->offset;

                        if (buf) {
                                memset(buf, 0, sgl->length);
                                kunmap(sg_page(sgl));
                        }
                }

                rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
                                sgl_bidi, sgl_bidi_count);
                if (rc != 0) {
                        transport_generic_request_failure(se_cmd, rc);
                        return 0;
                }
        }

        /*
         * Check if we need to delay processing because of ALUA
         * Active/NonOptimized primary access state.
         */
        core_alua_check_nonop_delay(se_cmd);

        transport_handle_cdb_direct(se_cmd);
        return 0;
}
EXPORT_SYMBOL(target_submit_cmd_map_sgls);

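/*
 * Example (illustrative sketch, not from the original source): submission
 * with fabric pre-allocated scatterlists, in the style of fabrics such as
 * vhost/scsi.  All my_* names are assumptions for the example.
 *
 *      static void my_fabric_queue_data_cmd(struct my_fabric_cmd *fcmd)
 *      {
 *              struct se_cmd *se_cmd = &fcmd->se_cmd;
 *
 *              se_cmd->tag = fcmd->tag;
 *              target_submit_cmd_map_sgls(se_cmd, fcmd->se_sess, fcmd->cdb,
 *                              fcmd->sense_buf, fcmd->unpacked_lun,
 *                              fcmd->data_len, TCM_SIMPLE_TAG, fcmd->data_dir,
 *                              TARGET_SCF_ACK_KREF, fcmd->sgl, fcmd->sgl_count,
 *                              NULL, 0, NULL, 0);
 *      }
 */
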
/*
 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_table
 *
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
 * Returns non zero to signal active I/O shutdown failure.  All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of the fabric payload buffer and SGL
 * memory by target-core.
 */
int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
                unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
                u32 data_length, int task_attr, int data_dir, int flags)
{
        return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
                        unpacked_lun, data_length, task_attr, data_dir,
                        flags, NULL, 0, NULL, 0, NULL, 0);
}
EXPORT_SYMBOL(target_submit_cmd);

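/*
 * Example (illustrative sketch, not from the original source): the common
 * fabric submission path with target-core allocating the payload SGLs.
 * my_* names are assumptions for the example.
 *
 *      static void my_fabric_queue_read(struct my_fabric_cmd *fcmd)
 *      {
 *              struct se_cmd *se_cmd = &fcmd->se_cmd;
 *
 *              se_cmd->tag = fcmd->tag;
 *              target_submit_cmd(se_cmd, fcmd->se_sess, fcmd->cdb,
 *                              fcmd->sense_buf, fcmd->unpacked_lun,
 *                              fcmd->data_len, TCM_SIMPLE_TAG,
 *                              DMA_FROM_DEVICE, 0);
 *      }
 */
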
static void target_complete_tmr_failure(struct work_struct *work)
{
        struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);

        se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
        se_cmd->se_tfo->queue_tm_rsp(se_cmd);

        transport_cmd_check_stop_to_fabric(se_cmd);
}

/**
 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
 *                     for TMR CDBs
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @fabric_tmr_ptr: fabric context for TMR req
 * @tm_type: Type of TM request
 * @gfp: gfp type for caller
 * @tag: referenced task tag for TMR_ABORT_TASK
 * @flags: submit cmd flags
 *
 * Callable from all contexts.
 **/
int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
                unsigned char *sense, u64 unpacked_lun,
                void *fabric_tmr_ptr, unsigned char tm_type,
                gfp_t gfp, u64 tag, int flags)
{
        struct se_portal_group *se_tpg;
        int ret;

        se_tpg = se_sess->se_tpg;
        BUG_ON(!se_tpg);

        transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
                              0, DMA_NONE, TCM_SIMPLE_TAG, sense);
        /*
         * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
         * allocation failure.
         */
        ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
        if (ret < 0)
                return -ENOMEM;

        if (tm_type == TMR_ABORT_TASK)
                se_cmd->se_tmr_req->ref_task_tag = tag;

        /* See target_submit_cmd for commentary */
        ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
        if (ret) {
                core_tmr_release_req(se_cmd->se_tmr_req);
                return ret;
        }

        ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
        if (ret) {
                /*
                 * For callback during failure handling, push this work off
                 * to process context with TMR_LUN_DOES_NOT_EXIST status.
                 */
                INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
                schedule_work(&se_cmd->work);
                return 0;
        }
        transport_generic_handle_tmr(se_cmd);
        return 0;
}
EXPORT_SYMBOL(target_submit_tmr);

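/*
 * Example (illustrative sketch, not from the original source): aborting an
 * outstanding command by task tag.  my_* names are assumptions for the
 * example.
 *
 *      static void my_fabric_abort_task(struct my_fabric_cmd *fcmd, u64 tag)
 *      {
 *              if (target_submit_tmr(&fcmd->se_cmd, fcmd->se_sess,
 *                              fcmd->sense_buf, fcmd->unpacked_lun, fcmd,
 *                              TMR_ABORT_TASK, GFP_KERNEL, tag,
 *                              TARGET_SCF_ACK_KREF) < 0)
 *                      my_fabric_drop_cmd(fcmd);
 *      }
 */
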
/*
 * Handle SAM-esque emulation for generic transport request failures.
 */
void transport_generic_request_failure(struct se_cmd *cmd,
                sense_reason_t sense_reason)
{
        int ret = 0, post_ret = 0;

        pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx"
                " CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]);
        pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n",
                cmd->se_tfo->get_cmd_state(cmd),
                cmd->t_state, sense_reason);
        pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
                (cmd->transport_state & CMD_T_ACTIVE) != 0,
                (cmd->transport_state & CMD_T_STOP) != 0,
                (cmd->transport_state & CMD_T_SENT) != 0);

        /*
         * For SAM Task Attribute emulation for failed struct se_cmd
         */
        transport_complete_task_attr(cmd);
        /*
         * Handle special case for COMPARE_AND_WRITE failure, where the
         * callback is expected to drop the per device ->caw_sem.
         */
        if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
             cmd->transport_complete_callback)
                cmd->transport_complete_callback(cmd, false, &post_ret);

        switch (sense_reason) {
        case TCM_NON_EXISTENT_LUN:
        case TCM_UNSUPPORTED_SCSI_OPCODE:
        case TCM_INVALID_CDB_FIELD:
        case TCM_INVALID_PARAMETER_LIST:
        case TCM_PARAMETER_LIST_LENGTH_ERROR:
        case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
        case TCM_UNKNOWN_MODE_PAGE:
        case TCM_WRITE_PROTECTED:
        case TCM_ADDRESS_OUT_OF_RANGE:
        case TCM_CHECK_CONDITION_ABORT_CMD:
        case TCM_CHECK_CONDITION_UNIT_ATTENTION:
        case TCM_CHECK_CONDITION_NOT_READY:
        case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
        case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
        case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
                break;
        case TCM_OUT_OF_RESOURCES:
                sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                break;
        case TCM_RESERVATION_CONFLICT:
                /*
                 * No SENSE Data payload for this case, set SCSI Status
                 * and queue the response to $FABRIC_MOD.
                 *
                 * Uses linux/include/scsi/scsi.h SAM status code defs.
                 */
                cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
                /*
                 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
                 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
                 * CONFLICT STATUS.
                 *
                 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
                 */
                if (cmd->se_sess &&
                    cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) {
                        target_ua_allocate_lun(cmd->se_sess->se_node_acl,
                                        cmd->orig_fe_lun, 0x2C,
                                        ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
                }
                trace_target_cmd_complete(cmd);
                ret = cmd->se_tfo->queue_status(cmd);
                if (ret == -EAGAIN || ret == -ENOMEM)
                        goto queue_full;
                goto check_stop;
        default:
                pr_err("Unknown transport error for CDB 0x%02x: %d\n",
                        cmd->t_task_cdb[0], sense_reason);
                sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
                break;
        }

        ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
        if (ret == -EAGAIN || ret == -ENOMEM)
                goto queue_full;

check_stop:
        transport_lun_remove_cmd(cmd);
        transport_cmd_check_stop_to_fabric(cmd);
        return;

queue_full:
        cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
        transport_handle_queue_full(cmd, cmd->se_dev);
}
EXPORT_SYMBOL(transport_generic_request_failure);

void __target_execute_cmd(struct se_cmd *cmd)
{
        sense_reason_t ret;

        if (cmd->execute_cmd) {
                ret = cmd->execute_cmd(cmd);
                if (ret) {
                        spin_lock_irq(&cmd->t_state_lock);
                        cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
                        spin_unlock_irq(&cmd->t_state_lock);

                        transport_generic_request_failure(cmd, ret);
                }
        }
}

static int target_write_prot_action(struct se_cmd *cmd)
{
        u32 sectors;
        /*
         * Perform WRITE_INSERT of PI using software emulation when backend
         * device has PI enabled, if the transport has not already generated
         * PI using hardware WRITE_INSERT offload.
         */
        switch (cmd->prot_op) {
        case TARGET_PROT_DOUT_INSERT:
                if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
                        sbc_dif_generate(cmd);
                break;
        case TARGET_PROT_DOUT_STRIP:
                if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP)
                        break;

                sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size);
                cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
                                             sectors, 0, cmd->t_prot_sg, 0);
                if (unlikely(cmd->pi_err)) {
                        spin_lock_irq(&cmd->t_state_lock);
                        cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
                        spin_unlock_irq(&cmd->t_state_lock);
                        transport_generic_request_failure(cmd, cmd->pi_err);
                        return -1;
                }
                break;
        default:
                break;
        }

        return 0;
}

static bool target_handle_task_attr(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;

        if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return false;
        /*
         * Check for a HEAD_OF_QUEUE attribute, which allows the passed
         * struct se_cmd to be executed ahead of the other queued tasks.
         */
        switch (cmd->sam_task_attr) {
        case TCM_HEAD_TAG:
                pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
                         cmd->t_task_cdb[0]);
                return false;
        case TCM_ORDERED_TAG:
                atomic_inc_mb(&dev->dev_ordered_sync);

                pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
                         cmd->t_task_cdb[0]);

                /*
                 * Execute an ORDERED command if no other older commands
                 * exist that need to be completed first.
                 */
                if (!atomic_read(&dev->simple_cmds))
                        return false;
                break;
        default:
                /*
                 * For SIMPLE and UNTAGGED Task Attribute commands
                 */
                atomic_inc_mb(&dev->simple_cmds);
                break;
        }

        if (atomic_read(&dev->dev_ordered_sync) == 0)
                return false;

        spin_lock(&dev->delayed_cmd_lock);
        list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
        spin_unlock(&dev->delayed_cmd_lock);

        pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD list\n",
                cmd->t_task_cdb[0], cmd->sam_task_attr);
        return true;
}

static int __transport_check_aborted_status(struct se_cmd *, int);

void target_execute_cmd(struct se_cmd *cmd)
{
        /*
         * Determine if frontend context caller is requesting the stopping of
         * this command for frontend exceptions.
         *
         * If the received CDB has already been aborted stop processing it here.
         */
        spin_lock_irq(&cmd->t_state_lock);
        if (__transport_check_aborted_status(cmd, 1)) {
                spin_unlock_irq(&cmd->t_state_lock);
                return;
        }
        if (cmd->transport_state & CMD_T_STOP) {
                pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
                        __func__, __LINE__, cmd->tag);

                spin_unlock_irq(&cmd->t_state_lock);
                complete_all(&cmd->t_transport_stop_comp);
                return;
        }

        cmd->t_state = TRANSPORT_PROCESSING;
        cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
        spin_unlock_irq(&cmd->t_state_lock);

        if (target_write_prot_action(cmd))
                return;

        if (target_handle_task_attr(cmd)) {
                spin_lock_irq(&cmd->t_state_lock);
                cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT);
                spin_unlock_irq(&cmd->t_state_lock);
                return;
        }

        __target_execute_cmd(cmd);
}
EXPORT_SYMBOL(target_execute_cmd);

/*
 * Process all commands up to the last received ORDERED task attribute which
 * requires another blocking boundary
 */
static void target_restart_delayed_cmds(struct se_device *dev)
{
        for (;;) {
                struct se_cmd *cmd;

                spin_lock(&dev->delayed_cmd_lock);
                if (list_empty(&dev->delayed_cmd_list)) {
                        spin_unlock(&dev->delayed_cmd_lock);
                        break;
                }

                cmd = list_entry(dev->delayed_cmd_list.next,
                                 struct se_cmd, se_delayed_node);
                list_del(&cmd->se_delayed_node);
                spin_unlock(&dev->delayed_cmd_lock);

                __target_execute_cmd(cmd);

                if (cmd->sam_task_attr == TCM_ORDERED_TAG)
                        break;
        }
}

/*
 * Called from I/O completion to determine which dormant/delayed
 * and ordered cmds need to have their tasks added to the execution queue.
 */
static void transport_complete_task_attr(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;

        if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return;

        if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
                atomic_dec_mb(&dev->simple_cmds);
                dev->dev_cur_ordered_id++;
                pr_debug("Incremented dev->dev_cur_ordered_id: %u for SIMPLE\n",
                         dev->dev_cur_ordered_id);
        } else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
                dev->dev_cur_ordered_id++;
                pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
                         dev->dev_cur_ordered_id);
        } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
                atomic_dec_mb(&dev->dev_ordered_sync);

                dev->dev_cur_ordered_id++;
                pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
                         dev->dev_cur_ordered_id);
        }

        target_restart_delayed_cmds(dev);
}

static void transport_complete_qf(struct se_cmd *cmd)
{
        int ret = 0;

        transport_complete_task_attr(cmd);

        if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
                trace_target_cmd_complete(cmd);
                ret = cmd->se_tfo->queue_status(cmd);
                goto out;
        }

        switch (cmd->data_direction) {
        case DMA_FROM_DEVICE:
                if (cmd->scsi_status)
                        goto queue_status;

                trace_target_cmd_complete(cmd);
                ret = cmd->se_tfo->queue_data_in(cmd);
                break;
        case DMA_TO_DEVICE:
                if (cmd->se_cmd_flags & SCF_BIDI) {
                        ret = cmd->se_tfo->queue_data_in(cmd);
                        break;
                }
                /* Fall through for DMA_TO_DEVICE */
        case DMA_NONE:
queue_status:
                trace_target_cmd_complete(cmd);
                ret = cmd->se_tfo->queue_status(cmd);
                break;
        default:
                break;
        }

out:
        if (ret < 0) {
                transport_handle_queue_full(cmd, cmd->se_dev);
                return;
        }
        transport_lun_remove_cmd(cmd);
        transport_cmd_check_stop_to_fabric(cmd);
}

static void transport_handle_queue_full(
        struct se_cmd *cmd,
        struct se_device *dev)
{
        spin_lock_irq(&dev->qf_cmd_lock);
        list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
        atomic_inc_mb(&dev->dev_qf_count);
        spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);

        schedule_work(&cmd->se_dev->qf_work_queue);
}

static bool target_read_prot_action(struct se_cmd *cmd)
{
        switch (cmd->prot_op) {
        case TARGET_PROT_DIN_STRIP:
                if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
                        u32 sectors = cmd->data_length >>
                                      ilog2(cmd->se_dev->dev_attrib.block_size);

                        cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
                                                     sectors, 0, cmd->t_prot_sg,
                                                     0);
                        if (cmd->pi_err)
                                return true;
                }
                break;
        case TARGET_PROT_DIN_INSERT:
                if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT)
                        break;

                sbc_dif_generate(cmd);
                break;
        default:
                break;
        }

        return false;
}

static void target_complete_ok_work(struct work_struct *work)
{
        struct se_cmd *cmd = container_of(work, struct se_cmd, work);
        int ret;

        /*
         * Check if we need to move delayed/dormant tasks from cmds on the
         * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
         * Attribute.
         */
        transport_complete_task_attr(cmd);

        /*
         * Check to schedule QUEUE_FULL work, or execute an existing
         * cmd->transport_qf_callback()
         */
        if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
                schedule_work(&cmd->se_dev->qf_work_queue);

        /*
         * Check if we need to send a sense buffer from
         * the struct se_cmd in question.
         */
        if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
                WARN_ON(!cmd->scsi_status);
                ret = transport_send_check_condition_and_sense(
                                        cmd, 0, 1);
                if (ret == -EAGAIN || ret == -ENOMEM)
                        goto queue_full;

                transport_lun_remove_cmd(cmd);
                transport_cmd_check_stop_to_fabric(cmd);
                return;
        }
        /*
         * Check for a callback, used amongst other things by
         * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation.
         */
        if (cmd->transport_complete_callback) {
                sense_reason_t rc;
                bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
                bool zero_dl = !(cmd->data_length);
                int post_ret = 0;

                rc = cmd->transport_complete_callback(cmd, true, &post_ret);
                if (!rc && !post_ret) {
                        if (caw && zero_dl)
                                goto queue_rsp;

                        return;
                } else if (rc) {
                        ret = transport_send_check_condition_and_sense(cmd,
                                                rc, 0);
                        if (ret == -EAGAIN || ret == -ENOMEM)
                                goto queue_full;

                        transport_lun_remove_cmd(cmd);
                        transport_cmd_check_stop_to_fabric(cmd);
                        return;
                }
        }

queue_rsp:
        switch (cmd->data_direction) {
        case DMA_FROM_DEVICE:
                if (cmd->scsi_status)
                        goto queue_status;

                atomic_long_add(cmd->data_length,
                                &cmd->se_lun->lun_stats.tx_data_octets);
                /*
                 * Perform READ_STRIP of PI using software emulation when
                 * backend had PI enabled, if the transport will not be
                 * performing hardware READ_STRIP offload.
                 */
                if (target_read_prot_action(cmd)) {
                        ret = transport_send_check_condition_and_sense(cmd,
                                                cmd->pi_err, 0);
                        if (ret == -EAGAIN || ret == -ENOMEM)
                                goto queue_full;

                        transport_lun_remove_cmd(cmd);
                        transport_cmd_check_stop_to_fabric(cmd);
                        return;
                }

                trace_target_cmd_complete(cmd);
                ret = cmd->se_tfo->queue_data_in(cmd);
                if (ret == -EAGAIN || ret == -ENOMEM)
                        goto queue_full;
                break;
        case DMA_TO_DEVICE:
                atomic_long_add(cmd->data_length,
                                &cmd->se_lun->lun_stats.rx_data_octets);
                /*
                 * Check if we need to send READ payload for BIDI-COMMAND
                 */
                if (cmd->se_cmd_flags & SCF_BIDI) {
                        atomic_long_add(cmd->data_length,
                                        &cmd->se_lun->lun_stats.tx_data_octets);
                        ret = cmd->se_tfo->queue_data_in(cmd);
                        if (ret == -EAGAIN || ret == -ENOMEM)
                                goto queue_full;
                        break;
                }
                /* Fall through for DMA_TO_DEVICE */
        case DMA_NONE:
queue_status:
                trace_target_cmd_complete(cmd);
                ret = cmd->se_tfo->queue_status(cmd);
                if (ret == -EAGAIN || ret == -ENOMEM)
                        goto queue_full;
                break;
        default:
                break;
        }

        transport_lun_remove_cmd(cmd);
        transport_cmd_check_stop_to_fabric(cmd);
        return;

queue_full:
        pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
                " data_direction: %d\n", cmd, cmd->data_direction);
        cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
        transport_handle_queue_full(cmd, cmd->se_dev);
}

static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
{
        struct scatterlist *sg;
        int count;

        for_each_sg(sgl, sg, nents, count)
                __free_page(sg_page(sg));

        kfree(sgl);
}

static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
{
        /*
         * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE
         * emulation, and free + reset pointers if necessary.
         */
        if (!cmd->t_data_sg_orig)
                return;

        kfree(cmd->t_data_sg);
        cmd->t_data_sg = cmd->t_data_sg_orig;
        cmd->t_data_sg_orig = NULL;
        cmd->t_data_nents = cmd->t_data_nents_orig;
        cmd->t_data_nents_orig = 0;
}

static inline void transport_free_pages(struct se_cmd *cmd)
{
        if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
                transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
                cmd->t_prot_sg = NULL;
                cmd->t_prot_nents = 0;
        }

        if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
                /*
                 * Release special case READ buffer payload required for
                 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
                 */
                if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
                        transport_free_sgl(cmd->t_bidi_data_sg,
                                           cmd->t_bidi_data_nents);
                        cmd->t_bidi_data_sg = NULL;
                        cmd->t_bidi_data_nents = 0;
                }
                transport_reset_sgl_orig(cmd);
                return;
        }
        transport_reset_sgl_orig(cmd);

        transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
        cmd->t_data_sg = NULL;
        cmd->t_data_nents = 0;

        transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
        cmd->t_bidi_data_sg = NULL;
        cmd->t_bidi_data_nents = 0;
}

/**
 * transport_put_cmd - release a reference to a command
 * @cmd: command to release
 *
 * This routine releases our reference to the command and frees it if possible.
 */
static int transport_put_cmd(struct se_cmd *cmd)
{
        BUG_ON(!cmd->se_tfo);
        /*
         * If this cmd has been setup with target_get_sess_cmd(), drop
         * the kref and call ->release_cmd() in kref callback.
         */
        return target_put_sess_cmd(cmd);
}

void *transport_kmap_data_sg(struct se_cmd *cmd)
{
        struct scatterlist *sg = cmd->t_data_sg;
        struct page **pages;
        int i;

        /*
         * We need to take into account a possible offset here for fabrics like
         * tcm_loop who may be using a contig buffer from the SCSI midlayer for
         * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
         */
        if (!cmd->t_data_nents)
                return NULL;

        BUG_ON(!sg);
        if (cmd->t_data_nents == 1)
                return kmap(sg_page(sg)) + sg->offset;

        /* >1 page. use vmap */
        pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
        if (!pages)
                return NULL;

        /* convert sg[] to pages[] */
        for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
                pages[i] = sg_page(sg);
        }

        cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
        kfree(pages);
        if (!cmd->t_data_vmap)
                return NULL;

        return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
}
EXPORT_SYMBOL(transport_kmap_data_sg);

void transport_kunmap_data_sg(struct se_cmd *cmd)
{
        if (!cmd->t_data_nents) {
                return;
        } else if (cmd->t_data_nents == 1) {
                kunmap(sg_page(cmd->t_data_sg));
                return;
        }

        vunmap(cmd->t_data_vmap);
        cmd->t_data_vmap = NULL;
}
EXPORT_SYMBOL(transport_kunmap_data_sg);

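/*
 * Example (illustrative sketch, not from the original source): a CDB
 * emulation handler inspecting the payload through a linear mapping.
 * Only the two transport_k(un)map_data_sg() calls are from this file;
 * the surrounding function is an assumption.
 *
 *      static sense_reason_t my_emulate_cdb(struct se_cmd *cmd)
 *      {
 *              unsigned char *buf = transport_kmap_data_sg(cmd);
 *
 *              if (!buf)
 *                      return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 *              // ... parse or fill up to cmd->data_length bytes at buf ...
 *              transport_kunmap_data_sg(cmd);
 *              return 0;
 *      }
 */
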
int
target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
                 bool zero_page)
{
        struct scatterlist *sg;
        struct page *page;
        gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0;
        unsigned int nent;
        int i = 0;

        nent = DIV_ROUND_UP(length, PAGE_SIZE);
        sg = kmalloc(sizeof(struct scatterlist) * nent, GFP_KERNEL);
        if (!sg)
                return -ENOMEM;

        sg_init_table(sg, nent);

        while (length) {
                u32 page_len = min_t(u32, length, PAGE_SIZE);
                page = alloc_page(GFP_KERNEL | zero_flag);
                if (!page)
                        goto out;

                sg_set_page(&sg[i], page, page_len, 0);
                length -= page_len;
                i++;
        }
        *sgl = sg;
        *nents = nent;
        return 0;

out:
        while (i > 0) {
                i--;
                __free_page(sg_page(&sg[i]));
        }
        kfree(sg);
        return -ENOMEM;
}

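/*
 * Example (illustrative sketch, not from the original source): allocating
 * a zeroed scatterlist for a 16 KiB buffer (four entries with 4 KiB
 * pages), released later with transport_free_sgl().  The calling context
 * is an assumption.
 *
 *      struct scatterlist *sgl;
 *      unsigned int nents;
 *
 *      if (target_alloc_sgl(&sgl, &nents, 16384, true) < 0)
 *              return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 *      // ... fill or read sgl[0..nents - 1] ...
 *      transport_free_sgl(sgl, nents);
 */
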
/*
 * Allocate any required resources to execute the command.  For writes we
 * might not have the payload yet, so notify the fabric via a call to
 * ->write_pending instead.  Otherwise place it on the execution queue.
 */
sense_reason_t
transport_generic_new_cmd(struct se_cmd *cmd)
{
        int ret = 0;
        bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);

        if (cmd->prot_op != TARGET_PROT_NORMAL &&
            !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
                ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents,
                                       cmd->prot_length, true);
                if (ret < 0)
                        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }

        /*
         * Determine if the TCM fabric module has already allocated physical
         * memory, and is directly calling transport_generic_map_mem_to_cmd()
         * beforehand.
         */
        if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
            cmd->data_length) {

                if ((cmd->se_cmd_flags & SCF_BIDI) ||
                    (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
                        u32 bidi_length;

                        if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)
                                bidi_length = cmd->t_task_nolb *
                                              cmd->se_dev->dev_attrib.block_size;
                        else
                                bidi_length = cmd->data_length;

                        ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
                                               &cmd->t_bidi_data_nents,
                                               bidi_length, zero_flag);
                        if (ret < 0)
                                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                }

                ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
                                       cmd->data_length, zero_flag);
                if (ret < 0)
                        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
                    cmd->data_length) {
                /*
                 * Special case for COMPARE_AND_WRITE with fabrics
                 * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
                 */
                u32 caw_length = cmd->t_task_nolb *
                                 cmd->se_dev->dev_attrib.block_size;

                ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
                                       &cmd->t_bidi_data_nents,
                                       caw_length, zero_flag);
                if (ret < 0)
                        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
        /*
         * If this command is not a write we can execute it right here,
         * for write buffers we need to notify the fabric driver first
         * and let it call back once the write buffers are ready.
         */
        target_add_to_state_list(cmd);
        if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
                target_execute_cmd(cmd);
                return 0;
        }
        transport_cmd_check_stop(cmd, false, true);

        ret = cmd->se_tfo->write_pending(cmd);
        if (ret == -EAGAIN || ret == -ENOMEM)
                goto queue_full;

        /* fabric drivers should only return -EAGAIN or -ENOMEM as error */
        WARN_ON(ret);

        return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

queue_full:
        pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
        cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
        transport_handle_queue_full(cmd, cmd->se_dev);
        return 0;
}
EXPORT_SYMBOL(transport_generic_new_cmd);

static void transport_write_pending_qf(struct se_cmd *cmd)
{
        int ret;

        ret = cmd->se_tfo->write_pending(cmd);
        if (ret == -EAGAIN || ret == -ENOMEM) {
                pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
                         cmd);
                transport_handle_queue_full(cmd, cmd->se_dev);
        }
}

static bool
__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
                           unsigned long *flags);

static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
{
        unsigned long flags;

        spin_lock_irqsave(&cmd->t_state_lock, flags);
        __transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}

int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
        int ret = 0;
        bool aborted = false, tas = false;

        if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
                if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
                        target_wait_free_cmd(cmd, &aborted, &tas);

                if (!aborted || tas)
                        ret = transport_put_cmd(cmd);
        } else {
                if (wait_for_tasks)
                        target_wait_free_cmd(cmd, &aborted, &tas);
                /*
                 * Handle WRITE failure case where transport_generic_new_cmd()
                 * has already added se_cmd to state_list, but fabric has
                 * failed command before I/O submission.
                 */
                if (cmd->state_active)
                        target_remove_from_state_list(cmd);

                if (cmd->se_lun)
                        transport_lun_remove_cmd(cmd);

                if (!aborted || tas)
                        ret = transport_put_cmd(cmd);
        }
        /*
         * If the task has been internally aborted due to TMR ABORT_TASK
         * or LUN_RESET, target_core_tmr.c is responsible for performing
         * the remaining calls to target_put_sess_cmd(), and not the
         * callers of this function.
         */
        if (aborted) {
                pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
                wait_for_completion(&cmd->cmd_wait_comp);
                cmd->se_tfo->release_cmd(cmd);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL(transport_generic_free_cmd);

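/*
 * Example (illustrative sketch, not from the original source): a fabric
 * releasing its descriptor, passing wait_for_tasks = 1 on an exception
 * path so any in-flight backend I/O is quiesced first.  my_fabric_cmd is
 * an assumption for the example.
 *
 *      static void my_fabric_release(struct my_fabric_cmd *fcmd)
 *      {
 *              transport_generic_free_cmd(&fcmd->se_cmd, 1);
 *      }
 */
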
/* target_get_sess_cmd - Add command to active ->sess_cmd_list
 * @se_cmd:	command descriptor to add
 * @ack_kref:	Signal that fabric will perform an ack target_put_sess_cmd()
 */
int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
{
        struct se_session *se_sess = se_cmd->se_sess;
        unsigned long flags;
        int ret = 0;

        /*
         * Add a second kref if the fabric caller is expecting to handle
         * fabric acknowledgement that requires two target_put_sess_cmd()
         * invocations before se_cmd descriptor release.
         */
        if (ack_kref)
                kref_get(&se_cmd->cmd_kref);

        spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
        if (se_sess->sess_tearing_down) {
                ret = -ESHUTDOWN;
                goto out;
        }
        list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
out:
        spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

        if (ret && ack_kref)
                target_put_sess_cmd(se_cmd);

        return ret;
}
EXPORT_SYMBOL(target_get_sess_cmd);

static void target_free_cmd_mem(struct se_cmd *cmd)
{
        transport_free_pages(cmd);

        if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
                core_tmr_release_req(cmd->se_tmr_req);
        if (cmd->t_task_cdb != cmd->__t_task_cdb)
                kfree(cmd->t_task_cdb);
}

static void target_release_cmd_kref(struct kref *kref)
{
        struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
        struct se_session *se_sess = se_cmd->se_sess;
        unsigned long flags;
        bool fabric_stop;

        spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
        if (list_empty(&se_cmd->se_cmd_list)) {
                spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
                target_free_cmd_mem(se_cmd);
                se_cmd->se_tfo->release_cmd(se_cmd);
                return;
        }

        spin_lock(&se_cmd->t_state_lock);
        fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP);
        spin_unlock(&se_cmd->t_state_lock);

        if (se_cmd->cmd_wait_set || fabric_stop) {
                list_del_init(&se_cmd->se_cmd_list);
                spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
                target_free_cmd_mem(se_cmd);
                complete(&se_cmd->cmd_wait_comp);
                return;
        }
        list_del_init(&se_cmd->se_cmd_list);
        spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

        target_free_cmd_mem(se_cmd);
        se_cmd->se_tfo->release_cmd(se_cmd);
}

/* target_put_sess_cmd - Check for active I/O shutdown via kref_put
 * @se_cmd:	command descriptor to drop
 */
int target_put_sess_cmd(struct se_cmd *se_cmd)
{
        struct se_session *se_sess = se_cmd->se_sess;

        if (!se_sess) {
                target_free_cmd_mem(se_cmd);
                se_cmd->se_tfo->release_cmd(se_cmd);
                return 1;
        }
        return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
}
EXPORT_SYMBOL(target_put_sess_cmd);

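/*
 * Example (illustrative sketch, not from the original source): the
 * TARGET_SCF_ACK_KREF lifetime.  After transport_init_se_cmd() the kref
 * is 1, and target_get_sess_cmd(se_cmd, true) raises it to 2; target-core
 * drops one reference when the command completes back to the fabric, and
 * the fabric drops the last one when the initiator acknowledges the
 * response.  my_* names are assumptions for the example.
 *
 *      static void my_fabric_ack_received(struct my_fabric_cmd *fcmd)
 *      {
 *              target_put_sess_cmd(&fcmd->se_cmd);
 *      }
 */
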
/* target_sess_cmd_list_set_waiting - Flag all commands in
 *         sess_cmd_list to complete cmd_wait_comp.  Set
 *         sess_tearing_down so no more commands are queued.
 * @se_sess:	session to flag
 */
void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
{
        struct se_cmd *se_cmd;
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
        if (se_sess->sess_tearing_down) {
                spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
                return;
        }
        se_sess->sess_tearing_down = 1;
        list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);

        list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) {
                rc = kref_get_unless_zero(&se_cmd->cmd_kref);
                if (rc) {
                        se_cmd->cmd_wait_set = 1;
                        spin_lock(&se_cmd->t_state_lock);
                        se_cmd->transport_state |= CMD_T_FABRIC_STOP;
                        spin_unlock(&se_cmd->t_state_lock);
                }
        }

        spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);

/* target_wait_for_sess_cmds - Wait for outstanding descriptors
 * @se_sess:	session to wait for active I/O
 */
void target_wait_for_sess_cmds(struct se_session *se_sess)
{
        struct se_cmd *se_cmd, *tmp_cmd;
        unsigned long flags;
        bool tas;

        list_for_each_entry_safe(se_cmd, tmp_cmd,
                                &se_sess->sess_wait_list, se_cmd_list) {
                pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
                        " %d\n", se_cmd, se_cmd->t_state,
                        se_cmd->se_tfo->get_cmd_state(se_cmd));

                spin_lock_irqsave(&se_cmd->t_state_lock, flags);
                tas = (se_cmd->transport_state & CMD_T_TAS);
                spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);

                if (!target_put_sess_cmd(se_cmd)) {
                        if (tas)
                                target_put_sess_cmd(se_cmd);
                }

                wait_for_completion(&se_cmd->cmd_wait_comp);
                pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
                        " fabric state: %d\n", se_cmd, se_cmd->t_state,
                        se_cmd->se_tfo->get_cmd_state(se_cmd));

                se_cmd->se_tfo->release_cmd(se_cmd);
        }

        spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
        WARN_ON(!list_empty(&se_sess->sess_cmd_list));
        spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);

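/*
 * Example (illustrative sketch, not from the original source): session
 * shutdown ordering as used by fabric drivers; flag the list first, then
 * block until every outstanding descriptor has dropped its references.
 * my_fabric_close_session() is an assumption for the example.
 *
 *      static void my_fabric_close_session(struct se_session *se_sess)
 *      {
 *              target_sess_cmd_list_set_waiting(se_sess);
 *              target_wait_for_sess_cmds(se_sess);
 *              transport_deregister_session(se_sess);
 *      }
 */
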
void transport_clear_lun_ref(struct se_lun *lun)
{
        percpu_ref_kill(&lun->lun_ref);
        wait_for_completion(&lun->lun_ref_comp);
}

static bool
__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
                           bool *aborted, bool *tas, unsigned long *flags)
        __releases(&cmd->t_state_lock)
        __acquires(&cmd->t_state_lock)
{
        assert_spin_locked(&cmd->t_state_lock);
        WARN_ON_ONCE(!irqs_disabled());

        if (fabric_stop)
                cmd->transport_state |= CMD_T_FABRIC_STOP;

        if (cmd->transport_state & CMD_T_ABORTED)
                *aborted = true;

        if (cmd->transport_state & CMD_T_TAS)
                *tas = true;

        if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
            !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
                return false;

        if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
            !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
                return false;

        if (!(cmd->transport_state & CMD_T_ACTIVE))
                return false;

        if (fabric_stop && *aborted)
                return false;

        cmd->transport_state |= CMD_T_STOP;

        pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d,"
                " t_state: %d, CMD_T_STOP\n", cmd, cmd->tag,
                cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);

        spin_unlock_irqrestore(&cmd->t_state_lock, *flags);

        wait_for_completion(&cmd->t_transport_stop_comp);

        spin_lock_irqsave(&cmd->t_state_lock, *flags);
        cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);

        pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->"
                "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag);

        return true;
}

/**
 * transport_wait_for_tasks - wait for completion to occur
 * @cmd: command to wait on
 *
 * Called from frontend fabric context to wait for storage engine
 * to pause and/or release frontend generated struct se_cmd.
 */
bool transport_wait_for_tasks(struct se_cmd *cmd)
{
        unsigned long flags;
        bool ret, aborted = false, tas = false;

        spin_lock_irqsave(&cmd->t_state_lock, flags);
        ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);

        return ret;
}
EXPORT_SYMBOL(transport_wait_for_tasks);

struct sense_info {
        u8 key;
        u8 asc;
        u8 ascq;
        bool add_sector_info;
};

static const struct sense_info sense_info_table[] = {
        [TCM_NO_SENSE] = {
                .key = NOT_READY
        },
        [TCM_NON_EXISTENT_LUN] = {
                .key = ILLEGAL_REQUEST,
                .asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */
        },
        [TCM_UNSUPPORTED_SCSI_OPCODE] = {
                .key = ILLEGAL_REQUEST,
                .asc = 0x20, /* INVALID COMMAND OPERATION CODE */
        },
        [TCM_SECTOR_COUNT_TOO_MANY] = {
                .key = ILLEGAL_REQUEST,
                .asc = 0x20, /* INVALID COMMAND OPERATION CODE */
        },
        [TCM_UNKNOWN_MODE_PAGE] = {
                .key = ILLEGAL_REQUEST,
                .asc = 0x24, /* INVALID FIELD IN CDB */
        },
        [TCM_CHECK_CONDITION_ABORT_CMD] = {
                .key = ABORTED_COMMAND,
                .asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */
                .ascq = 0x03,
        },
        [TCM_INCORRECT_AMOUNT_OF_DATA] = {
                .key = ABORTED_COMMAND,
                .asc = 0x0c, /* WRITE ERROR */
                .ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */
        },
        [TCM_INVALID_CDB_FIELD] = {
                .key = ILLEGAL_REQUEST,
                .asc = 0x24, /* INVALID FIELD IN CDB */
        },
        [TCM_INVALID_PARAMETER_LIST] = {
                .key = ILLEGAL_REQUEST,
                .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */
        },
        [TCM_PARAMETER_LIST_LENGTH_ERROR] = {
                .key = ILLEGAL_REQUEST,
                .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */
        },
        [TCM_UNEXPECTED_UNSOLICITED_DATA] = {
                .key = ILLEGAL_REQUEST,
                .asc = 0x0c, /* WRITE ERROR */
                .ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */
        },
        [TCM_SERVICE_CRC_ERROR] = {
                .key = ABORTED_COMMAND,
                .asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */
                .ascq = 0x05, /* N/A */
        },
        [TCM_SNACK_REJECTED] = {
                .key = ABORTED_COMMAND,
                .asc = 0x11, /* READ ERROR */
                .ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */
        },
        [TCM_WRITE_PROTECTED] = {
                .key = DATA_PROTECT,
                .asc = 0x27, /* WRITE PROTECTED */
        },
        [TCM_ADDRESS_OUT_OF_RANGE] = {
                .key = ILLEGAL_REQUEST,
                .asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
        },
        [TCM_CHECK_CONDITION_UNIT_ATTENTION] = {
                .key = UNIT_ATTENTION,
        },
        [TCM_CHECK_CONDITION_NOT_READY] = {
                .key = NOT_READY,
        },
        [TCM_MISCOMPARE_VERIFY] = {
                .key = MISCOMPARE,
                .asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */
                .ascq = 0x00,
        },
        [TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = {
                .key = ABORTED_COMMAND,
                .asc = 0x10,
                .ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */
                .add_sector_info = true,
        },
        [TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = {
                .key = ABORTED_COMMAND,
                .asc = 0x10,
                .ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
                .add_sector_info = true,
        },
        [TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = {
                .key = ABORTED_COMMAND,
                .asc = 0x10,
                .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
                .add_sector_info = true,
        },
        [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
                /*
                 * Returning ILLEGAL REQUEST would cause immediate IO errors on
                 * Solaris initiators.  Returning NOT READY instead means the
                 * operations will be retried a finite number of times and we
                 * can survive intermittent errors.
                 */
                .key = NOT_READY,
                .asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
        },
};

static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
{
        const struct sense_info *si;
        u8 *buffer = cmd->sense_buffer;
        int r = (__force int)reason;
        u8 asc, ascq;
        bool desc_format = target_sense_desc_format(cmd->se_dev);

        if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key)
                si = &sense_info_table[r];
        else
                si = &sense_info_table[(__force int)
                                       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];

        if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
                core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
                WARN_ON_ONCE(asc == 0);
        } else if (si->asc == 0) {
                WARN_ON_ONCE(cmd->scsi_asc == 0);
                asc = cmd->scsi_asc;
                ascq = cmd->scsi_ascq;
        } else {
                asc = si->asc;
                ascq = si->ascq;
        }

        scsi_build_sense_buffer(desc_format, buffer, si->key, asc, ascq);
        if (si->add_sector_info)
                return scsi_set_sense_information(buffer,
                                                  cmd->scsi_sense_length,
                                                  cmd->bad_sector);

        return 0;
}

int
transport_send_check_condition_and_sense(struct se_cmd *cmd,
                sense_reason_t reason, int from_transport)
{
        unsigned long flags;

        spin_lock_irqsave(&cmd->t_state_lock, flags);
        if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                return 0;
        }
        cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);

        if (!from_transport) {
                int rc;

                cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
                cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
                cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
                rc = translate_sense_reason(cmd, reason);
                if (rc)
                        return rc;
        }

        trace_target_cmd_complete(cmd);
        return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);

static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
        __releases(&cmd->t_state_lock)
        __acquires(&cmd->t_state_lock)
{
        assert_spin_locked(&cmd->t_state_lock);
        WARN_ON_ONCE(!irqs_disabled());

        if (!(cmd->transport_state & CMD_T_ABORTED))
                return 0;
        /*
         * If cmd has been aborted but either no status is to be sent or it has
         * already been sent, just return
         */
        if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) {
                if (send_status)
                        cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
                return 1;
        }

        pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:"
                " 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag);

        cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
        cmd->scsi_status = SAM_STAT_TASK_ABORTED;
        trace_target_cmd_complete(cmd);

        spin_unlock_irq(&cmd->t_state_lock);
        cmd->se_tfo->queue_status(cmd);
        spin_lock_irq(&cmd->t_state_lock);

        return 1;
}

int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
        int ret;

        spin_lock_irq(&cmd->t_state_lock);
        ret = __transport_check_aborted_status(cmd, send_status);
        spin_unlock_irq(&cmd->t_state_lock);

        return ret;
}
EXPORT_SYMBOL(transport_check_aborted_status);

void transport_send_task_abort(struct se_cmd *cmd)
{
        unsigned long flags;

        spin_lock_irqsave(&cmd->t_state_lock, flags);
        if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                return;
        }
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);

        /*
         * If there are still expected incoming fabric WRITEs, we wait
         * until they have completed before sending a TASK_ABORTED
         * response.  This response with TASK_ABORTED status will be
         * queued back to fabric module by transport_check_aborted_status().
         */
        if (cmd->data_direction == DMA_TO_DEVICE) {
                if (cmd->se_tfo->write_pending_status(cmd) != 0) {
                        spin_lock_irqsave(&cmd->t_state_lock, flags);
                        if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) {
                                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                                goto send_abort;
                        }
                        cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
                        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                        return;
                }
        }
send_abort:
        cmd->scsi_status = SAM_STAT_TASK_ABORTED;

        transport_lun_remove_cmd(cmd);

        pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
                 cmd->t_task_cdb[0], cmd->tag);

        trace_target_cmd_complete(cmd);
        cmd->se_tfo->queue_status(cmd);
}

static void target_tmr_work(struct work_struct *work)
{
        struct se_cmd *cmd = container_of(work, struct se_cmd, work);
        struct se_device *dev = cmd->se_dev;
        struct se_tmr_req *tmr = cmd->se_tmr_req;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cmd->t_state_lock, flags);
        if (cmd->transport_state & CMD_T_ABORTED) {
                tmr->response = TMR_FUNCTION_REJECTED;
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                goto check_stop;
        }
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);

        switch (tmr->function) {
        case TMR_ABORT_TASK:
                core_tmr_abort_task(dev, tmr, cmd->se_sess);
                break;
        case TMR_ABORT_TASK_SET:
        case TMR_CLEAR_ACA:
        case TMR_CLEAR_TASK_SET:
                tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
                break;
        case TMR_LUN_RESET:
                ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
                tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
                                         TMR_FUNCTION_REJECTED;
                if (tmr->response == TMR_FUNCTION_COMPLETE) {
                        target_ua_allocate_lun(cmd->se_sess->se_node_acl,
                                               cmd->orig_fe_lun, 0x29,
                                               ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED);
                }
                break;
        case TMR_TARGET_WARM_RESET:
                tmr->response = TMR_FUNCTION_REJECTED;
                break;
        case TMR_TARGET_COLD_RESET:
                tmr->response = TMR_FUNCTION_REJECTED;
                break;
        default:
                pr_err("Unknown TMR function: 0x%02x.\n",
                       tmr->function);
                tmr->response = TMR_FUNCTION_REJECTED;
                break;
        }

        spin_lock_irqsave(&cmd->t_state_lock, flags);
        if (cmd->transport_state & CMD_T_ABORTED) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                goto check_stop;
        }
        cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);

        cmd->se_tfo->queue_tm_rsp(cmd);

check_stop:
        transport_cmd_check_stop_to_fabric(cmd);
}

int transport_generic_handle_tmr(
        struct se_cmd *cmd)
{
        unsigned long flags;

        spin_lock_irqsave(&cmd->t_state_lock, flags);
        cmd->transport_state |= CMD_T_ACTIVE;
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);

        INIT_WORK(&cmd->work, target_tmr_work);
        queue_work(cmd->se_dev->tmr_wq, &cmd->work);
        return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);

bool
target_check_wce(struct se_device *dev)
{
        bool wce = false;

        if (dev->transport->get_write_cache)
                wce = dev->transport->get_write_cache(dev);
        else if (dev->dev_attrib.emulate_write_cache > 0)
                wce = true;

        return wce;
}

bool
target_check_fua(struct se_device *dev)
{
        return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0;
}
