scsi_lib.c

/*
 * Copyright (C) 1999 Eric Youngdale
 * Copyright (C) 2014 Christoph Hellwig
 *
 * SCSI queueing library.
 * Initial versions: Eric Youngdale (eric@andante.org).
 * Based upon conversations with large numbers
 * of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_dh.h>

#include <trace/events/scsi.h>

#include "scsi_debugfs.h"
#include "scsi_priv.h"
#include "scsi_logging.h"

static struct kmem_cache *scsi_sdb_cache;
static struct kmem_cache *scsi_sense_cache;
static struct kmem_cache *scsi_sense_isadma_cache;
static DEFINE_MUTEX(scsi_sense_cache_mutex);
static inline struct kmem_cache *
scsi_select_sense_cache(struct Scsi_Host *shost)
{
	return shost->unchecked_isa_dma ?
		scsi_sense_isadma_cache : scsi_sense_cache;
}

static void scsi_free_sense_buffer(struct Scsi_Host *shost,
		unsigned char *sense_buffer)
{
	kmem_cache_free(scsi_select_sense_cache(shost), sense_buffer);
}

static unsigned char *scsi_alloc_sense_buffer(struct Scsi_Host *shost,
	gfp_t gfp_mask, int numa_node)
{
	return kmem_cache_alloc_node(scsi_select_sense_cache(shost), gfp_mask,
				     numa_node);
}
int scsi_init_sense_cache(struct Scsi_Host *shost)
{
	struct kmem_cache *cache;
	int ret = 0;

	cache = scsi_select_sense_cache(shost);
	if (cache)
		return 0;

	mutex_lock(&scsi_sense_cache_mutex);
	if (shost->unchecked_isa_dma) {
		scsi_sense_isadma_cache =
			kmem_cache_create("scsi_sense_cache(DMA)",
				SCSI_SENSE_BUFFERSIZE, 0,
				SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL);
		if (!scsi_sense_isadma_cache)
			ret = -ENOMEM;
	} else {
		scsi_sense_cache =
			kmem_cache_create("scsi_sense_cache",
				SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
		if (!scsi_sense_cache)
			ret = -ENOMEM;
	}
	mutex_unlock(&scsi_sense_cache_mutex);
	return ret;
}
/*
 * When to reinvoke queueing after a resource shortage. It's 3 msecs so as
 * not to change behaviour from the previous unplug mechanism; experimentation
 * may prove this needs changing.
 */
#define SCSI_QUEUE_DELAY	3
static void
scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct scsi_target *starget = scsi_target(device);

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken. The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	switch (reason) {
	case SCSI_MLQUEUE_HOST_BUSY:
		atomic_set(&host->host_blocked, host->max_host_blocked);
		break;
	case SCSI_MLQUEUE_DEVICE_BUSY:
	case SCSI_MLQUEUE_EH_RETRY:
		atomic_set(&device->device_blocked,
			   device->max_device_blocked);
		break;
	case SCSI_MLQUEUE_TARGET_BUSY:
		atomic_set(&starget->target_blocked,
			   starget->max_target_blocked);
		break;
	}
}
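
/*
 * Illustrative sketch (not part of the original file): the SCSI_MLQUEUE_*
 * reason codes handled above normally originate from a low-level driver's
 * ->queuecommand() hook, which returns one of them when it is temporarily
 * unable to accept a command; the mid-layer then requeues the command and
 * sets the corresponding blocked counter via scsi_set_blocked(). The
 * function name and the out-of-resources test below are hypothetical.
 */
static int example_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	bool out_of_resources = false;	/* hypothetical driver-specific check */

	if (out_of_resources)
		return SCSI_MLQUEUE_HOST_BUSY;	/* mid-layer blocks and requeues */

	/* ... otherwise hand the command to the hardware ... */
	return 0;
}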
static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;

	blk_mq_requeue_request(cmd->request, true);
	put_device(&sdev->sdev_gendev);
}
/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason: The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion. The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion. This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */
static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
{
	struct scsi_device *device = cmd->device;
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
		"Inserting command %p into mlqueue\n", cmd));

	scsi_set_blocked(cmd, reason);

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	if (unbusy)
		scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue. Schedule requeue work under
	 * lock such that the kblockd_schedule_work() call happens
	 * before blk_cleanup_queue() finishes.
	 */
	cmd->result = 0;
	if (q->mq_ops) {
		scsi_mq_requeue_cmd(cmd);
		return;
	}
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	kblockd_schedule_work(&device->requeue_work);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
/*
 * Function:	scsi_queue_insert()
 *
 * Purpose:	Insert a command in the midlevel queue.
 *
 * Arguments:	cmd    - command that we are adding to queue.
 *		reason - why we are inserting command to queue.
 *
 * Lock status:	Assumed that lock is not held upon entry.
 *
 * Returns:	Nothing.
 *
 * Notes:	We do this for one of two cases. Either the host is busy
 *		and it cannot accept any more commands for the time being,
 *		or the device returned QUEUE_FULL and can accept no more
 *		commands.
 * Notes:	This could be called either from an interrupt context or a
 *		normal process context.
 */
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	__scsi_queue_insert(cmd, reason, 1);
}
/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @sshdr:	optional decoded sense header
 * @timeout:	request timeout in seconds
 * @retries:	number of times to retry request
 * @flags:	flags for ->cmd_flags
 * @rq_flags:	flags for ->rq_flags
 * @resid:	optional residual length
 *
 * Returns the scsi_cmnd result field if a command was executed, or a negative
 * Linux error code if we didn't get that far.
 */
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, struct scsi_sense_hdr *sshdr,
		 int timeout, int retries, u64 flags, req_flags_t rq_flags,
		 int *resid)
{
	struct request *req;
	struct scsi_request *rq;
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue,
			data_direction == DMA_TO_DEVICE ?
			REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, __GFP_RECLAIM);
	if (IS_ERR(req))
		return ret;
	rq = scsi_req(req);
	scsi_req_init(req);

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
					buffer, bufflen, __GFP_RECLAIM))
		goto out;

	rq->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(rq->cmd, cmd, rq->cmd_len);
	rq->retries = retries;
	req->timeout = timeout;
	req->cmd_flags |= flags;
	req->rq_flags |= rq_flags | RQF_QUIET | RQF_PREEMPT;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	/*
	 * Some devices (USB mass-storage in particular) may transfer
	 * garbage data together with a residue indicating that the data
	 * is invalid.  Prevent the garbage from being misinterpreted
	 * and prevent security leaks by zeroing out the excess data.
	 */
	if (unlikely(rq->resid_len > 0 && rq->resid_len <= bufflen))
		memset(buffer + (bufflen - rq->resid_len), 0, rq->resid_len);

	if (resid)
		*resid = rq->resid_len;
	if (sense && rq->sense_len)
		memcpy(sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
	if (sshdr)
		scsi_normalize_sense(rq->sense, rq->sense_len, sshdr);
	ret = rq->result;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);
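
/*
 * Illustrative sketch (not part of the original file): issuing a
 * TEST UNIT READY through scsi_execute().  The helper name is
 * hypothetical, and the timeout and retry count are arbitrary values
 * chosen for the example.  With no data transfer, the direction is
 * DMA_NONE and the buffer is NULL with zero length.
 */
static int example_test_unit_ready(struct scsi_device *sdev)
{
	unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct scsi_sense_hdr sshdr;

	return scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
			    10 * HZ, 3, 0, 0, NULL);
}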
/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.  Typically this will
 *              be called once for each command, as required.
 */
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;
	scsi_set_resid(cmd, 0);
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	if (cmd->cmd_len == 0)
		cmd->cmd_len = scsi_command_size(cmd->cmnd);
}
void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = scsi_target(sdev);
	unsigned long flags;

	atomic_dec(&shost->host_busy);
	if (starget->can_queue > 0)
		atomic_dec(&starget->target_busy);

	if (unlikely(scsi_host_in_recovery(shost) &&
		     (shost->host_failed || shost->host_eh_scheduled))) {
		spin_lock_irqsave(shost->host_lock, flags);
		scsi_eh_wakeup(shost);
		spin_unlock_irqrestore(shost->host_lock, flags);
	}

	atomic_dec(&sdev->device_busy);
}

static void scsi_kick_queue(struct request_queue *q)
{
	if (q->mq_ops)
		blk_mq_start_hw_queues(q);
	else
		blk_run_queue(q);
}
/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	scsi_kick_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		scsi_kick_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}
static inline bool scsi_device_is_busy(struct scsi_device *sdev)
{
	if (atomic_read(&sdev->device_busy) >= sdev->queue_depth)
		return true;
	if (atomic_read(&sdev->device_blocked) > 0)
		return true;
	return false;
}

static inline bool scsi_target_is_busy(struct scsi_target *starget)
{
	if (starget->can_queue > 0) {
		if (atomic_read(&starget->target_busy) >= starget->can_queue)
			return true;
		if (atomic_read(&starget->target_blocked) > 0)
			return true;
	}
	return false;
}

static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{
	if (shost->can_queue > 0 &&
	    atomic_read(&shost->host_busy) >= shost->can_queue)
		return true;
	if (atomic_read(&shost->host_blocked) > 0)
		return true;
	if (shost->host_self_blocked)
		return true;
	return false;
}
static void scsi_starved_list_run(struct Scsi_Host *shost)
{
	LIST_HEAD(starved_list);
	struct scsi_device *sdev;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->starved_list, &starved_list);

	while (!list_empty(&starved_list)) {
		struct request_queue *slq;

		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		if (scsi_host_is_busy(shost))
			break;

		sdev = list_entry(starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		if (scsi_target_is_busy(scsi_target(sdev))) {
			list_move_tail(&sdev->starved_entry,
				       &shost->starved_list);
			continue;
		}

		/*
		 * Once we drop the host lock, a racing scsi_remove_device()
		 * call may remove the sdev from the starved list and destroy
		 * it and the queue.  Mitigate by taking a reference to the
		 * queue and never touching the sdev again after we drop the
		 * host lock.  Note: if __scsi_remove_device() invokes
		 * blk_cleanup_queue() before the queue is run from this
		 * function then blk_run_queue() will return immediately since
		 * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
		 */
		slq = sdev->request_queue;
		if (!blk_get_queue(slq))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		scsi_kick_queue(slq);
		blk_put_queue(slq);

		spin_lock_irqsave(shost->host_lock, flags);
	}
	/* put any unprocessed entries back */
	list_splice(&starved_list, &shost->starved_list);
	spin_unlock_irqrestore(shost->host_lock, flags);
}
/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:	Nothing
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;

	if (scsi_target(sdev)->single_lun)
		scsi_single_lun_run(sdev);
	if (!list_empty(&sdev->host->starved_list))
		scsi_starved_list_run(sdev->host);

	if (q->mq_ops)
		blk_mq_run_hw_queues(q, false);
	else
		blk_run_queue(q);
}

void scsi_requeue_run_queue(struct work_struct *work)
{
	struct scsi_device *sdev;
	struct request_queue *q;

	sdev = container_of(work, struct scsi_device, requeue_work);
	q = sdev->request_queue;
	scsi_run_queue(q);
}
/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command;
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request *req = cmd->request;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_unprep_request(req);
	req->special = NULL;
	scsi_put_command(cmd);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}
static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
{
	if (!blk_rq_is_passthrough(cmd->request)) {
		struct scsi_driver *drv = scsi_cmd_to_driver(cmd);

		if (drv->uninit_command)
			drv->uninit_command(cmd);
	}
}

static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
{
	struct scsi_data_buffer *sdb;

	if (cmd->sdb.table.nents)
		sg_free_table_chained(&cmd->sdb.table, true);
	if (cmd->request->next_rq) {
		sdb = cmd->request->next_rq->special;
		if (sdb)
			sg_free_table_chained(&sdb->table, true);
	}
	if (scsi_prot_sg_count(cmd))
		sg_free_table_chained(&cmd->prot_sdb->table, true);
}

static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	scsi_mq_free_sgtables(cmd);
	scsi_uninit_cmd(cmd);

	if (shost->use_cmd_list) {
		BUG_ON(list_empty(&cmd->list));
		spin_lock_irqsave(&sdev->list_lock, flags);
		list_del_init(&cmd->list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);
	}
}
/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Free resources allocated for a scsi_command.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table.
 */
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	if (cmd->sdb.table.nents)
		sg_free_table_chained(&cmd->sdb.table, false);

	memset(&cmd->sdb, 0, sizeof(cmd->sdb));

	if (scsi_prot_sg_count(cmd))
		sg_free_table_chained(&cmd->prot_sdb->table, false);
}

static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
{
	struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special;

	sg_free_table_chained(&bidi_sdb->table, false);
	kmem_cache_free(scsi_sdb_cache, bidi_sdb);
	cmd->request->next_rq->special = NULL;
}
static bool scsi_end_request(struct request *req, int error,
		unsigned int bytes, unsigned int bidi_bytes)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	if (blk_update_request(req, error, bytes))
		return true;

	/* Bidi request must be completed as a whole */
	if (unlikely(bidi_bytes) &&
	    blk_update_request(req->next_rq, error, bidi_bytes))
		return true;

	if (blk_queue_add_random(q))
		add_disk_randomness(req->rq_disk);

	if (req->mq_ctx) {
		/*
		 * In the MQ case the command gets freed by __blk_mq_end_request,
		 * so we have to do all cleanup that depends on it earlier.
		 *
		 * We also can't kick the queues from irq context, so we
		 * will have to defer it to a workqueue.
		 */
		scsi_mq_uninit_cmd(cmd);

		__blk_mq_end_request(req, error);

		if (scsi_target(sdev)->single_lun ||
		    !list_empty(&sdev->host->starved_list))
			kblockd_schedule_work(&sdev->requeue_work);
		else
			blk_mq_run_hw_queues(q, true);
	} else {
		unsigned long flags;

		if (bidi_bytes)
			scsi_release_bidi_buffers(cmd);
		scsi_release_buffers(cmd);
		scsi_put_command(cmd);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_finish_request(req, error);
		spin_unlock_irqrestore(q->queue_lock, flags);

		scsi_run_queue(q);
	}

	put_device(&sdev->sdev_gendev);
	return false;
}
/**
 * __scsi_error_from_host_byte - translate SCSI error code into errno
 * @cmd:	SCSI command (unused)
 * @result:	scsi error code
 *
 * Translate SCSI error code into standard UNIX errno.
 * Return values:
 * -ENOLINK	temporary transport failure
 * -EREMOTEIO	permanent target failure, do not retry
 * -EBADE	permanent nexus failure, retry on other path
 * -ENOSPC	No write space available
 * -ENODATA	Medium error
 * -EIO		unspecified I/O error
 */
static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
{
	int error = 0;

	switch(host_byte(result)) {
	case DID_TRANSPORT_FAILFAST:
		error = -ENOLINK;
		break;
	case DID_TARGET_FAILURE:
		set_host_byte(cmd, DID_OK);
		error = -EREMOTEIO;
		break;
	case DID_NEXUS_FAILURE:
		set_host_byte(cmd, DID_OK);
		error = -EBADE;
		break;
	case DID_ALLOC_FAILURE:
		set_host_byte(cmd, DID_OK);
		error = -ENOSPC;
		break;
	case DID_MEDIUM_ERROR:
		set_host_byte(cmd, DID_OK);
		error = -ENODATA;
		break;
	default:
		error = -EIO;
		break;
	}

	return error;
}
/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       We will finish off the specified number of sectors. If we
 *		are done, the command block will be released and the queue
 *		function will be goosed. If we are not done then we have to
 *		figure out what to do next:
 *
 *		a) We can call scsi_requeue_command().  The request
 *		   will be unprepared and put back on the queue.  Then
 *		   a new command will be created for it.  This should
 *		   be used if we made forward progress, or if we want
 *		   to switch from READ(10) to READ(6) for example.
 *
 *		b) We can call __scsi_queue_insert().  The request will
 *		   be put back on the queue and retried using the same
 *		   command as before, possibly after a delay.
 *
 *		c) We can call scsi_end_request() with -EIO to fail
 *		   the remainder of the request.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int error = 0;
	struct scsi_sense_hdr sshdr;
	bool sense_valid = false;
	int sense_deferred = 0, level = 0;
	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
	      ACTION_DELAYED_RETRY} action;
	unsigned long wait_for = (cmd->allowed + 1) * req->timeout;

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}

	if (blk_rq_is_passthrough(req)) {
		if (result) {
			if (sense_valid) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				scsi_req(req)->sense_len =
					min(8 + cmd->sense_buffer[7],
					    SCSI_SENSE_BUFFERSIZE);
			}
			if (!sense_deferred)
				error = __scsi_error_from_host_byte(cmd, result);
		}
		/*
		 * __scsi_error_from_host_byte may have reset the host_byte
		 */
		scsi_req(req)->result = cmd->result;
		scsi_req(req)->resid_len = scsi_get_resid(cmd);

		if (scsi_bidi_cmnd(cmd)) {
			/*
			 * Bidi commands must be completed as a whole,
			 * both sides at once.
			 */
			scsi_req(req->next_rq)->resid_len = scsi_in(cmd)->resid;
			if (scsi_end_request(req, 0, blk_rq_bytes(req),
					blk_rq_bytes(req->next_rq)))
				BUG();
			return;
		}
	} else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
		/*
		 * Flush commands do not transfer any data, and thus cannot use
		 * good_bytes != blk_rq_bytes(req) as the signal for an error.
		 * This sets the error explicitly for the problem case.
		 */
		error = __scsi_error_from_host_byte(cmd, result);
	}
	/* no bidi support for !blk_rq_is_passthrough yet */
	BUG_ON(blk_bidi_rq(req));

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
		"%u sectors total, %d bytes done.\n",
		blk_rq_sectors(req), good_bytes));

	/*
	 * Recovered errors need reporting, but they're always treated as
	 * success, so fiddle the result code here.  For passthrough requests
	 * we already took a copy of the original into sreq->result which
	 * is what gets returned to the user
	 */
	if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
		/* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
		 * print since caller wants ATA registers. Only occurs on
		 * SCSI ATA PASS_THROUGH commands when CK_COND=1
		 */
		if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
			;
		else if (!(req->rq_flags & RQF_QUIET))
			scsi_print_sense(cmd);
		result = 0;
		/* for passthrough error may be set */
		error = 0;
	}

	/*
	 * special case: failed zero length commands always need to
	 * drop down into the retry code. Otherwise, if we finished
	 * all bytes in the request we are done now.
	 */
	if (!(blk_rq_bytes(req) == 0 && error) &&
	    !scsi_end_request(req, error, good_bytes, 0))
		return;

	/*
	 * Kill remainder if no retries.
	 */
	if (error && scsi_noretry_cmd(cmd)) {
		if (scsi_end_request(req, error, blk_rq_bytes(req), 0))
			BUG();
		return;
	}

	/*
	 * If there had been no error, but we have leftover bytes in the
	 * request, just queue the command up again.
	 */
	if (result == 0)
		goto requeue;
	error = __scsi_error_from_host_byte(cmd, result);

	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the command and see what
		 * happens.
		 */
		action = ACTION_RETRY;
	} else if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				action = ACTION_FAIL;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * command and see what happens.
				 */
				action = ACTION_RETRY;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				/* This will issue a new 6-byte command. */
				cmd->device->use_10_for_rw = 0;
				action = ACTION_REPREP;
			} else if (sshdr.asc == 0x10) /* DIX */ {
				action = ACTION_FAIL;
				error = -EILSEQ;
			/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
			} else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
				action = ACTION_FAIL;
				error = -EREMOTEIO;
			} else
				action = ACTION_FAIL;
			break;
		case ABORTED_COMMAND:
			action = ACTION_FAIL;
			if (sshdr.asc == 0x10) /* DIF */
				error = -EILSEQ;
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
				case 0x14: /* space allocation in progress */
					action = ACTION_DELAYED_RETRY;
					break;
				default:
					action = ACTION_FAIL;
					break;
				}
			} else
				action = ACTION_FAIL;
			break;
		case VOLUME_OVERFLOW:
			/* See SSC3rXX or current. */
			action = ACTION_FAIL;
			break;
		default:
			action = ACTION_FAIL;
			break;
		}
	} else
		action = ACTION_FAIL;

	if (action != ACTION_FAIL &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies))
		action = ACTION_FAIL;
	switch (action) {
	case ACTION_FAIL:
		/* Give up and fail the remainder of the request */
		if (!(req->rq_flags & RQF_QUIET)) {
			static DEFINE_RATELIMIT_STATE(_rs,
					DEFAULT_RATELIMIT_INTERVAL,
					DEFAULT_RATELIMIT_BURST);

			if (unlikely(scsi_logging_level))
				level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
						       SCSI_LOG_MLCOMPLETE_BITS);

			/*
			 * if logging is enabled the failure will be printed
			 * in scsi_log_completion(), so avoid duplicate messages
			 */
			if (!level && __ratelimit(&_rs)) {
				scsi_print_result(cmd, NULL, FAILED);
				if (driver_byte(result) & DRIVER_SENSE)
					scsi_print_sense(cmd);
				scsi_print_command(cmd);
			}
		}
		if (!scsi_end_request(req, error, blk_rq_err_bytes(req), 0))
			return;
		/*FALLTHRU*/
	case ACTION_REPREP:
	requeue:
		/* Unprep the request and put it back at the head of the queue.
		 * A new command will be prepared and issued.
		 */
		if (q->mq_ops) {
			cmd->request->rq_flags &= ~RQF_DONTPREP;
			scsi_mq_uninit_cmd(cmd);
			scsi_mq_requeue_cmd(cmd);
		} else {
			scsi_release_buffers(cmd);
			scsi_requeue_command(q, cmd);
		}
		break;
	case ACTION_RETRY:
		/* Retry the same command immediately */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
		break;
	case ACTION_DELAYED_RETRY:
		/* Retry the same command after a delay */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
		break;
	}
}
static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
{
	int count;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	if (unlikely(sg_alloc_table_chained(&sdb->table,
			blk_rq_nr_phys_segments(req), sdb->table.sgl)))
		return BLKPREP_DEFER;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
	BUG_ON(count > sdb->table.nents);
	sdb->table.nents = count;
	sdb->length = blk_rq_payload_bytes(req);
	return BLKPREP_OK;
}
/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
int scsi_init_io(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request *rq = cmd->request;
	bool is_mq = (rq->mq_ctx != NULL);
	int error = BLKPREP_KILL;

	if (WARN_ON_ONCE(!blk_rq_nr_phys_segments(rq)))
		goto err_exit;

	error = scsi_init_sgtable(rq, &cmd->sdb);
	if (error)
		goto err_exit;

	if (blk_bidi_rq(rq)) {
		if (!rq->q->mq_ops) {
			struct scsi_data_buffer *bidi_sdb =
				kmem_cache_zalloc(scsi_sdb_cache, GFP_ATOMIC);
			if (!bidi_sdb) {
				error = BLKPREP_DEFER;
				goto err_exit;
			}

			rq->next_rq->special = bidi_sdb;
		}

		error = scsi_init_sgtable(rq->next_rq, rq->next_rq->special);
		if (error)
			goto err_exit;
	}

	if (blk_integrity_rq(rq)) {
		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
		int ivecs, count;

		if (prot_sdb == NULL) {
			/*
			 * This can happen if someone (e.g. multipath)
			 * queues a command to a device on an adapter
			 * that does not support DIX.
			 */
			WARN_ON_ONCE(1);
			error = BLKPREP_KILL;
			goto err_exit;
		}

		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);

		if (sg_alloc_table_chained(&prot_sdb->table, ivecs,
				prot_sdb->table.sgl)) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		count = blk_rq_map_integrity_sg(rq->q, rq->bio,
						prot_sdb->table.sgl);
		BUG_ON(unlikely(count > ivecs));
		BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));

		cmd->prot_sdb = prot_sdb;
		cmd->prot_sdb->table.nents = count;
	}

	return BLKPREP_OK;
err_exit:
	if (is_mq) {
		scsi_mq_free_sgtables(cmd);
	} else {
		scsi_release_buffers(cmd);
		cmd->request->special = NULL;
		scsi_put_command(cmd);
		put_device(&sdev->sdev_gendev);
	}
	return error;
}
EXPORT_SYMBOL(scsi_init_io);
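
/*
 * Illustrative sketch (not part of the original file): an upper-level
 * driver's ->init_command() typically calls scsi_init_io() to map the
 * request's bios into the command's scatter-gather table before building
 * the CDB.  The function name below is hypothetical.
 */
static int example_init_command(struct scsi_cmnd *cmd)
{
	int ret = scsi_init_io(cmd);

	if (ret != BLKPREP_OK)
		return ret;

	/* ... fill in cmd->cmnd[] based on the request here ... */
	return BLKPREP_OK;
}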
void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
{
	void *buf = cmd->sense_buffer;
	void *prot = cmd->prot_sdb;
	unsigned long flags;

	/* zero out the cmd, except for the embedded scsi_request */
	memset((char *)cmd + sizeof(cmd->req), 0,
		sizeof(*cmd) - sizeof(cmd->req) + dev->host->hostt->cmd_size);

	cmd->device = dev;
	cmd->sense_buffer = buf;
	cmd->prot_sdb = prot;
	INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
	cmd->jiffies_at_alloc = jiffies;

	spin_lock_irqsave(&dev->list_lock, flags);
	list_add_tail(&cmd->list, &dev->cmd_list);
	spin_unlock_irqrestore(&dev->list_lock, flags);
}
static int scsi_setup_scsi_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	/*
	 * Passthrough requests may transfer data, in which case they must
	 * have a bio attached to them.  Or they might contain a SCSI command
	 * that does not transfer data, in which case they may optionally
	 * submit a request without an attached bio.
	 */
	if (req->bio) {
		int ret = scsi_init_io(cmd);
		if (unlikely(ret))
			return ret;
	} else {
		BUG_ON(blk_rq_bytes(req));

		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
	}

	cmd->cmd_len = scsi_req(req)->cmd_len;
	cmd->cmnd = scsi_req(req)->cmd;
	cmd->transfersize = blk_rq_bytes(req);
	cmd->allowed = scsi_req(req)->retries;
	return BLKPREP_OK;
}
/*
 * Setup a normal block command.  These are simple requests from filesystems
 * that still need to be translated to SCSI CDBs from the ULD.
 */
static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	if (unlikely(sdev->handler && sdev->handler->prep_fn)) {
		int ret = sdev->handler->prep_fn(sdev, req);
		if (ret != BLKPREP_OK)
			return ret;
	}

	cmd->cmnd = scsi_req(req)->cmd = scsi_req(req)->__cmd;
	memset(cmd->cmnd, 0, BLK_MAX_CDB);
	return scsi_cmd_to_driver(cmd)->init_command(cmd);
}
static int scsi_setup_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	if (!blk_rq_bytes(req))
		cmd->sc_data_direction = DMA_NONE;
	else if (rq_data_dir(req) == WRITE)
		cmd->sc_data_direction = DMA_TO_DEVICE;
	else
		cmd->sc_data_direction = DMA_FROM_DEVICE;

	if (blk_rq_is_scsi(req))
		return scsi_setup_scsi_cmnd(sdev, req);
	else
		return scsi_setup_fs_cmnd(sdev, req);
}
static int
scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
	int ret = BLKPREP_OK;

	/*
	 * If the device is not in running state we will reject some
	 * or all commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		switch (sdev->sdev_state) {
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
			/*
			 * If the device is offline we refuse to process any
			 * commands.  The device must be brought online
			 * before trying any recovery commands.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_DEL:
			/*
			 * If the device is fully deleted, we refuse to
			 * process any commands as well.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_BLOCK:
		case SDEV_CREATED_BLOCK:
			ret = BLKPREP_DEFER;
			break;
		case SDEV_QUIESCE:
			/*
			 * If the device is blocked we defer normal commands.
			 */
			if (!(req->rq_flags & RQF_PREEMPT))
				ret = BLKPREP_DEFER;
			break;
		default:
			/*
			 * For any other not fully online state we only allow
			 * special commands.  In particular any user initiated
			 * command is not allowed.
			 */
			if (!(req->rq_flags & RQF_PREEMPT))
				ret = BLKPREP_KILL;
			break;
		}
	}
	return ret;
}
static int
scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
	struct scsi_device *sdev = q->queuedata;

	switch (ret) {
	case BLKPREP_KILL:
	case BLKPREP_INVALID:
		scsi_req(req)->result = DID_NO_CONNECT << 16;
		/* release the command and kill it */
		if (req->special) {
			struct scsi_cmnd *cmd = req->special;
			scsi_release_buffers(cmd);
			scsi_put_command(cmd);
			put_device(&sdev->sdev_gendev);
			req->special = NULL;
		}
		break;
	case BLKPREP_DEFER:
		/*
		 * If we defer, the blk_peek_request() returns NULL, but the
		 * queue must be restarted, so we schedule a callback to happen
		 * shortly.
		 */
		if (atomic_read(&sdev->device_busy) == 0)
			blk_delay_queue(q, SCSI_QUEUE_DELAY);
		break;
	default:
		req->rq_flags |= RQF_DONTPREP;
	}

	return ret;
}
static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	int ret;

	ret = scsi_prep_state_check(sdev, req);
	if (ret != BLKPREP_OK)
		goto out;

	if (!req->special) {
		/* Bail if we can't get a reference to the device */
		if (unlikely(!get_device(&sdev->sdev_gendev))) {
			ret = BLKPREP_DEFER;
			goto out;
		}

		scsi_init_command(sdev, cmd);
		req->special = cmd;
	}

	cmd->tag = req->tag;
	cmd->request = req;
	cmd->prot_op = SCSI_PROT_NORMAL;

	ret = scsi_setup_cmnd(sdev, req);
out:
	return scsi_prep_return(q, req, ret);
}

static void scsi_unprep_fn(struct request_queue *q, struct request *req)
{
	scsi_uninit_cmd(req->special);
}
/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{
	unsigned int busy;

	busy = atomic_inc_return(&sdev->device_busy) - 1;
	if (atomic_read(&sdev->device_blocked)) {
		if (busy)
			goto out_dec;

		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (atomic_dec_return(&sdev->device_blocked) > 0) {
			/*
			 * For the MQ case we take care of this in the caller.
			 */
			if (!q->mq_ops)
				blk_delay_queue(q, SCSI_QUEUE_DELAY);
			goto out_dec;
		}
		SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
				   "unblocking device at zero depth\n"));
	}

	if (busy >= sdev->queue_depth)
		goto out_dec;

	return 1;
out_dec:
	atomic_dec(&sdev->device_busy);
	return 0;
}
/*
 * scsi_target_queue_ready: checks if we can send commands to the target
 * @sdev: scsi device on starget to check.
 */
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
					   struct scsi_device *sdev)
{
	struct scsi_target *starget = scsi_target(sdev);
	unsigned int busy;

	if (starget->single_lun) {
		spin_lock_irq(shost->host_lock);
		if (starget->starget_sdev_user &&
		    starget->starget_sdev_user != sdev) {
			spin_unlock_irq(shost->host_lock);
			return 0;
		}
		starget->starget_sdev_user = sdev;
		spin_unlock_irq(shost->host_lock);
	}

	if (starget->can_queue <= 0)
		return 1;

	busy = atomic_inc_return(&starget->target_busy) - 1;
	if (atomic_read(&starget->target_blocked) > 0) {
		if (busy)
			goto starved;

		/*
		 * unblock after target_blocked iterates to zero
		 */
		if (atomic_dec_return(&starget->target_blocked) > 0)
			goto out_dec;

		SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
				 "unblocking target at zero depth\n"));
	}

	if (busy >= starget->can_queue)
		goto starved;

	return 1;

starved:
	spin_lock_irq(shost->host_lock);
	list_move_tail(&sdev->starved_entry, &shost->starved_list);
	spin_unlock_irq(shost->host_lock);
out_dec:
	if (starget->can_queue > 0)
		atomic_dec(&starget->target_busy);
	return 0;
}
  1274. /*
  1275. * scsi_host_queue_ready: if we can send requests to shost, return 1 else
  1276. * return 0. We must end up running the queue again whenever 0 is
  1277. * returned, else IO can hang.
  1278. */
  1279. static inline int scsi_host_queue_ready(struct request_queue *q,
  1280. struct Scsi_Host *shost,
  1281. struct scsi_device *sdev)
  1282. {
  1283. unsigned int busy;
  1284. if (scsi_host_in_recovery(shost))
  1285. return 0;
  1286. busy = atomic_inc_return(&shost->host_busy) - 1;
  1287. if (atomic_read(&shost->host_blocked) > 0) {
  1288. if (busy)
  1289. goto starved;
  1290. /*
  1291. * unblock after host_blocked iterates to zero
  1292. */
  1293. if (atomic_dec_return(&shost->host_blocked) > 0)
  1294. goto out_dec;
  1295. SCSI_LOG_MLQUEUE(3,
  1296. shost_printk(KERN_INFO, shost,
  1297. "unblocking host at zero depth\n"));
  1298. }
  1299. if (shost->can_queue > 0 && busy >= shost->can_queue)
  1300. goto starved;
  1301. if (shost->host_self_blocked)
  1302. goto starved;
  1303. /* We're OK to process the command, so we can't be starved */
  1304. if (!list_empty(&sdev->starved_entry)) {
  1305. spin_lock_irq(shost->host_lock);
  1306. if (!list_empty(&sdev->starved_entry))
  1307. list_del_init(&sdev->starved_entry);
  1308. spin_unlock_irq(shost->host_lock);
  1309. }
  1310. return 1;
  1311. starved:
  1312. spin_lock_irq(shost->host_lock);
  1313. if (list_empty(&sdev->starved_entry))
  1314. list_add_tail(&sdev->starved_entry, &shost->starved_list);
  1315. spin_unlock_irq(shost->host_lock);
  1316. out_dec:
  1317. atomic_dec(&shost->host_busy);
  1318. return 0;
  1319. }
  1320. /*
  1321. * Busy state exporting function for request stacking drivers.
  1322. *
  1323. * For efficiency, no lock is taken to check the busy state of
  1324. * shost/starget/sdev, since the returned value is not guaranteed and
  1325. * may be changed after request stacking drivers call the function,
  1326. * regardless of taking lock or not.
  1327. *
1328. * When SCSI can't dispatch I/Os anymore and needs to kill I/Os, it
1329. * must return 'not busy'. Otherwise, request stacking drivers
  1330. * may hold requests forever.
  1331. */
  1332. static int scsi_lld_busy(struct request_queue *q)
  1333. {
  1334. struct scsi_device *sdev = q->queuedata;
  1335. struct Scsi_Host *shost;
  1336. if (blk_queue_dying(q))
  1337. return 0;
  1338. shost = sdev->host;
  1339. /*
  1340. * Ignore host/starget busy state.
  1341. * Since block layer does not have a concept of fairness across
  1342. * multiple queues, congestion of host/starget needs to be handled
  1343. * in SCSI layer.
  1344. */
  1345. if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
  1346. return 1;
  1347. return 0;
  1348. }
  1349. /*
  1350. * Kill a request for a dead device
  1351. */
  1352. static void scsi_kill_request(struct request *req, struct request_queue *q)
  1353. {
  1354. struct scsi_cmnd *cmd = req->special;
  1355. struct scsi_device *sdev;
  1356. struct scsi_target *starget;
  1357. struct Scsi_Host *shost;
  1358. blk_start_request(req);
  1359. scmd_printk(KERN_INFO, cmd, "killing request\n");
  1360. sdev = cmd->device;
  1361. starget = scsi_target(sdev);
  1362. shost = sdev->host;
  1363. scsi_init_cmd_errh(cmd);
  1364. cmd->result = DID_NO_CONNECT << 16;
  1365. atomic_inc(&cmd->device->iorequest_cnt);
  1366. /*
  1367. * SCSI request completion path will do scsi_device_unbusy(),
  1368. * bump busy counts. To bump the counters, we need to dance
  1369. * with the locks as normal issue path does.
  1370. */
  1371. atomic_inc(&sdev->device_busy);
  1372. atomic_inc(&shost->host_busy);
  1373. if (starget->can_queue > 0)
  1374. atomic_inc(&starget->target_busy);
  1375. blk_complete_request(req);
  1376. }
  1377. static void scsi_softirq_done(struct request *rq)
  1378. {
  1379. struct scsi_cmnd *cmd = rq->special;
  1380. unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
  1381. int disposition;
  1382. INIT_LIST_HEAD(&cmd->eh_entry);
  1383. atomic_inc(&cmd->device->iodone_cnt);
  1384. if (cmd->result)
  1385. atomic_inc(&cmd->device->ioerr_cnt);
  1386. disposition = scsi_decide_disposition(cmd);
  1387. if (disposition != SUCCESS &&
  1388. time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
  1389. sdev_printk(KERN_ERR, cmd->device,
  1390. "timing out command, waited %lus\n",
  1391. wait_for/HZ);
  1392. disposition = SUCCESS;
  1393. }
  1394. scsi_log_completion(cmd, disposition);
  1395. switch (disposition) {
  1396. case SUCCESS:
  1397. scsi_finish_command(cmd);
  1398. break;
  1399. case NEEDS_RETRY:
  1400. scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
  1401. break;
  1402. case ADD_TO_MLQUEUE:
  1403. scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
  1404. break;
  1405. default:
  1406. if (!scsi_eh_scmd_add(cmd, 0))
  1407. scsi_finish_command(cmd);
  1408. }
  1409. }
  1410. /**
  1411. * scsi_dispatch_command - Dispatch a command to the low-level driver.
  1412. * @cmd: command block we are dispatching.
  1413. *
1414. * Return: nonzero if the request was rejected and the device's queue needs
1415. * to be plugged.
  1416. */
  1417. static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
  1418. {
  1419. struct Scsi_Host *host = cmd->device->host;
  1420. int rtn = 0;
  1421. atomic_inc(&cmd->device->iorequest_cnt);
  1422. /* check if the device is still usable */
  1423. if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
  1424. /* in SDEV_DEL we error all commands. DID_NO_CONNECT
  1425. * returns an immediate error upwards, and signals
  1426. * that the device is no longer present */
  1427. cmd->result = DID_NO_CONNECT << 16;
  1428. goto done;
  1429. }
  1430. /* Check to see if the scsi lld made this device blocked. */
  1431. if (unlikely(scsi_device_blocked(cmd->device))) {
  1432. /*
  1433. * in blocked state, the command is just put back on
  1434. * the device queue. The suspend state has already
  1435. * blocked the queue so future requests should not
  1436. * occur until the device transitions out of the
  1437. * suspend state.
  1438. */
  1439. SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
  1440. "queuecommand : device blocked\n"));
  1441. return SCSI_MLQUEUE_DEVICE_BUSY;
  1442. }
  1443. /* Store the LUN value in cmnd, if needed. */
  1444. if (cmd->device->lun_in_cdb)
  1445. cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
  1446. (cmd->device->lun << 5 & 0xe0);
  1447. scsi_log_send(cmd);
  1448. /*
  1449. * Before we queue this command, check if the command
  1450. * length exceeds what the host adapter can handle.
  1451. */
  1452. if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
  1453. SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
  1454. "queuecommand : command too long. "
  1455. "cdb_size=%d host->max_cmd_len=%d\n",
  1456. cmd->cmd_len, cmd->device->host->max_cmd_len));
  1457. cmd->result = (DID_ABORT << 16);
  1458. goto done;
  1459. }
  1460. if (unlikely(host->shost_state == SHOST_DEL)) {
  1461. cmd->result = (DID_NO_CONNECT << 16);
  1462. goto done;
  1463. }
  1464. trace_scsi_dispatch_cmd_start(cmd);
  1465. rtn = host->hostt->queuecommand(host, cmd);
  1466. if (rtn) {
  1467. trace_scsi_dispatch_cmd_error(cmd, rtn);
  1468. if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
  1469. rtn != SCSI_MLQUEUE_TARGET_BUSY)
  1470. rtn = SCSI_MLQUEUE_HOST_BUSY;
  1471. SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
  1472. "queuecommand : request rejected\n"));
  1473. }
  1474. return rtn;
  1475. done:
  1476. cmd->scsi_done(cmd);
  1477. return 0;
  1478. }
  1479. /**
  1480. * scsi_done - Invoke completion on finished SCSI command.
  1481. * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
  1482. * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
  1483. *
  1484. * Description: This function is the mid-level's (SCSI Core) interrupt routine,
  1485. * which regains ownership of the SCSI command (de facto) from a LLDD, and
  1486. * calls blk_complete_request() for further processing.
  1487. *
  1488. * This function is interrupt context safe.
  1489. */
  1490. static void scsi_done(struct scsi_cmnd *cmd)
  1491. {
  1492. trace_scsi_dispatch_cmd_done(cmd);
  1493. blk_complete_request(cmd->request);
  1494. }
  1495. /*
  1496. * Function: scsi_request_fn()
  1497. *
  1498. * Purpose: Main strategy routine for SCSI.
  1499. *
  1500. * Arguments: q - Pointer to actual queue.
  1501. *
  1502. * Returns: Nothing
  1503. *
  1504. * Lock status: IO request lock assumed to be held when called.
  1505. */
  1506. static void scsi_request_fn(struct request_queue *q)
  1507. __releases(q->queue_lock)
  1508. __acquires(q->queue_lock)
  1509. {
  1510. struct scsi_device *sdev = q->queuedata;
  1511. struct Scsi_Host *shost;
  1512. struct scsi_cmnd *cmd;
  1513. struct request *req;
  1514. /*
  1515. * To start with, we keep looping until the queue is empty, or until
  1516. * the host is no longer able to accept any more requests.
  1517. */
  1518. shost = sdev->host;
  1519. for (;;) {
  1520. int rtn;
  1521. /*
  1522. * get next queueable request. We do this early to make sure
  1523. * that the request is fully prepared even if we cannot
  1524. * accept it.
  1525. */
  1526. req = blk_peek_request(q);
  1527. if (!req)
  1528. break;
  1529. if (unlikely(!scsi_device_online(sdev))) {
  1530. sdev_printk(KERN_ERR, sdev,
  1531. "rejecting I/O to offline device\n");
  1532. scsi_kill_request(req, q);
  1533. continue;
  1534. }
  1535. if (!scsi_dev_queue_ready(q, sdev))
  1536. break;
  1537. /*
  1538. * Remove the request from the request list.
  1539. */
  1540. if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
  1541. blk_start_request(req);
  1542. spin_unlock_irq(q->queue_lock);
  1543. cmd = req->special;
  1544. if (unlikely(cmd == NULL)) {
  1545. printk(KERN_CRIT "impossible request in %s.\n"
  1546. "please mail a stack trace to "
  1547. "linux-scsi@vger.kernel.org\n",
  1548. __func__);
  1549. blk_dump_rq_flags(req, "foo");
  1550. BUG();
  1551. }
  1552. /*
  1553. * We hit this when the driver is using a host wide
  1554. * tag map. For device level tag maps the queue_depth check
  1555. * in the device ready fn would prevent us from trying
  1556. * to allocate a tag. Since the map is a shared host resource
  1557. * we add the dev to the starved list so it eventually gets
  1558. * a run when a tag is freed.
  1559. */
  1560. if (blk_queue_tagged(q) && !(req->rq_flags & RQF_QUEUED)) {
  1561. spin_lock_irq(shost->host_lock);
  1562. if (list_empty(&sdev->starved_entry))
  1563. list_add_tail(&sdev->starved_entry,
  1564. &shost->starved_list);
  1565. spin_unlock_irq(shost->host_lock);
  1566. goto not_ready;
  1567. }
  1568. if (!scsi_target_queue_ready(shost, sdev))
  1569. goto not_ready;
  1570. if (!scsi_host_queue_ready(q, shost, sdev))
  1571. goto host_not_ready;
  1572. if (sdev->simple_tags)
  1573. cmd->flags |= SCMD_TAGGED;
  1574. else
  1575. cmd->flags &= ~SCMD_TAGGED;
  1576. /*
  1577. * Finally, initialize any error handling parameters, and set up
  1578. * the timers for timeouts.
  1579. */
  1580. scsi_init_cmd_errh(cmd);
  1581. /*
  1582. * Dispatch the command to the low-level driver.
  1583. */
  1584. cmd->scsi_done = scsi_done;
  1585. rtn = scsi_dispatch_cmd(cmd);
  1586. if (rtn) {
  1587. scsi_queue_insert(cmd, rtn);
  1588. spin_lock_irq(q->queue_lock);
  1589. goto out_delay;
  1590. }
  1591. spin_lock_irq(q->queue_lock);
  1592. }
  1593. return;
  1594. host_not_ready:
  1595. if (scsi_target(sdev)->can_queue > 0)
  1596. atomic_dec(&scsi_target(sdev)->target_busy);
  1597. not_ready:
  1598. /*
  1599. * lock q, handle tag, requeue req, and decrement device_busy. We
  1600. * must return with queue_lock held.
  1601. *
  1602. * Decrementing device_busy without checking it is OK, as all such
  1603. * cases (host limits or settings) should run the queue at some
  1604. * later time.
  1605. */
  1606. spin_lock_irq(q->queue_lock);
  1607. blk_requeue_request(q, req);
  1608. atomic_dec(&sdev->device_busy);
  1609. out_delay:
  1610. if (!atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev))
  1611. blk_delay_queue(q, SCSI_QUEUE_DELAY);
  1612. }
  1613. static inline int prep_to_mq(int ret)
  1614. {
  1615. switch (ret) {
  1616. case BLKPREP_OK:
  1617. return BLK_MQ_RQ_QUEUE_OK;
  1618. case BLKPREP_DEFER:
  1619. return BLK_MQ_RQ_QUEUE_BUSY;
  1620. default:
  1621. return BLK_MQ_RQ_QUEUE_ERROR;
  1622. }
  1623. }
  1624. static int scsi_mq_prep_fn(struct request *req)
  1625. {
  1626. struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
  1627. struct scsi_device *sdev = req->q->queuedata;
  1628. struct Scsi_Host *shost = sdev->host;
  1629. unsigned char *sense_buf = cmd->sense_buffer;
  1630. struct scatterlist *sg;
  1631. /* zero out the cmd, except for the embedded scsi_request */
  1632. memset((char *)cmd + sizeof(cmd->req), 0,
  1633. sizeof(*cmd) - sizeof(cmd->req));
  1634. req->special = cmd;
  1635. cmd->request = req;
  1636. cmd->device = sdev;
  1637. cmd->sense_buffer = sense_buf;
  1638. cmd->tag = req->tag;
  1639. cmd->prot_op = SCSI_PROT_NORMAL;
  1640. INIT_LIST_HEAD(&cmd->list);
  1641. INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
  1642. cmd->jiffies_at_alloc = jiffies;
  1643. if (shost->use_cmd_list) {
  1644. spin_lock_irq(&sdev->list_lock);
  1645. list_add_tail(&cmd->list, &sdev->cmd_list);
  1646. spin_unlock_irq(&sdev->list_lock);
  1647. }
  1648. sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
  1649. cmd->sdb.table.sgl = sg;
  1650. if (scsi_host_get_prot(shost)) {
  1651. cmd->prot_sdb = (void *)sg +
  1652. min_t(unsigned int,
  1653. shost->sg_tablesize, SG_CHUNK_SIZE) *
  1654. sizeof(struct scatterlist);
  1655. memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));
  1656. cmd->prot_sdb->table.sgl =
  1657. (struct scatterlist *)(cmd->prot_sdb + 1);
  1658. }
  1659. if (blk_bidi_rq(req)) {
  1660. struct request *next_rq = req->next_rq;
  1661. struct scsi_data_buffer *bidi_sdb = blk_mq_rq_to_pdu(next_rq);
  1662. memset(bidi_sdb, 0, sizeof(struct scsi_data_buffer));
  1663. bidi_sdb->table.sgl =
  1664. (struct scatterlist *)(bidi_sdb + 1);
  1665. next_rq->special = bidi_sdb;
  1666. }
  1667. blk_mq_start_request(req);
  1668. return scsi_setup_cmnd(sdev, req);
  1669. }
  1670. static void scsi_mq_done(struct scsi_cmnd *cmd)
  1671. {
  1672. trace_scsi_dispatch_cmd_done(cmd);
  1673. blk_mq_complete_request(cmd->request);
  1674. }
  1675. static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
  1676. const struct blk_mq_queue_data *bd)
  1677. {
  1678. struct request *req = bd->rq;
  1679. struct request_queue *q = req->q;
  1680. struct scsi_device *sdev = q->queuedata;
  1681. struct Scsi_Host *shost = sdev->host;
  1682. struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
  1683. int ret;
  1684. int reason;
  1685. ret = prep_to_mq(scsi_prep_state_check(sdev, req));
  1686. if (ret != BLK_MQ_RQ_QUEUE_OK)
  1687. goto out;
  1688. ret = BLK_MQ_RQ_QUEUE_BUSY;
  1689. if (!get_device(&sdev->sdev_gendev))
  1690. goto out;
  1691. if (!scsi_dev_queue_ready(q, sdev))
  1692. goto out_put_device;
  1693. if (!scsi_target_queue_ready(shost, sdev))
  1694. goto out_dec_device_busy;
  1695. if (!scsi_host_queue_ready(q, shost, sdev))
  1696. goto out_dec_target_busy;
  1697. if (!(req->rq_flags & RQF_DONTPREP)) {
  1698. ret = prep_to_mq(scsi_mq_prep_fn(req));
  1699. if (ret != BLK_MQ_RQ_QUEUE_OK)
  1700. goto out_dec_host_busy;
  1701. req->rq_flags |= RQF_DONTPREP;
  1702. } else {
  1703. blk_mq_start_request(req);
  1704. }
  1705. if (sdev->simple_tags)
  1706. cmd->flags |= SCMD_TAGGED;
  1707. else
  1708. cmd->flags &= ~SCMD_TAGGED;
  1709. scsi_init_cmd_errh(cmd);
  1710. cmd->scsi_done = scsi_mq_done;
  1711. reason = scsi_dispatch_cmd(cmd);
  1712. if (reason) {
  1713. scsi_set_blocked(cmd, reason);
  1714. ret = BLK_MQ_RQ_QUEUE_BUSY;
  1715. goto out_dec_host_busy;
  1716. }
  1717. return BLK_MQ_RQ_QUEUE_OK;
  1718. out_dec_host_busy:
  1719. atomic_dec(&shost->host_busy);
  1720. out_dec_target_busy:
  1721. if (scsi_target(sdev)->can_queue > 0)
  1722. atomic_dec(&scsi_target(sdev)->target_busy);
  1723. out_dec_device_busy:
  1724. atomic_dec(&sdev->device_busy);
  1725. out_put_device:
  1726. put_device(&sdev->sdev_gendev);
  1727. out:
  1728. switch (ret) {
  1729. case BLK_MQ_RQ_QUEUE_BUSY:
  1730. if (atomic_read(&sdev->device_busy) == 0 &&
  1731. !scsi_device_blocked(sdev))
  1732. blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
  1733. break;
  1734. case BLK_MQ_RQ_QUEUE_ERROR:
  1735. /*
1736. * Make sure to release all allocated resources when
  1737. * we hit an error, as we will never see this command
  1738. * again.
  1739. */
  1740. if (req->rq_flags & RQF_DONTPREP)
  1741. scsi_mq_uninit_cmd(cmd);
  1742. break;
  1743. default:
  1744. break;
  1745. }
  1746. return ret;
  1747. }
  1748. static enum blk_eh_timer_return scsi_timeout(struct request *req,
  1749. bool reserved)
  1750. {
  1751. if (reserved)
  1752. return BLK_EH_RESET_TIMER;
  1753. return scsi_times_out(req);
  1754. }
  1755. static int scsi_init_request(struct blk_mq_tag_set *set, struct request *rq,
  1756. unsigned int hctx_idx, unsigned int numa_node)
  1757. {
  1758. struct Scsi_Host *shost = set->driver_data;
  1759. struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
  1760. cmd->sense_buffer =
  1761. scsi_alloc_sense_buffer(shost, GFP_KERNEL, numa_node);
  1762. if (!cmd->sense_buffer)
  1763. return -ENOMEM;
  1764. cmd->req.sense = cmd->sense_buffer;
  1765. return 0;
  1766. }
  1767. static void scsi_exit_request(struct blk_mq_tag_set *set, struct request *rq,
  1768. unsigned int hctx_idx)
  1769. {
  1770. struct Scsi_Host *shost = set->driver_data;
  1771. struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
  1772. scsi_free_sense_buffer(shost, cmd->sense_buffer);
  1773. }
  1774. static int scsi_map_queues(struct blk_mq_tag_set *set)
  1775. {
  1776. struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);
  1777. if (shost->hostt->map_queues)
  1778. return shost->hostt->map_queues(shost);
  1779. return blk_mq_map_queues(set);
  1780. }
  1781. static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
  1782. {
  1783. struct device *host_dev;
  1784. u64 bounce_limit = 0xffffffff;
  1785. if (shost->unchecked_isa_dma)
  1786. return BLK_BOUNCE_ISA;
  1787. /*
  1788. * Platforms with virtual-DMA translation
  1789. * hardware have no practical limit.
  1790. */
  1791. if (!PCI_DMA_BUS_IS_PHYS)
  1792. return BLK_BOUNCE_ANY;
  1793. host_dev = scsi_get_device(shost);
  1794. if (host_dev && host_dev->dma_mask)
  1795. bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT;
  1796. return bounce_limit;
  1797. }
  1798. void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
  1799. {
  1800. struct device *dev = shost->dma_dev;
  1801. /*
  1802. * this limit is imposed by hardware restrictions
  1803. */
  1804. blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
  1805. SG_MAX_SEGMENTS));
  1806. if (scsi_host_prot_dma(shost)) {
  1807. shost->sg_prot_tablesize =
  1808. min_not_zero(shost->sg_prot_tablesize,
  1809. (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
  1810. BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
  1811. blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
  1812. }
  1813. blk_queue_max_hw_sectors(q, shost->max_sectors);
  1814. blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
  1815. blk_queue_segment_boundary(q, shost->dma_boundary);
  1816. dma_set_seg_boundary(dev, shost->dma_boundary);
  1817. blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
  1818. if (!shost->use_clustering)
  1819. q->limits.cluster = 0;
  1820. /*
  1821. * set a reasonable default alignment on word boundaries: the
  1822. * host and device may alter it using
  1823. * blk_queue_update_dma_alignment() later.
  1824. */
  1825. blk_queue_dma_alignment(q, 0x03);
  1826. }
  1827. EXPORT_SYMBOL_GPL(__scsi_init_queue);
  1828. static int scsi_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
  1829. {
  1830. struct Scsi_Host *shost = q->rq_alloc_data;
  1831. struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
  1832. memset(cmd, 0, sizeof(*cmd));
  1833. cmd->sense_buffer = scsi_alloc_sense_buffer(shost, gfp, NUMA_NO_NODE);
  1834. if (!cmd->sense_buffer)
  1835. goto fail;
  1836. cmd->req.sense = cmd->sense_buffer;
  1837. if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
  1838. cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp);
  1839. if (!cmd->prot_sdb)
  1840. goto fail_free_sense;
  1841. }
  1842. return 0;
  1843. fail_free_sense:
  1844. scsi_free_sense_buffer(shost, cmd->sense_buffer);
  1845. fail:
  1846. return -ENOMEM;
  1847. }
  1848. static void scsi_exit_rq(struct request_queue *q, struct request *rq)
  1849. {
  1850. struct Scsi_Host *shost = q->rq_alloc_data;
  1851. struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
  1852. if (cmd->prot_sdb)
  1853. kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
  1854. scsi_free_sense_buffer(shost, cmd->sense_buffer);
  1855. }
  1856. struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
  1857. {
  1858. struct Scsi_Host *shost = sdev->host;
  1859. struct request_queue *q;
  1860. q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
  1861. if (!q)
  1862. return NULL;
  1863. q->cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
  1864. q->rq_alloc_data = shost;
  1865. q->request_fn = scsi_request_fn;
  1866. q->init_rq_fn = scsi_init_rq;
  1867. q->exit_rq_fn = scsi_exit_rq;
  1868. if (blk_init_allocated_queue(q) < 0) {
  1869. blk_cleanup_queue(q);
  1870. return NULL;
  1871. }
  1872. __scsi_init_queue(shost, q);
  1873. blk_queue_prep_rq(q, scsi_prep_fn);
  1874. blk_queue_unprep_rq(q, scsi_unprep_fn);
  1875. blk_queue_softirq_done(q, scsi_softirq_done);
  1876. blk_queue_rq_timed_out(q, scsi_times_out);
  1877. blk_queue_lld_busy(q, scsi_lld_busy);
  1878. return q;
  1879. }
  1880. static const struct blk_mq_ops scsi_mq_ops = {
  1881. .queue_rq = scsi_queue_rq,
  1882. .complete = scsi_softirq_done,
  1883. .timeout = scsi_timeout,
  1884. #ifdef CONFIG_BLK_DEBUG_FS
  1885. .show_rq = scsi_show_rq,
  1886. #endif
  1887. .init_request = scsi_init_request,
  1888. .exit_request = scsi_exit_request,
  1889. .map_queues = scsi_map_queues,
  1890. };
  1891. struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
  1892. {
  1893. sdev->request_queue = blk_mq_init_queue(&sdev->host->tag_set);
  1894. if (IS_ERR(sdev->request_queue))
  1895. return NULL;
  1896. sdev->request_queue->queuedata = sdev;
  1897. __scsi_init_queue(sdev->host, sdev->request_queue);
  1898. return sdev->request_queue;
  1899. }
  1900. int scsi_mq_setup_tags(struct Scsi_Host *shost)
  1901. {
  1902. unsigned int cmd_size, sgl_size, tbl_size;
  1903. tbl_size = shost->sg_tablesize;
  1904. if (tbl_size > SG_CHUNK_SIZE)
  1905. tbl_size = SG_CHUNK_SIZE;
  1906. sgl_size = tbl_size * sizeof(struct scatterlist);
  1907. cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
  1908. if (scsi_host_get_prot(shost))
  1909. cmd_size += sizeof(struct scsi_data_buffer) + sgl_size;
  1910. memset(&shost->tag_set, 0, sizeof(shost->tag_set));
  1911. shost->tag_set.ops = &scsi_mq_ops;
  1912. shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? : 1;
  1913. shost->tag_set.queue_depth = shost->can_queue;
  1914. shost->tag_set.cmd_size = cmd_size;
  1915. shost->tag_set.numa_node = NUMA_NO_NODE;
  1916. shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
  1917. shost->tag_set.flags |=
  1918. BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
  1919. shost->tag_set.driver_data = shost;
  1920. return blk_mq_alloc_tag_set(&shost->tag_set);
  1921. }
  1922. void scsi_mq_destroy_tags(struct Scsi_Host *shost)
  1923. {
  1924. blk_mq_free_tag_set(&shost->tag_set);
  1925. }
  1926. /**
  1927. * scsi_device_from_queue - return sdev associated with a request_queue
  1928. * @q: The request queue to return the sdev from
  1929. *
  1930. * Return the sdev associated with a request queue or NULL if the
  1931. * request_queue does not reference a SCSI device.
  1932. */
  1933. struct scsi_device *scsi_device_from_queue(struct request_queue *q)
  1934. {
  1935. struct scsi_device *sdev = NULL;
  1936. if (q->mq_ops) {
  1937. if (q->mq_ops == &scsi_mq_ops)
  1938. sdev = q->queuedata;
  1939. } else if (q->request_fn == scsi_request_fn)
  1940. sdev = q->queuedata;
  1941. if (!sdev || !get_device(&sdev->sdev_gendev))
  1942. sdev = NULL;
  1943. return sdev;
  1944. }
  1945. EXPORT_SYMBOL_GPL(scsi_device_from_queue);
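/*
 * Illustrative usage sketch (not part of this file): how a caller might map a
 * request_queue back to its scsi_device with scsi_device_from_queue() and drop
 * the reference it returns.  example_identify_queue() is a hypothetical helper.
 */
static void example_identify_queue(struct request_queue *q)
{
	struct scsi_device *sdev = scsi_device_from_queue(q);

	if (!sdev)
		return;	/* not a SCSI queue, or the device is going away */
	sdev_printk(KERN_INFO, sdev, "queue %p belongs to this device\n", q);
	/* scsi_device_from_queue() took a reference; drop it when done */
	put_device(&sdev->sdev_gendev);
}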
  1946. /*
  1947. * Function: scsi_block_requests()
  1948. *
  1949. * Purpose: Utility function used by low-level drivers to prevent further
  1950. * commands from being queued to the device.
  1951. *
  1952. * Arguments: shost - Host in question
  1953. *
  1954. * Returns: Nothing
  1955. *
  1956. * Lock status: No locks are assumed held.
  1957. *
  1958. * Notes: There is no timer nor any other means by which the requests
  1959. * get unblocked other than the low-level driver calling
  1960. * scsi_unblock_requests().
  1961. */
  1962. void scsi_block_requests(struct Scsi_Host *shost)
  1963. {
  1964. shost->host_self_blocked = 1;
  1965. }
  1966. EXPORT_SYMBOL(scsi_block_requests);
  1967. /*
  1968. * Function: scsi_unblock_requests()
  1969. *
  1970. * Purpose: Utility function used by low-level drivers to allow further
1971. * commands to be queued to the device.
  1972. *
  1973. * Arguments: shost - Host in question
  1974. *
  1975. * Returns: Nothing
  1976. *
  1977. * Lock status: No locks are assumed held.
  1978. *
  1979. * Notes: There is no timer nor any other means by which the requests
  1980. * get unblocked other than the low-level driver calling
  1981. * scsi_unblock_requests().
  1982. *
  1983. * This is done as an API function so that changes to the
  1984. * internals of the scsi mid-layer won't require wholesale
  1985. * changes to drivers that use this feature.
  1986. */
  1987. void scsi_unblock_requests(struct Scsi_Host *shost)
  1988. {
  1989. shost->host_self_blocked = 0;
  1990. scsi_run_host_queues(shost);
  1991. }
  1992. EXPORT_SYMBOL(scsi_unblock_requests);
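/*
 * Illustrative usage sketch (not part of this file): the typical LLD pairing of
 * scsi_block_requests() and scsi_unblock_requests() around a window in which
 * the adapter cannot accept commands, e.g. a firmware reset.
 * example_reset_firmware() is a hypothetical driver routine.
 */
static int example_adapter_reset(struct Scsi_Host *shost)
{
	int ret;

	scsi_block_requests(shost);		/* stop new commands being queued */
	ret = example_reset_firmware(shost);	/* hypothetical LLD work */
	scsi_unblock_requests(shost);		/* restart the host's queues */
	return ret;
}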
  1993. int __init scsi_init_queue(void)
  1994. {
  1995. scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
  1996. sizeof(struct scsi_data_buffer),
  1997. 0, 0, NULL);
  1998. if (!scsi_sdb_cache) {
  1999. printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
  2000. return -ENOMEM;
  2001. }
  2002. return 0;
  2003. }
  2004. void scsi_exit_queue(void)
  2005. {
  2006. kmem_cache_destroy(scsi_sense_cache);
  2007. kmem_cache_destroy(scsi_sense_isadma_cache);
  2008. kmem_cache_destroy(scsi_sdb_cache);
  2009. }
  2010. /**
  2011. * scsi_mode_select - issue a mode select
  2012. * @sdev: SCSI device to be queried
  2013. * @pf: Page format bit (1 == standard, 0 == vendor specific)
  2014. * @sp: Save page bit (0 == don't save, 1 == save)
  2015. * @modepage: mode page being requested
  2016. * @buffer: request buffer (may not be smaller than eight bytes)
  2017. * @len: length of request buffer.
  2018. * @timeout: command timeout
  2019. * @retries: number of retries before failing
  2020. * @data: returns a structure abstracting the mode header data
  2021. * @sshdr: place to put sense data (or NULL if no sense to be collected).
  2022. * must be SCSI_SENSE_BUFFERSIZE big.
  2023. *
  2024. * Returns zero if successful; negative error number or scsi
  2025. * status on error
  2026. *
  2027. */
  2028. int
  2029. scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
  2030. unsigned char *buffer, int len, int timeout, int retries,
  2031. struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
  2032. {
  2033. unsigned char cmd[10];
  2034. unsigned char *real_buffer;
  2035. int ret;
  2036. memset(cmd, 0, sizeof(cmd));
  2037. cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);
  2038. if (sdev->use_10_for_ms) {
  2039. if (len > 65535)
  2040. return -EINVAL;
  2041. real_buffer = kmalloc(8 + len, GFP_KERNEL);
  2042. if (!real_buffer)
  2043. return -ENOMEM;
  2044. memcpy(real_buffer + 8, buffer, len);
  2045. len += 8;
  2046. real_buffer[0] = 0;
  2047. real_buffer[1] = 0;
  2048. real_buffer[2] = data->medium_type;
  2049. real_buffer[3] = data->device_specific;
  2050. real_buffer[4] = data->longlba ? 0x01 : 0;
  2051. real_buffer[5] = 0;
  2052. real_buffer[6] = data->block_descriptor_length >> 8;
  2053. real_buffer[7] = data->block_descriptor_length;
  2054. cmd[0] = MODE_SELECT_10;
  2055. cmd[7] = len >> 8;
  2056. cmd[8] = len;
  2057. } else {
  2058. if (len > 255 || data->block_descriptor_length > 255 ||
  2059. data->longlba)
  2060. return -EINVAL;
  2061. real_buffer = kmalloc(4 + len, GFP_KERNEL);
  2062. if (!real_buffer)
  2063. return -ENOMEM;
  2064. memcpy(real_buffer + 4, buffer, len);
  2065. len += 4;
  2066. real_buffer[0] = 0;
  2067. real_buffer[1] = data->medium_type;
  2068. real_buffer[2] = data->device_specific;
  2069. real_buffer[3] = data->block_descriptor_length;
  2070. cmd[0] = MODE_SELECT;
  2071. cmd[4] = len;
  2072. }
  2073. ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
  2074. sshdr, timeout, retries, NULL);
  2075. kfree(real_buffer);
  2076. return ret;
  2077. }
  2078. EXPORT_SYMBOL_GPL(scsi_mode_select);
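/*
 * Illustrative usage sketch (not part of this file): writing a mode page back
 * with scsi_mode_select().  It assumes @buffer, @len and @data came from an
 * earlier scsi_mode_sense() call and that the caller has already modified the
 * page contents in place; the header stripping mirrors how sd uses this API.
 */
static int example_write_mode_page(struct scsi_device *sdev,
				   unsigned char *buffer, int len,
				   struct scsi_mode_data *data)
{
	struct scsi_sense_hdr sshdr;

	data->device_specific = 0;	/* reserved in MODE SELECT data */
	/*
	 * Strip the mode parameter header; scsi_mode_select() rebuilds it.
	 * PF=1 selects the standard page format, SP=1 asks the device to
	 * save the page.
	 */
	return scsi_mode_select(sdev, 1, 1, 0,
				buffer + data->header_length,
				len - data->header_length,
				10 * HZ, 3, data, &sshdr);
}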
  2079. /**
  2080. * scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary.
  2081. * @sdev: SCSI device to be queried
  2082. * @dbd: set if mode sense will allow block descriptors to be returned
  2083. * @modepage: mode page being requested
  2084. * @buffer: request buffer (may not be smaller than eight bytes)
  2085. * @len: length of request buffer.
  2086. * @timeout: command timeout
  2087. * @retries: number of retries before failing
  2088. * @data: returns a structure abstracting the mode header data
  2089. * @sshdr: place to put sense data (or NULL if no sense to be collected).
  2090. * must be SCSI_SENSE_BUFFERSIZE big.
  2091. *
2092. * Returns zero if successful, or a non-zero result code on failure.
2093. * On success, @data->header_length holds the header offset (either 4 or 8,
2094. * depending on whether a six- or ten-byte command was issued).
  2095. */
  2096. int
  2097. scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
  2098. unsigned char *buffer, int len, int timeout, int retries,
  2099. struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
  2100. {
  2101. unsigned char cmd[12];
  2102. int use_10_for_ms;
  2103. int header_length;
  2104. int result, retry_count = retries;
  2105. struct scsi_sense_hdr my_sshdr;
  2106. memset(data, 0, sizeof(*data));
  2107. memset(&cmd[0], 0, 12);
  2108. cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */
  2109. cmd[2] = modepage;
  2110. /* caller might not be interested in sense, but we need it */
  2111. if (!sshdr)
  2112. sshdr = &my_sshdr;
  2113. retry:
  2114. use_10_for_ms = sdev->use_10_for_ms;
  2115. if (use_10_for_ms) {
  2116. if (len < 8)
  2117. len = 8;
  2118. cmd[0] = MODE_SENSE_10;
  2119. cmd[8] = len;
  2120. header_length = 8;
  2121. } else {
  2122. if (len < 4)
  2123. len = 4;
  2124. cmd[0] = MODE_SENSE;
  2125. cmd[4] = len;
  2126. header_length = 4;
  2127. }
  2128. memset(buffer, 0, len);
  2129. result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
  2130. sshdr, timeout, retries, NULL);
  2131. /* This code looks awful: what it's doing is making sure an
  2132. * ILLEGAL REQUEST sense return identifies the actual command
  2133. * byte as the problem. MODE_SENSE commands can return
  2134. * ILLEGAL REQUEST if the code page isn't supported */
  2135. if (use_10_for_ms && !scsi_status_is_good(result) &&
  2136. (driver_byte(result) & DRIVER_SENSE)) {
  2137. if (scsi_sense_valid(sshdr)) {
  2138. if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
  2139. (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
  2140. /*
  2141. * Invalid command operation code
  2142. */
  2143. sdev->use_10_for_ms = 0;
  2144. goto retry;
  2145. }
  2146. }
  2147. }
  2148. if(scsi_status_is_good(result)) {
  2149. if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
  2150. (modepage == 6 || modepage == 8))) {
  2151. /* Initio breakage? */
  2152. header_length = 0;
  2153. data->length = 13;
  2154. data->medium_type = 0;
  2155. data->device_specific = 0;
  2156. data->longlba = 0;
  2157. data->block_descriptor_length = 0;
  2158. } else if(use_10_for_ms) {
  2159. data->length = buffer[0]*256 + buffer[1] + 2;
  2160. data->medium_type = buffer[2];
  2161. data->device_specific = buffer[3];
  2162. data->longlba = buffer[4] & 0x01;
  2163. data->block_descriptor_length = buffer[6]*256
  2164. + buffer[7];
  2165. } else {
  2166. data->length = buffer[0] + 1;
  2167. data->medium_type = buffer[1];
  2168. data->device_specific = buffer[2];
  2169. data->block_descriptor_length = buffer[3];
  2170. }
  2171. data->header_length = header_length;
  2172. } else if ((status_byte(result) == CHECK_CONDITION) &&
  2173. scsi_sense_valid(sshdr) &&
  2174. sshdr->sense_key == UNIT_ATTENTION && retry_count) {
  2175. retry_count--;
  2176. goto retry;
  2177. }
  2178. return result;
  2179. }
  2180. EXPORT_SYMBOL(scsi_mode_sense);
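/*
 * Illustrative usage sketch (not part of this file): fetching the caching mode
 * page (0x08) with scsi_mode_sense() and locating the page data behind the
 * returned header and block descriptors.  The buffer and retry values are
 * arbitrary choices for the example.
 */
static int example_read_write_cache_bit(struct scsi_device *sdev,
					unsigned char *buffer, int len)
{
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	unsigned char *page;
	int res;

	res = scsi_mode_sense(sdev, 0 /* allow block descriptors */, 0x08,
			      buffer, len, 10 * HZ, 3, &data, &sshdr);
	if (!scsi_status_is_good(res))
		return -EIO;
	/* the mode page starts after the header and any block descriptors */
	page = buffer + data.header_length + data.block_descriptor_length;
	return (page[2] & 0x04) ? 1 : 0;	/* WCE: write cache enabled? */
}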
  2181. /**
  2182. * scsi_test_unit_ready - test if unit is ready
  2183. * @sdev: scsi device to change the state of.
  2184. * @timeout: command timeout
  2185. * @retries: number of retries before failing
2186. * @sshdr: output pointer for decoded sense information.
  2187. *
2188. * Returns zero if successful, or an error if the TUR failed. For
  2189. * removable media, UNIT_ATTENTION sets ->changed flag.
  2190. **/
  2191. int
  2192. scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
  2193. struct scsi_sense_hdr *sshdr)
  2194. {
  2195. char cmd[] = {
  2196. TEST_UNIT_READY, 0, 0, 0, 0, 0,
  2197. };
  2198. int result;
  2199. /* try to eat the UNIT_ATTENTION if there are enough retries */
  2200. do {
  2201. result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
  2202. timeout, retries, NULL);
  2203. if (sdev->removable && scsi_sense_valid(sshdr) &&
  2204. sshdr->sense_key == UNIT_ATTENTION)
  2205. sdev->changed = 1;
  2206. } while (scsi_sense_valid(sshdr) &&
  2207. sshdr->sense_key == UNIT_ATTENTION && --retries);
  2208. return result;
  2209. }
  2210. EXPORT_SYMBOL(scsi_test_unit_ready);
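/*
 * Illustrative usage sketch (not part of this file): a ready-poll loop built on
 * scsi_test_unit_ready(), similar in spirit to waiting for a disk to spin up.
 * The retry counts, timeout and sleep interval are arbitrary example values.
 */
static int example_wait_until_ready(struct scsi_device *sdev)
{
	struct scsi_sense_hdr sshdr;
	int i, res = -ENODEV;

	for (i = 0; i < 10; i++) {
		res = scsi_test_unit_ready(sdev, 10 * HZ, 3, &sshdr);
		if (res == 0)
			return 0;		/* unit is ready */
		if (scsi_sense_valid(&sshdr) &&
		    sshdr.sense_key == NOT_READY)
			msleep(1000);		/* not ready yet, keep waiting */
		else
			break;			/* some other failure, give up */
	}
	return res;
}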
  2211. /**
  2212. * scsi_device_set_state - Take the given device through the device state model.
  2213. * @sdev: scsi device to change the state of.
  2214. * @state: state to change to.
  2215. *
2216. * Returns zero if successful or an error if the requested
  2217. * transition is illegal.
  2218. */
  2219. int
  2220. scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
  2221. {
  2222. enum scsi_device_state oldstate = sdev->sdev_state;
  2223. if (state == oldstate)
  2224. return 0;
  2225. switch (state) {
  2226. case SDEV_CREATED:
  2227. switch (oldstate) {
  2228. case SDEV_CREATED_BLOCK:
  2229. break;
  2230. default:
  2231. goto illegal;
  2232. }
  2233. break;
  2234. case SDEV_RUNNING:
  2235. switch (oldstate) {
  2236. case SDEV_CREATED:
  2237. case SDEV_OFFLINE:
  2238. case SDEV_TRANSPORT_OFFLINE:
  2239. case SDEV_QUIESCE:
  2240. case SDEV_BLOCK:
  2241. break;
  2242. default:
  2243. goto illegal;
  2244. }
  2245. break;
  2246. case SDEV_QUIESCE:
  2247. switch (oldstate) {
  2248. case SDEV_RUNNING:
  2249. case SDEV_OFFLINE:
  2250. case SDEV_TRANSPORT_OFFLINE:
  2251. break;
  2252. default:
  2253. goto illegal;
  2254. }
  2255. break;
  2256. case SDEV_OFFLINE:
  2257. case SDEV_TRANSPORT_OFFLINE:
  2258. switch (oldstate) {
  2259. case SDEV_CREATED:
  2260. case SDEV_RUNNING:
  2261. case SDEV_QUIESCE:
  2262. case SDEV_BLOCK:
  2263. break;
  2264. default:
  2265. goto illegal;
  2266. }
  2267. break;
  2268. case SDEV_BLOCK:
  2269. switch (oldstate) {
  2270. case SDEV_RUNNING:
  2271. case SDEV_CREATED_BLOCK:
  2272. break;
  2273. default:
  2274. goto illegal;
  2275. }
  2276. break;
  2277. case SDEV_CREATED_BLOCK:
  2278. switch (oldstate) {
  2279. case SDEV_CREATED:
  2280. break;
  2281. default:
  2282. goto illegal;
  2283. }
  2284. break;
  2285. case SDEV_CANCEL:
  2286. switch (oldstate) {
  2287. case SDEV_CREATED:
  2288. case SDEV_RUNNING:
  2289. case SDEV_QUIESCE:
  2290. case SDEV_OFFLINE:
  2291. case SDEV_TRANSPORT_OFFLINE:
  2292. case SDEV_BLOCK:
  2293. break;
  2294. default:
  2295. goto illegal;
  2296. }
  2297. break;
  2298. case SDEV_DEL:
  2299. switch (oldstate) {
  2300. case SDEV_CREATED:
  2301. case SDEV_RUNNING:
  2302. case SDEV_OFFLINE:
  2303. case SDEV_TRANSPORT_OFFLINE:
  2304. case SDEV_CANCEL:
  2305. case SDEV_CREATED_BLOCK:
  2306. break;
  2307. default:
  2308. goto illegal;
  2309. }
  2310. break;
  2311. }
  2312. sdev->sdev_state = state;
  2313. return 0;
  2314. illegal:
  2315. SCSI_LOG_ERROR_RECOVERY(1,
  2316. sdev_printk(KERN_ERR, sdev,
  2317. "Illegal state transition %s->%s",
  2318. scsi_device_state_name(oldstate),
  2319. scsi_device_state_name(state))
  2320. );
  2321. return -EINVAL;
  2322. }
  2323. EXPORT_SYMBOL(scsi_device_set_state);
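/*
 * Illustrative usage sketch (not part of this file): handling the -EINVAL that
 * scsi_device_set_state() returns for an illegal transition, e.g. trying to
 * offline a device that is already being deleted.  example_force_offline() is
 * a hypothetical caller.
 */
static void example_force_offline(struct scsi_device *sdev)
{
	if (scsi_device_set_state(sdev, SDEV_OFFLINE))
		sdev_printk(KERN_WARNING, sdev,
			    "cannot transition to SDEV_OFFLINE from %s\n",
			    scsi_device_state_name(sdev->sdev_state));
}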
  2324. /**
2325. * scsi_evt_emit - emit a single SCSI device uevent
  2326. * @sdev: associated SCSI device
  2327. * @evt: event to emit
  2328. *
  2329. * Send a single uevent (scsi_event) to the associated scsi_device.
  2330. */
  2331. static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
  2332. {
  2333. int idx = 0;
  2334. char *envp[3];
  2335. switch (evt->evt_type) {
  2336. case SDEV_EVT_MEDIA_CHANGE:
  2337. envp[idx++] = "SDEV_MEDIA_CHANGE=1";
  2338. break;
  2339. case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
  2340. scsi_rescan_device(&sdev->sdev_gendev);
  2341. envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
  2342. break;
  2343. case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
  2344. envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED";
  2345. break;
  2346. case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
  2347. envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED";
  2348. break;
  2349. case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
  2350. envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED";
  2351. break;
  2352. case SDEV_EVT_LUN_CHANGE_REPORTED:
  2353. envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED";
  2354. break;
  2355. case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
  2356. envp[idx++] = "SDEV_UA=ASYMMETRIC_ACCESS_STATE_CHANGED";
  2357. break;
  2358. default:
  2359. /* do nothing */
  2360. break;
  2361. }
  2362. envp[idx++] = NULL;
  2363. kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
  2364. }
  2365. /**
2366. * scsi_evt_thread - send a uevent for each scsi event
  2367. * @work: work struct for scsi_device
  2368. *
  2369. * Dispatch queued events to their associated scsi_device kobjects
  2370. * as uevents.
  2371. */
  2372. void scsi_evt_thread(struct work_struct *work)
  2373. {
  2374. struct scsi_device *sdev;
  2375. enum scsi_device_event evt_type;
  2376. LIST_HEAD(event_list);
  2377. sdev = container_of(work, struct scsi_device, event_work);
  2378. for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++)
  2379. if (test_and_clear_bit(evt_type, sdev->pending_events))
  2380. sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL);
  2381. while (1) {
  2382. struct scsi_event *evt;
  2383. struct list_head *this, *tmp;
  2384. unsigned long flags;
  2385. spin_lock_irqsave(&sdev->list_lock, flags);
  2386. list_splice_init(&sdev->event_list, &event_list);
  2387. spin_unlock_irqrestore(&sdev->list_lock, flags);
  2388. if (list_empty(&event_list))
  2389. break;
  2390. list_for_each_safe(this, tmp, &event_list) {
  2391. evt = list_entry(this, struct scsi_event, node);
  2392. list_del(&evt->node);
  2393. scsi_evt_emit(sdev, evt);
  2394. kfree(evt);
  2395. }
  2396. }
  2397. }
  2398. /**
  2399. * sdev_evt_send - send asserted event to uevent thread
  2400. * @sdev: scsi_device event occurred on
  2401. * @evt: event to send
  2402. *
  2403. * Assert scsi device event asynchronously.
  2404. */
  2405. void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
  2406. {
  2407. unsigned long flags;
  2408. #if 0
  2409. /* FIXME: currently this check eliminates all media change events
  2410. * for polled devices. Need to update to discriminate between AN
  2411. * and polled events */
  2412. if (!test_bit(evt->evt_type, sdev->supported_events)) {
  2413. kfree(evt);
  2414. return;
  2415. }
  2416. #endif
  2417. spin_lock_irqsave(&sdev->list_lock, flags);
  2418. list_add_tail(&evt->node, &sdev->event_list);
  2419. schedule_work(&sdev->event_work);
  2420. spin_unlock_irqrestore(&sdev->list_lock, flags);
  2421. }
  2422. EXPORT_SYMBOL_GPL(sdev_evt_send);
  2423. /**
  2424. * sdev_evt_alloc - allocate a new scsi event
  2425. * @evt_type: type of event to allocate
  2426. * @gfpflags: GFP flags for allocation
  2427. *
  2428. * Allocates and returns a new scsi_event.
  2429. */
  2430. struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
  2431. gfp_t gfpflags)
  2432. {
  2433. struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
  2434. if (!evt)
  2435. return NULL;
  2436. evt->evt_type = evt_type;
  2437. INIT_LIST_HEAD(&evt->node);
  2438. /* evt_type-specific initialization, if any */
  2439. switch (evt_type) {
  2440. case SDEV_EVT_MEDIA_CHANGE:
  2441. case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
  2442. case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
  2443. case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
  2444. case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
  2445. case SDEV_EVT_LUN_CHANGE_REPORTED:
  2446. case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
  2447. default:
  2448. /* do nothing */
  2449. break;
  2450. }
  2451. return evt;
  2452. }
  2453. EXPORT_SYMBOL_GPL(sdev_evt_alloc);
  2454. /**
  2455. * sdev_evt_send_simple - send asserted event to uevent thread
  2456. * @sdev: scsi_device event occurred on
  2457. * @evt_type: type of event to send
  2458. * @gfpflags: GFP flags for allocation
  2459. *
  2460. * Assert scsi device event asynchronously, given an event type.
  2461. */
  2462. void sdev_evt_send_simple(struct scsi_device *sdev,
  2463. enum scsi_device_event evt_type, gfp_t gfpflags)
  2464. {
  2465. struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
  2466. if (!evt) {
  2467. sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
  2468. evt_type);
  2469. return;
  2470. }
  2471. sdev_evt_send(sdev, evt);
  2472. }
  2473. EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
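/*
 * Illustrative usage sketch (not part of this file): an LLD or transport class
 * asserting a capacity-change event after noticing that a LUN was resized, so
 * userspace (udev) can react.  GFP_ATOMIC could be used instead of GFP_KERNEL
 * when called from atomic context.
 */
static void example_report_resize(struct scsi_device *sdev)
{
	sdev_evt_send_simple(sdev, SDEV_EVT_CAPACITY_CHANGE_REPORTED,
			     GFP_KERNEL);
}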
  2474. /**
  2475. * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
  2476. * @sdev: SCSI device to count the number of scsi_request_fn() callers for.
  2477. */
  2478. static int scsi_request_fn_active(struct scsi_device *sdev)
  2479. {
  2480. struct request_queue *q = sdev->request_queue;
  2481. int request_fn_active;
  2482. WARN_ON_ONCE(sdev->host->use_blk_mq);
  2483. spin_lock_irq(q->queue_lock);
  2484. request_fn_active = q->request_fn_active;
  2485. spin_unlock_irq(q->queue_lock);
  2486. return request_fn_active;
  2487. }
  2488. /**
  2489. * scsi_wait_for_queuecommand() - wait for ongoing queuecommand() calls
  2490. * @sdev: SCSI device pointer.
  2491. *
  2492. * Wait until the ongoing shost->hostt->queuecommand() calls that are
  2493. * invoked from scsi_request_fn() have finished.
  2494. */
  2495. static void scsi_wait_for_queuecommand(struct scsi_device *sdev)
  2496. {
  2497. WARN_ON_ONCE(sdev->host->use_blk_mq);
  2498. while (scsi_request_fn_active(sdev))
  2499. msleep(20);
  2500. }
  2501. /**
  2502. * scsi_device_quiesce - Block user issued commands.
  2503. * @sdev: scsi device to quiesce.
  2504. *
  2505. * This works by trying to transition to the SDEV_QUIESCE state
  2506. * (which must be a legal transition). When the device is in this
  2507. * state, only special requests will be accepted, all others will
  2508. * be deferred. Since special requests may also be requeued requests,
  2509. * a successful return doesn't guarantee the device will be
  2510. * totally quiescent.
  2511. *
  2512. * Must be called with user context, may sleep.
  2513. *
2514. * Returns zero if successful or an error if not.
  2515. */
  2516. int
  2517. scsi_device_quiesce(struct scsi_device *sdev)
  2518. {
  2519. int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
  2520. if (err)
  2521. return err;
  2522. scsi_run_queue(sdev->request_queue);
  2523. while (atomic_read(&sdev->device_busy)) {
  2524. msleep_interruptible(200);
  2525. scsi_run_queue(sdev->request_queue);
  2526. }
  2527. return 0;
  2528. }
  2529. EXPORT_SYMBOL(scsi_device_quiesce);
  2530. /**
  2531. * scsi_device_resume - Restart user issued commands to a quiesced device.
  2532. * @sdev: scsi device to resume.
  2533. *
  2534. * Moves the device from quiesced back to running and restarts the
  2535. * queues.
  2536. *
  2537. * Must be called with user context, may sleep.
  2538. */
  2539. void scsi_device_resume(struct scsi_device *sdev)
  2540. {
  2541. /* check if the device state was mutated prior to resume, and if
  2542. * so assume the state is being managed elsewhere (for example
  2543. * device deleted during suspend)
  2544. */
  2545. if (sdev->sdev_state != SDEV_QUIESCE ||
  2546. scsi_device_set_state(sdev, SDEV_RUNNING))
  2547. return;
  2548. scsi_run_queue(sdev->request_queue);
  2549. }
  2550. EXPORT_SYMBOL(scsi_device_resume);
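/*
 * Illustrative usage sketch (not part of this file): bracketing a maintenance
 * operation with scsi_device_quiesce()/scsi_device_resume(), in the style of
 * power-management callers.  example_do_maintenance() is a hypothetical helper.
 */
static int example_maintenance(struct scsi_device *sdev)
{
	int err;

	err = scsi_device_quiesce(sdev);	/* defer user-issued I/O */
	if (err)
		return err;
	err = example_do_maintenance(sdev);	/* hypothetical work */
	scsi_device_resume(sdev);		/* restart user-issued I/O */
	return err;
}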
  2551. static void
  2552. device_quiesce_fn(struct scsi_device *sdev, void *data)
  2553. {
  2554. scsi_device_quiesce(sdev);
  2555. }
  2556. void
  2557. scsi_target_quiesce(struct scsi_target *starget)
  2558. {
  2559. starget_for_each_device(starget, NULL, device_quiesce_fn);
  2560. }
  2561. EXPORT_SYMBOL(scsi_target_quiesce);
  2562. static void
  2563. device_resume_fn(struct scsi_device *sdev, void *data)
  2564. {
  2565. scsi_device_resume(sdev);
  2566. }
  2567. void
  2568. scsi_target_resume(struct scsi_target *starget)
  2569. {
  2570. starget_for_each_device(starget, NULL, device_resume_fn);
  2571. }
  2572. EXPORT_SYMBOL(scsi_target_resume);
  2573. /**
  2574. * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
  2575. * @sdev: device to block
  2576. * @wait: Whether or not to wait until ongoing .queuecommand() /
  2577. * .queue_rq() calls have finished.
  2578. *
  2579. * Block request made by scsi lld's to temporarily stop all
  2580. * scsi commands on the specified device. May sleep.
  2581. *
  2582. * Returns zero if successful or error if not
  2583. *
  2584. * Notes:
  2585. * This routine transitions the device to the SDEV_BLOCK state
  2586. * (which must be a legal transition). When the device is in this
  2587. * state, all commands are deferred until the scsi lld reenables
  2588. * the device with scsi_device_unblock or device_block_tmo fires.
  2589. *
  2590. * To do: avoid that scsi_send_eh_cmnd() calls queuecommand() after
  2591. * scsi_internal_device_block() has blocked a SCSI device and also
  2592. * remove the rport mutex lock and unlock calls from srp_queuecommand().
  2593. */
  2594. int
  2595. scsi_internal_device_block(struct scsi_device *sdev, bool wait)
  2596. {
  2597. struct request_queue *q = sdev->request_queue;
  2598. unsigned long flags;
  2599. int err = 0;
  2600. err = scsi_device_set_state(sdev, SDEV_BLOCK);
  2601. if (err) {
  2602. err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
  2603. if (err)
  2604. return err;
  2605. }
  2606. /*
  2607. * The device has transitioned to SDEV_BLOCK. Stop the
  2608. * block layer from calling the midlayer with this device's
  2609. * request queue.
  2610. */
  2611. if (q->mq_ops) {
  2612. if (wait)
  2613. blk_mq_quiesce_queue(q);
  2614. else
  2615. blk_mq_stop_hw_queues(q);
  2616. } else {
  2617. spin_lock_irqsave(q->queue_lock, flags);
  2618. blk_stop_queue(q);
  2619. spin_unlock_irqrestore(q->queue_lock, flags);
  2620. if (wait)
  2621. scsi_wait_for_queuecommand(sdev);
  2622. }
  2623. return 0;
  2624. }
  2625. EXPORT_SYMBOL_GPL(scsi_internal_device_block);
  2626. /**
  2627. * scsi_internal_device_unblock - resume a device after a block request
  2628. * @sdev: device to resume
  2629. * @new_state: state to set devices to after unblocking
  2630. *
  2631. * Called by scsi lld's or the midlayer to restart the device queue
  2632. * for the previously suspended scsi device. Called from interrupt or
  2633. * normal process context.
  2634. *
  2635. * Returns zero if successful or error if not.
  2636. *
  2637. * Notes:
  2638. * This routine transitions the device to the SDEV_RUNNING state
  2639. * or to one of the offline states (which must be a legal transition)
  2640. * allowing the midlayer to goose the queue for this device.
  2641. */
  2642. int
  2643. scsi_internal_device_unblock(struct scsi_device *sdev,
  2644. enum scsi_device_state new_state)
  2645. {
  2646. struct request_queue *q = sdev->request_queue;
  2647. unsigned long flags;
  2648. /*
  2649. * Try to transition the scsi device to SDEV_RUNNING or one of the
  2650. * offlined states and goose the device queue if successful.
  2651. */
  2652. if ((sdev->sdev_state == SDEV_BLOCK) ||
  2653. (sdev->sdev_state == SDEV_TRANSPORT_OFFLINE))
  2654. sdev->sdev_state = new_state;
  2655. else if (sdev->sdev_state == SDEV_CREATED_BLOCK) {
  2656. if (new_state == SDEV_TRANSPORT_OFFLINE ||
  2657. new_state == SDEV_OFFLINE)
  2658. sdev->sdev_state = new_state;
  2659. else
  2660. sdev->sdev_state = SDEV_CREATED;
  2661. } else if (sdev->sdev_state != SDEV_CANCEL &&
  2662. sdev->sdev_state != SDEV_OFFLINE)
  2663. return -EINVAL;
  2664. if (q->mq_ops) {
  2665. blk_mq_start_stopped_hw_queues(q, false);
  2666. } else {
  2667. spin_lock_irqsave(q->queue_lock, flags);
  2668. blk_start_queue(q);
  2669. spin_unlock_irqrestore(q->queue_lock, flags);
  2670. }
  2671. return 0;
  2672. }
  2673. EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
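/*
 * Illustrative usage sketch (not part of this file): an LLD fencing a device
 * with scsi_internal_device_block()/scsi_internal_device_unblock() while its
 * transport is re-established.  example_relogin() is a hypothetical routine.
 */
static int example_relogin_device(struct scsi_device *sdev)
{
	int err;

	err = scsi_internal_device_block(sdev, true);	/* wait for queuecommand */
	if (err)
		return err;
	err = example_relogin(sdev);			/* hypothetical work */
	/* back to SDEV_RUNNING on success, offline the device otherwise */
	return scsi_internal_device_unblock(sdev,
			err ? SDEV_TRANSPORT_OFFLINE : SDEV_RUNNING);
}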
  2674. static void
  2675. device_block(struct scsi_device *sdev, void *data)
  2676. {
  2677. scsi_internal_device_block(sdev, true);
  2678. }
  2679. static int
  2680. target_block(struct device *dev, void *data)
  2681. {
  2682. if (scsi_is_target_device(dev))
  2683. starget_for_each_device(to_scsi_target(dev), NULL,
  2684. device_block);
  2685. return 0;
  2686. }
  2687. void
  2688. scsi_target_block(struct device *dev)
  2689. {
  2690. if (scsi_is_target_device(dev))
  2691. starget_for_each_device(to_scsi_target(dev), NULL,
  2692. device_block);
  2693. else
  2694. device_for_each_child(dev, NULL, target_block);
  2695. }
  2696. EXPORT_SYMBOL_GPL(scsi_target_block);
  2697. static void
  2698. device_unblock(struct scsi_device *sdev, void *data)
  2699. {
  2700. scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
  2701. }
  2702. static int
  2703. target_unblock(struct device *dev, void *data)
  2704. {
  2705. if (scsi_is_target_device(dev))
  2706. starget_for_each_device(to_scsi_target(dev), data,
  2707. device_unblock);
  2708. return 0;
  2709. }
  2710. void
  2711. scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
  2712. {
  2713. if (scsi_is_target_device(dev))
  2714. starget_for_each_device(to_scsi_target(dev), &new_state,
  2715. device_unblock);
  2716. else
  2717. device_for_each_child(dev, &new_state, target_unblock);
  2718. }
  2719. EXPORT_SYMBOL_GPL(scsi_target_unblock);
  2720. /**
2721. * scsi_kmap_atomic_sg - find and atomically map an sg-element
  2722. * @sgl: scatter-gather list
  2723. * @sg_count: number of segments in sg
  2724. * @offset: offset in bytes into sg, on return offset into the mapped area
  2725. * @len: bytes to map, on return number of bytes mapped
  2726. *
  2727. * Returns virtual address of the start of the mapped page
  2728. */
  2729. void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
  2730. size_t *offset, size_t *len)
  2731. {
  2732. int i;
  2733. size_t sg_len = 0, len_complete = 0;
  2734. struct scatterlist *sg;
  2735. struct page *page;
  2736. WARN_ON(!irqs_disabled());
  2737. for_each_sg(sgl, sg, sg_count, i) {
  2738. len_complete = sg_len; /* Complete sg-entries */
  2739. sg_len += sg->length;
  2740. if (sg_len > *offset)
  2741. break;
  2742. }
  2743. if (unlikely(i == sg_count)) {
  2744. printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
  2745. "elements %d\n",
  2746. __func__, sg_len, *offset, sg_count);
  2747. WARN_ON(1);
  2748. return NULL;
  2749. }
  2750. /* Offset starting from the beginning of first page in this sg-entry */
  2751. *offset = *offset - len_complete + sg->offset;
  2752. /* Assumption: contiguous pages can be accessed as "page + i" */
  2753. page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
  2754. *offset &= ~PAGE_MASK;
  2755. /* Bytes in this sg-entry from *offset to the end of the page */
  2756. sg_len = PAGE_SIZE - *offset;
  2757. if (*len > sg_len)
  2758. *len = sg_len;
  2759. return kmap_atomic(page);
  2760. }
  2761. EXPORT_SYMBOL(scsi_kmap_atomic_sg);
  2762. /**
  2763. * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
  2764. * @virt: virtual address to be unmapped
  2765. */
  2766. void scsi_kunmap_atomic_sg(void *virt)
  2767. {
  2768. kunmap_atomic(virt);
  2769. }
  2770. EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
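/*
 * Illustrative usage sketch (not part of this file): copying a small amount of
 * data out of a scatterlist with scsi_kmap_atomic_sg()/scsi_kunmap_atomic_sg().
 * The mapping helper expects interrupts to be disabled (it WARNs otherwise),
 * hence the local_irq_save().  example_copy_from_sg() is a hypothetical helper.
 */
static void example_copy_from_sg(struct scatterlist *sgl, int sg_count,
				 char *dst, size_t offset, size_t count)
{
	unsigned long flags;

	local_irq_save(flags);
	while (count) {
		size_t off = offset, len = count;
		void *vaddr = scsi_kmap_atomic_sg(sgl, sg_count, &off, &len);

		if (!vaddr)
			break;	/* offset lies beyond the scatterlist */
		/* off/len were clamped to the mapped page on return */
		memcpy(dst, (char *)vaddr + off, len);
		scsi_kunmap_atomic_sg(vaddr);
		dst += len;
		offset += len;
		count -= len;
	}
	local_irq_restore(flags);
}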
  2771. void sdev_disable_disk_events(struct scsi_device *sdev)
  2772. {
  2773. atomic_inc(&sdev->disk_events_disable_depth);
  2774. }
  2775. EXPORT_SYMBOL(sdev_disable_disk_events);
  2776. void sdev_enable_disk_events(struct scsi_device *sdev)
  2777. {
  2778. if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0))
  2779. return;
  2780. atomic_dec(&sdev->disk_events_disable_depth);
  2781. }
  2782. EXPORT_SYMBOL(sdev_enable_disk_events);
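/*
 * Illustrative usage sketch (not part of this file): suppressing media-change
 * polling around an operation that would otherwise trigger spurious disk
 * events.  example_do_quiet_op() is a hypothetical driver routine.
 */
static int example_quiet_operation(struct scsi_device *sdev)
{
	int ret;

	sdev_disable_disk_events(sdev);	/* nestable: increments a depth count */
	ret = example_do_quiet_op(sdev);	/* hypothetical work */
	sdev_enable_disk_events(sdev);	/* must balance the disable above */
	return ret;
}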
  2783. /**
  2784. * scsi_vpd_lun_id - return a unique device identification
  2785. * @sdev: SCSI device
  2786. * @id: buffer for the identification
  2787. * @id_len: length of the buffer
  2788. *
  2789. * Copies a unique device identification into @id based
  2790. * on the information in the VPD page 0x83 of the device.
  2791. * The string will be formatted as a SCSI name string.
  2792. *
  2793. * Returns the length of the identification or error on failure.
  2794. * If the identifier is longer than the supplied buffer the actual
  2795. * identifier length is returned and the buffer is not zero-padded.
  2796. */
  2797. int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
  2798. {
  2799. u8 cur_id_type = 0xff;
  2800. u8 cur_id_size = 0;
  2801. unsigned char *d, *cur_id_str;
  2802. unsigned char __rcu *vpd_pg83;
  2803. int id_size = -EINVAL;
  2804. rcu_read_lock();
  2805. vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
  2806. if (!vpd_pg83) {
  2807. rcu_read_unlock();
  2808. return -ENXIO;
  2809. }
  2810. /*
  2811. * Look for the correct descriptor.
  2812. * Order of preference for lun descriptor:
  2813. * - SCSI name string
  2814. * - NAA IEEE Registered Extended
  2815. * - EUI-64 based 16-byte
  2816. * - EUI-64 based 12-byte
  2817. * - NAA IEEE Registered
  2818. * - NAA IEEE Extended
  2819. * - T10 Vendor ID
2820. * as longer descriptors reduce the likelihood
  2821. * of identification clashes.
  2822. */

	/* The id string must be at least 20 bytes + terminating NULL byte */
	if (id_len < 21) {
		rcu_read_unlock();
		return -EINVAL;
	}

	memset(id, 0, id_len);
	d = vpd_pg83 + 4;
	while (d < vpd_pg83 + sdev->vpd_pg83_len) {
		/* Skip designators not referring to the LUN */
		if ((d[1] & 0x30) != 0x00)
			goto next_desig;

		switch (d[1] & 0xf) {
		case 0x1:
			/* T10 Vendor ID */
			if (cur_id_size > d[3])
				break;
			/* Prefer anything */
			if (cur_id_type > 0x01 && cur_id_type != 0xff)
				break;
			cur_id_size = d[3];
			if (cur_id_size + 4 > id_len)
				cur_id_size = id_len - 4;
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			id_size = snprintf(id, id_len, "t10.%*pE",
					   cur_id_size, cur_id_str);
			break;
		case 0x2:
			/* EUI-64 */
			if (cur_id_size > d[3])
				break;
			/* Prefer NAA IEEE Registered Extended */
			if (cur_id_type == 0x3 &&
			    cur_id_size == d[3])
				break;
			cur_id_size = d[3];
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			switch (cur_id_size) {
			case 8:
				id_size = snprintf(id, id_len,
						   "eui.%8phN",
						   cur_id_str);
				break;
			case 12:
				id_size = snprintf(id, id_len,
						   "eui.%12phN",
						   cur_id_str);
				break;
			case 16:
				id_size = snprintf(id, id_len,
						   "eui.%16phN",
						   cur_id_str);
				break;
			default:
				cur_id_size = 0;
				break;
			}
			break;
		case 0x3:
			/* NAA */
			if (cur_id_size > d[3])
				break;
			cur_id_size = d[3];
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			switch (cur_id_size) {
			case 8:
				id_size = snprintf(id, id_len,
						   "naa.%8phN",
						   cur_id_str);
				break;
			case 16:
				id_size = snprintf(id, id_len,
						   "naa.%16phN",
						   cur_id_str);
				break;
			default:
				cur_id_size = 0;
				break;
			}
			break;
		case 0x8:
			/* SCSI name string */
			if (cur_id_size + 4 > d[3])
				break;
			/* Prefer others for truncated descriptor */
			if (cur_id_size && d[3] > id_len)
				break;
			cur_id_size = id_size = d[3];
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			if (cur_id_size >= id_len)
				cur_id_size = id_len - 1;
			memcpy(id, cur_id_str, cur_id_size);
			/* Decrease priority for truncated descriptor */
			if (cur_id_size != id_size)
				cur_id_size = 6;
			break;
		default:
			break;
		}
next_desig:
		d += d[3] + 4;
	}
	rcu_read_unlock();

	return id_size;
}
EXPORT_SYMBOL(scsi_vpd_lun_id);
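
/*
 * Illustrative usage sketch (not part of the original scsi_lib.c): logging
 * the VPD page 0x83 designator returned by scsi_vpd_lun_id().  The function
 * name log_lun_id_example() and the 64-byte buffer size are hypothetical;
 * per the kerneldoc above, the buffer only has to hold at least 21 bytes,
 * and a return value of at least the buffer size indicates truncation.
 */
static void log_lun_id_example(struct scsi_device *sdev)
{
	char id[64];
	int ret;

	ret = scsi_vpd_lun_id(sdev, id, sizeof(id));
	if (ret < 0) {
		sdev_printk(KERN_INFO, sdev, "no designator (%d)\n", ret);
		return;
	}
	if (ret >= (int)sizeof(id))
		sdev_printk(KERN_INFO, sdev,
			    "designator truncated (%d bytes needed)\n", ret);
	else
		sdev_printk(KERN_INFO, sdev, "designator: %s\n", id);
}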

/**
 * scsi_vpd_tpg_id - return a target port group identifier
 * @sdev: SCSI device
 * @rel_id: pointer to return the relative target port in, if not %NULL
 *
 * Returns the Target Port Group identifier from the information
 * from VPD page 0x83 of the device.
 *
 * Returns the identifier or error on failure.
 */
int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id)
{
	unsigned char *d;
	unsigned char __rcu *vpd_pg83;
	int group_id = -EAGAIN, rel_port = -1;

	rcu_read_lock();
	vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
	if (!vpd_pg83) {
		rcu_read_unlock();
		return -ENXIO;
	}

	/* Walk the descriptors using the RCU-dereferenced pointer */
	d = vpd_pg83 + 4;
	while (d < vpd_pg83 + sdev->vpd_pg83_len) {
		switch (d[1] & 0xf) {
		case 0x4:
			/* Relative target port */
			rel_port = get_unaligned_be16(&d[6]);
			break;
		case 0x5:
			/* Target port group */
			group_id = get_unaligned_be16(&d[6]);
			break;
		default:
			break;
		}
		d += d[3] + 4;
	}
	rcu_read_unlock();

	if (group_id >= 0 && rel_id && rel_port != -1)
		*rel_id = rel_port;

	return group_id;
}
EXPORT_SYMBOL(scsi_vpd_tpg_id);
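
/*
 * Illustrative usage sketch (not part of the original scsi_lib.c): querying
 * the target port group and relative target port of a device, e.g. for
 * ALUA-style path grouping.  The function name report_tpg_example() is
 * hypothetical.
 */
static void report_tpg_example(struct scsi_device *sdev)
{
	int rel_port = -1;
	int group_id;

	group_id = scsi_vpd_tpg_id(sdev, &rel_port);
	if (group_id < 0) {
		/* -ENXIO: no VPD page 0x83; -EAGAIN: no TPG descriptor found */
		return;
	}
	sdev_printk(KERN_INFO, sdev, "tpg %d, relative port %d\n",
		    group_id, rel_port);
}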