scsi.c 68 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
05520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510251125122513251425152516251725182519252025212522252325242525252625272528252925302531253225332534253525362537253825392540254125422543254425452546254725482549255025512552255325542555255625572558255925602561256225632564256525662567256825692570257125722573257425752576257725782579258025812582258325842585258625872588
  1. /*
  2. * NVM Express device driver
  3. * Copyright (c) 2011-2014, Intel Corporation.
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms and conditions of the GNU General Public License,
  7. * version 2, as published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope it will be useful, but WITHOUT
  10. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  12. * more details.
  13. */
  14. /*
  15. * Refer to the SCSI-NVMe Translation spec for details on how
  16. * each command is translated.
  17. */
  18. #include <linux/bio.h>
  19. #include <linux/bitops.h>
  20. #include <linux/blkdev.h>
  21. #include <linux/compat.h>
  22. #include <linux/delay.h>
  23. #include <linux/errno.h>
  24. #include <linux/fs.h>
  25. #include <linux/genhd.h>
  26. #include <linux/idr.h>
  27. #include <linux/init.h>
  28. #include <linux/interrupt.h>
  29. #include <linux/io.h>
  30. #include <linux/kdev_t.h>
  31. #include <linux/kthread.h>
  32. #include <linux/kernel.h>
  33. #include <linux/mm.h>
  34. #include <linux/module.h>
  35. #include <linux/moduleparam.h>
  36. #include <linux/pci.h>
  37. #include <linux/poison.h>
  38. #include <linux/sched.h>
  39. #include <linux/slab.h>
  40. #include <linux/types.h>
  41. #include <asm/unaligned.h>
  42. #include <scsi/sg.h>
  43. #include <scsi/scsi.h>
  44. #include "nvme.h"
  45. static int sg_version_num = 30534; /* 2 digits for each component */
  46. /* VPD Page Codes */
  47. #define VPD_SUPPORTED_PAGES 0x00
  48. #define VPD_SERIAL_NUMBER 0x80
  49. #define VPD_DEVICE_IDENTIFIERS 0x83
  50. #define VPD_EXTENDED_INQUIRY 0x86
  51. #define VPD_BLOCK_LIMITS 0xB0
  52. #define VPD_BLOCK_DEV_CHARACTERISTICS 0xB1
  53. /* format unit paramter list offsets */
  54. #define FORMAT_UNIT_SHORT_PARM_LIST_LEN 4
  55. #define FORMAT_UNIT_LONG_PARM_LIST_LEN 8
  56. #define FORMAT_UNIT_PROT_INT_OFFSET 3
  57. #define FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET 0
  58. #define FORMAT_UNIT_PROT_FIELD_USAGE_MASK 0x07
  59. /* Misc. defines */
  60. #define FIXED_SENSE_DATA 0x70
  61. #define DESC_FORMAT_SENSE_DATA 0x72
  62. #define FIXED_SENSE_DATA_ADD_LENGTH 10
  63. #define LUN_ENTRY_SIZE 8
  64. #define LUN_DATA_HEADER_SIZE 8
  65. #define ALL_LUNS_RETURNED 0x02
  66. #define ALL_WELL_KNOWN_LUNS_RETURNED 0x01
  67. #define RESTRICTED_LUNS_RETURNED 0x00
  68. #define NVME_POWER_STATE_START_VALID 0x00
  69. #define NVME_POWER_STATE_ACTIVE 0x01
  70. #define NVME_POWER_STATE_IDLE 0x02
  71. #define NVME_POWER_STATE_STANDBY 0x03
  72. #define NVME_POWER_STATE_LU_CONTROL 0x07
  73. #define POWER_STATE_0 0
  74. #define POWER_STATE_1 1
  75. #define POWER_STATE_2 2
  76. #define POWER_STATE_3 3
  77. #define DOWNLOAD_SAVE_ACTIVATE 0x05
  78. #define DOWNLOAD_SAVE_DEFER_ACTIVATE 0x0E
  79. #define ACTIVATE_DEFERRED_MICROCODE 0x0F
  80. #define FORMAT_UNIT_IMMED_MASK 0x2
  81. #define FORMAT_UNIT_IMMED_OFFSET 1
  82. #define KELVIN_TEMP_FACTOR 273
  83. #define FIXED_FMT_SENSE_DATA_SIZE 18
  84. #define DESC_FMT_SENSE_DATA_SIZE 8
  85. /* SCSI/NVMe defines and bit masks */
  86. #define INQ_STANDARD_INQUIRY_PAGE 0x00
  87. #define INQ_SUPPORTED_VPD_PAGES_PAGE 0x00
  88. #define INQ_UNIT_SERIAL_NUMBER_PAGE 0x80
  89. #define INQ_DEVICE_IDENTIFICATION_PAGE 0x83
  90. #define INQ_EXTENDED_INQUIRY_DATA_PAGE 0x86
  91. #define INQ_BDEV_LIMITS_PAGE 0xB0
  92. #define INQ_BDEV_CHARACTERISTICS_PAGE 0xB1
  93. #define INQ_SERIAL_NUMBER_LENGTH 0x14
  94. #define INQ_NUM_SUPPORTED_VPD_PAGES 6
  95. #define VERSION_SPC_4 0x06
  96. #define ACA_UNSUPPORTED 0
  97. #define STANDARD_INQUIRY_LENGTH 36
  98. #define ADDITIONAL_STD_INQ_LENGTH 31
  99. #define EXTENDED_INQUIRY_DATA_PAGE_LENGTH 0x3C
  100. #define RESERVED_FIELD 0
  101. /* Mode Sense/Select defines */
  102. #define MODE_PAGE_INFO_EXCEP 0x1C
  103. #define MODE_PAGE_CACHING 0x08
  104. #define MODE_PAGE_CONTROL 0x0A
  105. #define MODE_PAGE_POWER_CONDITION 0x1A
  106. #define MODE_PAGE_RETURN_ALL 0x3F
  107. #define MODE_PAGE_BLK_DES_LEN 0x08
  108. #define MODE_PAGE_LLBAA_BLK_DES_LEN 0x10
  109. #define MODE_PAGE_CACHING_LEN 0x14
  110. #define MODE_PAGE_CONTROL_LEN 0x0C
  111. #define MODE_PAGE_POW_CND_LEN 0x28
  112. #define MODE_PAGE_INF_EXC_LEN 0x0C
  113. #define MODE_PAGE_ALL_LEN 0x54
  114. #define MODE_SENSE6_MPH_SIZE 4
  115. #define MODE_SENSE_PAGE_CONTROL_MASK 0xC0
  116. #define MODE_SENSE_PAGE_CODE_OFFSET 2
  117. #define MODE_SENSE_PAGE_CODE_MASK 0x3F
  118. #define MODE_SENSE_LLBAA_MASK 0x10
  119. #define MODE_SENSE_LLBAA_SHIFT 4
  120. #define MODE_SENSE_DBD_MASK 8
  121. #define MODE_SENSE_DBD_SHIFT 3
  122. #define MODE_SENSE10_MPH_SIZE 8
  123. #define MODE_SELECT_CDB_PAGE_FORMAT_MASK 0x10
  124. #define MODE_SELECT_CDB_SAVE_PAGES_MASK 0x1
  125. #define MODE_SELECT_6_BD_OFFSET 3
  126. #define MODE_SELECT_10_BD_OFFSET 6
  127. #define MODE_SELECT_10_LLBAA_OFFSET 4
  128. #define MODE_SELECT_10_LLBAA_MASK 1
  129. #define MODE_SELECT_6_MPH_SIZE 4
  130. #define MODE_SELECT_10_MPH_SIZE 8
  131. #define CACHING_MODE_PAGE_WCE_MASK 0x04
  132. #define MODE_SENSE_BLK_DESC_ENABLED 0
  133. #define MODE_SENSE_BLK_DESC_COUNT 1
  134. #define MODE_SELECT_PAGE_CODE_MASK 0x3F
  135. #define SHORT_DESC_BLOCK 8
  136. #define LONG_DESC_BLOCK 16
  137. #define MODE_PAGE_POW_CND_LEN_FIELD 0x26
  138. #define MODE_PAGE_INF_EXC_LEN_FIELD 0x0A
  139. #define MODE_PAGE_CACHING_LEN_FIELD 0x12
  140. #define MODE_PAGE_CONTROL_LEN_FIELD 0x0A
  141. #define MODE_SENSE_PC_CURRENT_VALUES 0
  142. /* Log Sense defines */
  143. #define LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE 0x00
  144. #define LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH 0x07
  145. #define LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE 0x2F
  146. #define LOG_PAGE_TEMPERATURE_PAGE 0x0D
  147. #define LOG_SENSE_CDB_SP_NOT_ENABLED 0
  148. #define LOG_SENSE_CDB_PC_MASK 0xC0
  149. #define LOG_SENSE_CDB_PC_SHIFT 6
  150. #define LOG_SENSE_CDB_PC_CUMULATIVE_VALUES 1
  151. #define LOG_SENSE_CDB_PAGE_CODE_MASK 0x3F
  152. #define REMAINING_INFO_EXCP_PAGE_LENGTH 0x8
  153. #define LOG_INFO_EXCP_PAGE_LENGTH 0xC
  154. #define REMAINING_TEMP_PAGE_LENGTH 0xC
  155. #define LOG_TEMP_PAGE_LENGTH 0x10
  156. #define LOG_TEMP_UNKNOWN 0xFF
  157. #define SUPPORTED_LOG_PAGES_PAGE_LENGTH 0x3
  158. /* Read Capacity defines */
  159. #define READ_CAP_10_RESP_SIZE 8
  160. #define READ_CAP_16_RESP_SIZE 32
  161. /* NVMe Namespace and Command Defines */
  162. #define BYTES_TO_DWORDS 4
  163. #define NVME_MAX_FIRMWARE_SLOT 7
  164. /* Report LUNs defines */
  165. #define REPORT_LUNS_FIRST_LUN_OFFSET 8
  166. /* SCSI ADDITIONAL SENSE Codes */
  167. #define SCSI_ASC_NO_SENSE 0x00
  168. #define SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT 0x03
  169. #define SCSI_ASC_LUN_NOT_READY 0x04
  170. #define SCSI_ASC_WARNING 0x0B
  171. #define SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED 0x10
  172. #define SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED 0x10
  173. #define SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED 0x10
  174. #define SCSI_ASC_UNRECOVERED_READ_ERROR 0x11
  175. #define SCSI_ASC_MISCOMPARE_DURING_VERIFY 0x1D
  176. #define SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID 0x20
  177. #define SCSI_ASC_ILLEGAL_COMMAND 0x20
  178. #define SCSI_ASC_ILLEGAL_BLOCK 0x21
  179. #define SCSI_ASC_INVALID_CDB 0x24
  180. #define SCSI_ASC_INVALID_LUN 0x25
  181. #define SCSI_ASC_INVALID_PARAMETER 0x26
  182. #define SCSI_ASC_FORMAT_COMMAND_FAILED 0x31
  183. #define SCSI_ASC_INTERNAL_TARGET_FAILURE 0x44
  184. /* SCSI ADDITIONAL SENSE Code Qualifiers */
  185. #define SCSI_ASCQ_CAUSE_NOT_REPORTABLE 0x00
  186. #define SCSI_ASCQ_FORMAT_COMMAND_FAILED 0x01
  187. #define SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED 0x01
  188. #define SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED 0x02
  189. #define SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED 0x03
  190. #define SCSI_ASCQ_FORMAT_IN_PROGRESS 0x04
  191. #define SCSI_ASCQ_POWER_LOSS_EXPECTED 0x08
  192. #define SCSI_ASCQ_INVALID_LUN_ID 0x09
  193. /* copied from drivers/usb/gadget/function/storage_common.h */
  194. static inline u32 get_unaligned_be24(u8 *buf)
  195. {
  196. return 0xffffff & (u32) get_unaligned_be32(buf - 1);
  197. }
  198. /* Struct to gather data that needs to be extracted from a SCSI CDB.
  199. Not conforming to any particular CDB variant, but compatible with all. */
  200. struct nvme_trans_io_cdb {
  201. u8 fua;
  202. u8 prot_info;
  203. u64 lba;
  204. u32 xfer_len;
  205. };
  206. /* Internal Helper Functions */
  207. /* Copy data to userspace memory */
  208. static int nvme_trans_copy_to_user(struct sg_io_hdr *hdr, void *from,
  209. unsigned long n)
  210. {
  211. int i;
  212. void *index = from;
  213. size_t remaining = n;
  214. size_t xfer_len;
  215. if (hdr->iovec_count > 0) {
  216. struct sg_iovec sgl;
  217. for (i = 0; i < hdr->iovec_count; i++) {
  218. if (copy_from_user(&sgl, hdr->dxferp +
  219. i * sizeof(struct sg_iovec),
  220. sizeof(struct sg_iovec)))
  221. return -EFAULT;
  222. xfer_len = min(remaining, sgl.iov_len);
  223. if (copy_to_user(sgl.iov_base, index, xfer_len))
  224. return -EFAULT;
  225. index += xfer_len;
  226. remaining -= xfer_len;
  227. if (remaining == 0)
  228. break;
  229. }
  230. return 0;
  231. }
  232. if (copy_to_user(hdr->dxferp, from, n))
  233. return -EFAULT;
  234. return 0;
  235. }
  236. /* Copy data from userspace memory */
  237. static int nvme_trans_copy_from_user(struct sg_io_hdr *hdr, void *to,
  238. unsigned long n)
  239. {
  240. int i;
  241. void *index = to;
  242. size_t remaining = n;
  243. size_t xfer_len;
  244. if (hdr->iovec_count > 0) {
  245. struct sg_iovec sgl;
  246. for (i = 0; i < hdr->iovec_count; i++) {
  247. if (copy_from_user(&sgl, hdr->dxferp +
  248. i * sizeof(struct sg_iovec),
  249. sizeof(struct sg_iovec)))
  250. return -EFAULT;
  251. xfer_len = min(remaining, sgl.iov_len);
  252. if (copy_from_user(index, sgl.iov_base, xfer_len))
  253. return -EFAULT;
  254. index += xfer_len;
  255. remaining -= xfer_len;
  256. if (remaining == 0)
  257. break;
  258. }
  259. return 0;
  260. }
  261. if (copy_from_user(to, hdr->dxferp, n))
  262. return -EFAULT;
  263. return 0;
  264. }
  265. /* Status/Sense Buffer Writeback */
  266. static int nvme_trans_completion(struct sg_io_hdr *hdr, u8 status, u8 sense_key,
  267. u8 asc, u8 ascq)
  268. {
  269. u8 xfer_len;
  270. u8 resp[DESC_FMT_SENSE_DATA_SIZE];
  271. if (scsi_status_is_good(status)) {
  272. hdr->status = SAM_STAT_GOOD;
  273. hdr->masked_status = GOOD;
  274. hdr->host_status = DID_OK;
  275. hdr->driver_status = DRIVER_OK;
  276. hdr->sb_len_wr = 0;
  277. } else {
  278. hdr->status = status;
  279. hdr->masked_status = status >> 1;
  280. hdr->host_status = DID_OK;
  281. hdr->driver_status = DRIVER_OK;
  282. memset(resp, 0, DESC_FMT_SENSE_DATA_SIZE);
  283. resp[0] = DESC_FORMAT_SENSE_DATA;
  284. resp[1] = sense_key;
  285. resp[2] = asc;
  286. resp[3] = ascq;
  287. xfer_len = min_t(u8, hdr->mx_sb_len, DESC_FMT_SENSE_DATA_SIZE);
  288. hdr->sb_len_wr = xfer_len;
  289. if (copy_to_user(hdr->sbp, resp, xfer_len) > 0)
  290. return -EFAULT;
  291. }
  292. return 0;
  293. }
  294. /*
  295. * Take a status code from a lowlevel routine, and if it was a positive NVMe
  296. * error code update the sense data based on it. In either case the passed
  297. * in value is returned again, unless an -EFAULT from copy_to_user overrides
  298. * it.
  299. */
  300. static int nvme_trans_status_code(struct sg_io_hdr *hdr, int nvme_sc)
  301. {
  302. u8 status, sense_key, asc, ascq;
  303. int res;
  304. /* For non-nvme (Linux) errors, simply return the error code */
  305. if (nvme_sc < 0)
  306. return nvme_sc;
  307. /* Mask DNR, More, and reserved fields */
  308. switch (nvme_sc & 0x7FF) {
  309. /* Generic Command Status */
  310. case NVME_SC_SUCCESS:
  311. status = SAM_STAT_GOOD;
  312. sense_key = NO_SENSE;
  313. asc = SCSI_ASC_NO_SENSE;
  314. ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
  315. break;
  316. case NVME_SC_INVALID_OPCODE:
  317. status = SAM_STAT_CHECK_CONDITION;
  318. sense_key = ILLEGAL_REQUEST;
  319. asc = SCSI_ASC_ILLEGAL_COMMAND;
  320. ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
  321. break;
  322. case NVME_SC_INVALID_FIELD:
  323. status = SAM_STAT_CHECK_CONDITION;
  324. sense_key = ILLEGAL_REQUEST;
  325. asc = SCSI_ASC_INVALID_CDB;
  326. ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
  327. break;
  328. case NVME_SC_DATA_XFER_ERROR:
  329. status = SAM_STAT_CHECK_CONDITION;
  330. sense_key = MEDIUM_ERROR;
  331. asc = SCSI_ASC_NO_SENSE;
  332. ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
  333. break;
  334. case NVME_SC_POWER_LOSS:
  335. status = SAM_STAT_TASK_ABORTED;
  336. sense_key = ABORTED_COMMAND;
  337. asc = SCSI_ASC_WARNING;
  338. ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
  339. break;
  340. case NVME_SC_INTERNAL:
  341. status = SAM_STAT_CHECK_CONDITION;
  342. sense_key = HARDWARE_ERROR;
  343. asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
  344. ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
  345. break;
  346. case NVME_SC_ABORT_REQ:
  347. status = SAM_STAT_TASK_ABORTED;
  348. sense_key = ABORTED_COMMAND;
  349. asc = SCSI_ASC_NO_SENSE;
  350. ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
  351. break;
  352. case NVME_SC_ABORT_QUEUE:
  353. status = SAM_STAT_TASK_ABORTED;
  354. sense_key = ABORTED_COMMAND;
  355. asc = SCSI_ASC_NO_SENSE;
  356. ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
  357. break;
  358. case NVME_SC_FUSED_FAIL:
  359. status = SAM_STAT_TASK_ABORTED;
  360. sense_key = ABORTED_COMMAND;
  361. asc = SCSI_ASC_NO_SENSE;
  362. ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
  363. break;
  364. case NVME_SC_FUSED_MISSING:
  365. status = SAM_STAT_TASK_ABORTED;
  366. sense_key = ABORTED_COMMAND;
  367. asc = SCSI_ASC_NO_SENSE;
  368. ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
  369. break;
  370. case NVME_SC_INVALID_NS:
  371. status = SAM_STAT_CHECK_CONDITION;
  372. sense_key = ILLEGAL_REQUEST;
  373. asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
  374. ascq = SCSI_ASCQ_INVALID_LUN_ID;
  375. break;
  376. case NVME_SC_LBA_RANGE:
  377. status = SAM_STAT_CHECK_CONDITION;
  378. sense_key = ILLEGAL_REQUEST;
  379. asc = SCSI_ASC_ILLEGAL_BLOCK;
  380. ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
  381. break;
  382. case NVME_SC_CAP_EXCEEDED:
  383. status = SAM_STAT_CHECK_CONDITION;
  384. sense_key = MEDIUM_ERROR;
  385. asc = SCSI_ASC_NO_SENSE;
  386. ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
  387. break;
  388. case NVME_SC_NS_NOT_READY:
  389. status = SAM_STAT_CHECK_CONDITION;
  390. sense_key = NOT_READY;
  391. asc = SCSI_ASC_LUN_NOT_READY;
  392. ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
  393. break;
  394. /* Command Specific Status */
  395. case NVME_SC_INVALID_FORMAT:
  396. status = SAM_STAT_CHECK_CONDITION;
  397. sense_key = ILLEGAL_REQUEST;
  398. asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
  399. ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
  400. break;
  401. case NVME_SC_BAD_ATTRIBUTES:
  402. status = SAM_STAT_CHECK_CONDITION;
  403. sense_key = ILLEGAL_REQUEST;
  404. asc = SCSI_ASC_INVALID_CDB;
  405. ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
  406. break;
  407. /* Media Errors */
  408. case NVME_SC_WRITE_FAULT:
  409. status = SAM_STAT_CHECK_CONDITION;
  410. sense_key = MEDIUM_ERROR;
  411. asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
  412. ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
  413. break;
  414. case NVME_SC_READ_ERROR:
  415. status = SAM_STAT_CHECK_CONDITION;
  416. sense_key = MEDIUM_ERROR;
  417. asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
  418. ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
  419. break;
  420. case NVME_SC_GUARD_CHECK:
  421. status = SAM_STAT_CHECK_CONDITION;
  422. sense_key = MEDIUM_ERROR;
  423. asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
  424. ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
  425. break;
  426. case NVME_SC_APPTAG_CHECK:
  427. status = SAM_STAT_CHECK_CONDITION;
  428. sense_key = MEDIUM_ERROR;
  429. asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
  430. ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
  431. break;
  432. case NVME_SC_REFTAG_CHECK:
  433. status = SAM_STAT_CHECK_CONDITION;
  434. sense_key = MEDIUM_ERROR;
  435. asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
  436. ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
  437. break;
  438. case NVME_SC_COMPARE_FAILED:
  439. status = SAM_STAT_CHECK_CONDITION;
  440. sense_key = MISCOMPARE;
  441. asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
  442. ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
  443. break;
  444. case NVME_SC_ACCESS_DENIED:
  445. status = SAM_STAT_CHECK_CONDITION;
  446. sense_key = ILLEGAL_REQUEST;
  447. asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
  448. ascq = SCSI_ASCQ_INVALID_LUN_ID;
  449. break;
  450. /* Unspecified/Default */
  451. case NVME_SC_CMDID_CONFLICT:
  452. case NVME_SC_CMD_SEQ_ERROR:
  453. case NVME_SC_CQ_INVALID:
  454. case NVME_SC_QID_INVALID:
  455. case NVME_SC_QUEUE_SIZE:
  456. case NVME_SC_ABORT_LIMIT:
  457. case NVME_SC_ABORT_MISSING:
  458. case NVME_SC_ASYNC_LIMIT:
  459. case NVME_SC_FIRMWARE_SLOT:
  460. case NVME_SC_FIRMWARE_IMAGE:
  461. case NVME_SC_INVALID_VECTOR:
  462. case NVME_SC_INVALID_LOG_PAGE:
  463. default:
  464. status = SAM_STAT_CHECK_CONDITION;
  465. sense_key = ILLEGAL_REQUEST;
  466. asc = SCSI_ASC_NO_SENSE;
  467. ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
  468. break;
  469. }
  470. res = nvme_trans_completion(hdr, status, sense_key, asc, ascq);
  471. return res ? res : nvme_sc;
  472. }
  473. /* INQUIRY Helper Functions */
  474. static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
  475. struct sg_io_hdr *hdr, u8 *inq_response,
  476. int alloc_len)
  477. {
  478. struct nvme_dev *dev = ns->dev;
  479. struct nvme_id_ns *id_ns;
  480. int res;
  481. int nvme_sc;
  482. int xfer_len;
  483. u8 resp_data_format = 0x02;
  484. u8 protect;
  485. u8 cmdque = 0x01 << 1;
  486. u8 fw_offset = sizeof(dev->firmware_rev);
  487. /* nvme ns identify - use DPS value for PROTECT field */
  488. nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns);
  489. res = nvme_trans_status_code(hdr, nvme_sc);
  490. if (res)
  491. return res;
  492. if (id_ns->dps)
  493. protect = 0x01;
  494. else
  495. protect = 0;
  496. kfree(id_ns);
  497. memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
  498. inq_response[2] = VERSION_SPC_4;
  499. inq_response[3] = resp_data_format; /*normaca=0 | hisup=0 */
  500. inq_response[4] = ADDITIONAL_STD_INQ_LENGTH;
  501. inq_response[5] = protect; /* sccs=0 | acc=0 | tpgs=0 | pc3=0 */
  502. inq_response[7] = cmdque; /* wbus16=0 | sync=0 | vs=0 */
  503. strncpy(&inq_response[8], "NVMe ", 8);
  504. strncpy(&inq_response[16], dev->model, 16);
  505. while (dev->firmware_rev[fw_offset - 1] == ' ' && fw_offset > 4)
  506. fw_offset--;
  507. fw_offset -= 4;
  508. strncpy(&inq_response[32], dev->firmware_rev + fw_offset, 4);
  509. xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
  510. return nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
  511. }
  512. static int nvme_trans_supported_vpd_pages(struct nvme_ns *ns,
  513. struct sg_io_hdr *hdr, u8 *inq_response,
  514. int alloc_len)
  515. {
  516. int xfer_len;
  517. memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
  518. inq_response[1] = INQ_SUPPORTED_VPD_PAGES_PAGE; /* Page Code */
  519. inq_response[3] = INQ_NUM_SUPPORTED_VPD_PAGES; /* Page Length */
  520. inq_response[4] = INQ_SUPPORTED_VPD_PAGES_PAGE;
  521. inq_response[5] = INQ_UNIT_SERIAL_NUMBER_PAGE;
  522. inq_response[6] = INQ_DEVICE_IDENTIFICATION_PAGE;
  523. inq_response[7] = INQ_EXTENDED_INQUIRY_DATA_PAGE;
  524. inq_response[8] = INQ_BDEV_CHARACTERISTICS_PAGE;
  525. inq_response[9] = INQ_BDEV_LIMITS_PAGE;
  526. xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
  527. return nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
  528. }
  529. static int nvme_trans_unit_serial_page(struct nvme_ns *ns,
  530. struct sg_io_hdr *hdr, u8 *inq_response,
  531. int alloc_len)
  532. {
  533. struct nvme_dev *dev = ns->dev;
  534. int xfer_len;
  535. memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
  536. inq_response[1] = INQ_UNIT_SERIAL_NUMBER_PAGE; /* Page Code */
  537. inq_response[3] = INQ_SERIAL_NUMBER_LENGTH; /* Page Length */
  538. strncpy(&inq_response[4], dev->serial, INQ_SERIAL_NUMBER_LENGTH);
  539. xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
  540. return nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
  541. }
  542. static int nvme_fill_device_id_eui64(struct nvme_ns *ns, struct sg_io_hdr *hdr,
  543. u8 *inq_response, int alloc_len)
  544. {
  545. struct nvme_id_ns *id_ns;
  546. int nvme_sc, res;
  547. size_t len;
  548. void *eui;
  549. nvme_sc = nvme_identify_ns(ns->dev, ns->ns_id, &id_ns);
  550. res = nvme_trans_status_code(hdr, nvme_sc);
  551. if (res)
  552. return res;
  553. eui = id_ns->eui64;
  554. len = sizeof(id_ns->eui64);
  555. if (readl(ns->dev->bar + NVME_REG_VS) >= NVME_VS(1, 2)) {
  556. if (bitmap_empty(eui, len * 8)) {
  557. eui = id_ns->nguid;
  558. len = sizeof(id_ns->nguid);
  559. }
  560. }
  561. if (bitmap_empty(eui, len * 8)) {
  562. res = -EOPNOTSUPP;
  563. goto out_free_id;
  564. }
  565. memset(inq_response, 0, alloc_len);
  566. inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE;
  567. inq_response[3] = 4 + len; /* Page Length */
  568. /* Designation Descriptor start */
  569. inq_response[4] = 0x01; /* Proto ID=0h | Code set=1h */
  570. inq_response[5] = 0x02; /* PIV=0b | Asso=00b | Designator Type=2h */
  571. inq_response[6] = 0x00; /* Rsvd */
  572. inq_response[7] = len; /* Designator Length */
  573. memcpy(&inq_response[8], eui, len);
  574. res = nvme_trans_copy_to_user(hdr, inq_response, alloc_len);
  575. out_free_id:
  576. kfree(id_ns);
  577. return res;
  578. }
  579. static int nvme_fill_device_id_scsi_string(struct nvme_ns *ns,
  580. struct sg_io_hdr *hdr, u8 *inq_response, int alloc_len)
  581. {
  582. struct nvme_dev *dev = ns->dev;
  583. struct nvme_id_ctrl *id_ctrl;
  584. int nvme_sc, res;
  585. if (alloc_len < 72) {
  586. return nvme_trans_completion(hdr,
  587. SAM_STAT_CHECK_CONDITION,
  588. ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
  589. SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
  590. }
  591. nvme_sc = nvme_identify_ctrl(dev, &id_ctrl);
  592. res = nvme_trans_status_code(hdr, nvme_sc);
  593. if (res)
  594. return res;
  595. memset(inq_response, 0, alloc_len);
  596. inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE;
  597. inq_response[3] = 0x48; /* Page Length */
  598. /* Designation Descriptor start */
  599. inq_response[4] = 0x03; /* Proto ID=0h | Code set=3h */
  600. inq_response[5] = 0x08; /* PIV=0b | Asso=00b | Designator Type=8h */
  601. inq_response[6] = 0x00; /* Rsvd */
  602. inq_response[7] = 0x44; /* Designator Length */
  603. sprintf(&inq_response[8], "%04x", le16_to_cpu(id_ctrl->vid));
  604. memcpy(&inq_response[12], dev->model, sizeof(dev->model));
  605. sprintf(&inq_response[52], "%04x", cpu_to_be32(ns->ns_id));
  606. memcpy(&inq_response[56], dev->serial, sizeof(dev->serial));
  607. res = nvme_trans_copy_to_user(hdr, inq_response, alloc_len);
  608. kfree(id_ctrl);
  609. return res;
  610. }
  611. static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
  612. u8 *resp, int alloc_len)
  613. {
  614. int res;
  615. if (readl(ns->dev->bar + NVME_REG_VS) >= NVME_VS(1, 1)) {
  616. res = nvme_fill_device_id_eui64(ns, hdr, resp, alloc_len);
  617. if (res != -EOPNOTSUPP)
  618. return res;
  619. }
  620. return nvme_fill_device_id_scsi_string(ns, hdr, resp, alloc_len);
  621. }
  622. static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
  623. int alloc_len)
  624. {
  625. u8 *inq_response;
  626. int res;
  627. int nvme_sc;
  628. struct nvme_dev *dev = ns->dev;
  629. struct nvme_id_ctrl *id_ctrl;
  630. struct nvme_id_ns *id_ns;
  631. int xfer_len;
  632. u8 microcode = 0x80;
  633. u8 spt;
  634. u8 spt_lut[8] = {0, 0, 2, 1, 4, 6, 5, 7};
  635. u8 grd_chk, app_chk, ref_chk, protect;
  636. u8 uask_sup = 0x20;
  637. u8 v_sup;
  638. u8 luiclr = 0x01;
  639. inq_response = kmalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
  640. if (inq_response == NULL)
  641. return -ENOMEM;
  642. nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns);
  643. res = nvme_trans_status_code(hdr, nvme_sc);
  644. if (res)
  645. goto out_free_inq;
  646. spt = spt_lut[id_ns->dpc & 0x07] << 3;
  647. if (id_ns->dps)
  648. protect = 0x01;
  649. else
  650. protect = 0;
  651. kfree(id_ns);
  652. grd_chk = protect << 2;
  653. app_chk = protect << 1;
  654. ref_chk = protect;
  655. nvme_sc = nvme_identify_ctrl(dev, &id_ctrl);
  656. res = nvme_trans_status_code(hdr, nvme_sc);
  657. if (res)
  658. goto out_free_inq;
  659. v_sup = id_ctrl->vwc;
  660. kfree(id_ctrl);
  661. memset(inq_response, 0, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
  662. inq_response[1] = INQ_EXTENDED_INQUIRY_DATA_PAGE; /* Page Code */
  663. inq_response[2] = 0x00; /* Page Length MSB */
  664. inq_response[3] = 0x3C; /* Page Length LSB */
  665. inq_response[4] = microcode | spt | grd_chk | app_chk | ref_chk;
  666. inq_response[5] = uask_sup;
  667. inq_response[6] = v_sup;
  668. inq_response[7] = luiclr;
  669. inq_response[8] = 0;
  670. inq_response[9] = 0;
  671. xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
  672. res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
  673. out_free_inq:
  674. kfree(inq_response);
  675. return res;
  676. }
  677. static int nvme_trans_bdev_limits_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
  678. u8 *inq_response, int alloc_len)
  679. {
  680. __be32 max_sectors = cpu_to_be32(
  681. nvme_block_nr(ns, queue_max_hw_sectors(ns->queue)));
  682. __be32 max_discard = cpu_to_be32(ns->queue->limits.max_discard_sectors);
  683. __be32 discard_desc_count = cpu_to_be32(0x100);
  684. memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
  685. inq_response[1] = VPD_BLOCK_LIMITS;
  686. inq_response[3] = 0x3c; /* Page Length */
  687. memcpy(&inq_response[8], &max_sectors, sizeof(u32));
  688. memcpy(&inq_response[20], &max_discard, sizeof(u32));
  689. if (max_discard)
  690. memcpy(&inq_response[24], &discard_desc_count, sizeof(u32));
  691. return nvme_trans_copy_to_user(hdr, inq_response, 0x3c);
  692. }
  693. static int nvme_trans_bdev_char_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
  694. int alloc_len)
  695. {
  696. u8 *inq_response;
  697. int res;
  698. int xfer_len;
  699. inq_response = kzalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
  700. if (inq_response == NULL) {
  701. res = -ENOMEM;
  702. goto out_mem;
  703. }
  704. inq_response[1] = INQ_BDEV_CHARACTERISTICS_PAGE; /* Page Code */
  705. inq_response[2] = 0x00; /* Page Length MSB */
  706. inq_response[3] = 0x3C; /* Page Length LSB */
  707. inq_response[4] = 0x00; /* Medium Rotation Rate MSB */
  708. inq_response[5] = 0x01; /* Medium Rotation Rate LSB */
  709. inq_response[6] = 0x00; /* Form Factor */
  710. xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
  711. res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
  712. kfree(inq_response);
  713. out_mem:
  714. return res;
  715. }
  716. /* LOG SENSE Helper Functions */
  717. static int nvme_trans_log_supp_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr,
  718. int alloc_len)
  719. {
  720. int res;
  721. int xfer_len;
  722. u8 *log_response;
  723. log_response = kzalloc(LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH, GFP_KERNEL);
  724. if (log_response == NULL) {
  725. res = -ENOMEM;
  726. goto out_mem;
  727. }
  728. log_response[0] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE;
  729. /* Subpage=0x00, Page Length MSB=0 */
  730. log_response[3] = SUPPORTED_LOG_PAGES_PAGE_LENGTH;
  731. log_response[4] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE;
  732. log_response[5] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE;
  733. log_response[6] = LOG_PAGE_TEMPERATURE_PAGE;
  734. xfer_len = min(alloc_len, LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH);
  735. res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);
  736. kfree(log_response);
  737. out_mem:
  738. return res;
  739. }
  740. static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
  741. struct sg_io_hdr *hdr, int alloc_len)
  742. {
  743. int res;
  744. int xfer_len;
  745. u8 *log_response;
  746. struct nvme_dev *dev = ns->dev;
  747. struct nvme_smart_log *smart_log;
  748. u8 temp_c;
  749. u16 temp_k;
  750. log_response = kzalloc(LOG_INFO_EXCP_PAGE_LENGTH, GFP_KERNEL);
  751. if (log_response == NULL)
  752. return -ENOMEM;
  753. res = nvme_get_log_page(dev, &smart_log);
  754. if (res < 0)
  755. goto out_free_response;
  756. if (res != NVME_SC_SUCCESS) {
  757. temp_c = LOG_TEMP_UNKNOWN;
  758. } else {
  759. temp_k = (smart_log->temperature[1] << 8) +
  760. (smart_log->temperature[0]);
  761. temp_c = temp_k - KELVIN_TEMP_FACTOR;
  762. }
  763. kfree(smart_log);
  764. log_response[0] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE;
  765. /* Subpage=0x00, Page Length MSB=0 */
  766. log_response[3] = REMAINING_INFO_EXCP_PAGE_LENGTH;
  767. /* Informational Exceptions Log Parameter 1 Start */
  768. /* Parameter Code=0x0000 bytes 4,5 */
  769. log_response[6] = 0x23; /* DU=0, TSD=1, ETC=0, TMC=0, FMT_AND_LNK=11b */
  770. log_response[7] = 0x04; /* PARAMETER LENGTH */
  771. /* Add sense Code and qualifier = 0x00 each */
  772. /* Use Temperature from NVMe Get Log Page, convert to C from K */
  773. log_response[10] = temp_c;
  774. xfer_len = min(alloc_len, LOG_INFO_EXCP_PAGE_LENGTH);
  775. res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);
  776. out_free_response:
  777. kfree(log_response);
  778. return res;
  779. }
  780. static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
  781. int alloc_len)
  782. {
  783. int res;
  784. int xfer_len;
  785. u8 *log_response;
  786. struct nvme_dev *dev = ns->dev;
  787. struct nvme_smart_log *smart_log;
  788. u32 feature_resp;
  789. u8 temp_c_cur, temp_c_thresh;
  790. u16 temp_k;
  791. log_response = kzalloc(LOG_TEMP_PAGE_LENGTH, GFP_KERNEL);
  792. if (log_response == NULL)
  793. return -ENOMEM;
  794. res = nvme_get_log_page(dev, &smart_log);
  795. if (res < 0)
  796. goto out_free_response;
  797. if (res != NVME_SC_SUCCESS) {
  798. temp_c_cur = LOG_TEMP_UNKNOWN;
  799. } else {
  800. temp_k = (smart_log->temperature[1] << 8) +
  801. (smart_log->temperature[0]);
  802. temp_c_cur = temp_k - KELVIN_TEMP_FACTOR;
  803. }
  804. kfree(smart_log);
  805. /* Get Features for Temp Threshold */
  806. res = nvme_get_features(dev, NVME_FEAT_TEMP_THRESH, 0, 0,
  807. &feature_resp);
  808. if (res != NVME_SC_SUCCESS)
  809. temp_c_thresh = LOG_TEMP_UNKNOWN;
  810. else
  811. temp_c_thresh = (feature_resp & 0xFFFF) - KELVIN_TEMP_FACTOR;
  812. log_response[0] = LOG_PAGE_TEMPERATURE_PAGE;
  813. /* Subpage=0x00, Page Length MSB=0 */
  814. log_response[3] = REMAINING_TEMP_PAGE_LENGTH;
  815. /* Temperature Log Parameter 1 (Temperature) Start */
  816. /* Parameter Code = 0x0000 */
  817. log_response[6] = 0x01; /* Format and Linking = 01b */
  818. log_response[7] = 0x02; /* Parameter Length */
  819. /* Use Temperature from NVMe Get Log Page, convert to C from K */
  820. log_response[9] = temp_c_cur;
  821. /* Temperature Log Parameter 2 (Reference Temperature) Start */
  822. log_response[11] = 0x01; /* Parameter Code = 0x0001 */
  823. log_response[12] = 0x01; /* Format and Linking = 01b */
  824. log_response[13] = 0x02; /* Parameter Length */
  825. /* Use Temperature Thresh from NVMe Get Log Page, convert to C from K */
  826. log_response[15] = temp_c_thresh;
  827. xfer_len = min(alloc_len, LOG_TEMP_PAGE_LENGTH);
  828. res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);
  829. out_free_response:
  830. kfree(log_response);
  831. return res;
  832. }
  833. /* MODE SENSE Helper Functions */
  834. static int nvme_trans_fill_mode_parm_hdr(u8 *resp, int len, u8 cdb10, u8 llbaa,
  835. u16 mode_data_length, u16 blk_desc_len)
  836. {
  837. /* Quick check to make sure I don't stomp on my own memory... */
  838. if ((cdb10 && len < 8) || (!cdb10 && len < 4))
  839. return -EINVAL;
  840. if (cdb10) {
  841. resp[0] = (mode_data_length & 0xFF00) >> 8;
  842. resp[1] = (mode_data_length & 0x00FF);
  843. resp[3] = 0x10 /* DPOFUA */;
  844. resp[4] = llbaa;
  845. resp[5] = RESERVED_FIELD;
  846. resp[6] = (blk_desc_len & 0xFF00) >> 8;
  847. resp[7] = (blk_desc_len & 0x00FF);
  848. } else {
  849. resp[0] = (mode_data_length & 0x00FF);
  850. resp[2] = 0x10 /* DPOFUA */;
  851. resp[3] = (blk_desc_len & 0x00FF);
  852. }
  853. return 0;
  854. }
  855. static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr,
  856. u8 *resp, int len, u8 llbaa)
  857. {
  858. int res;
  859. int nvme_sc;
  860. struct nvme_dev *dev = ns->dev;
  861. struct nvme_id_ns *id_ns;
  862. u8 flbas;
  863. u32 lba_length;
  864. if (llbaa == 0 && len < MODE_PAGE_BLK_DES_LEN)
  865. return -EINVAL;
  866. else if (llbaa > 0 && len < MODE_PAGE_LLBAA_BLK_DES_LEN)
  867. return -EINVAL;
  868. nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns);
  869. res = nvme_trans_status_code(hdr, nvme_sc);
  870. if (res)
  871. return res;
  872. flbas = (id_ns->flbas) & 0x0F;
  873. lba_length = (1 << (id_ns->lbaf[flbas].ds));
  874. if (llbaa == 0) {
  875. __be32 tmp_cap = cpu_to_be32(le64_to_cpu(id_ns->ncap));
  876. /* Byte 4 is reserved */
  877. __be32 tmp_len = cpu_to_be32(lba_length & 0x00FFFFFF);
  878. memcpy(resp, &tmp_cap, sizeof(u32));
  879. memcpy(&resp[4], &tmp_len, sizeof(u32));
  880. } else {
  881. __be64 tmp_cap = cpu_to_be64(le64_to_cpu(id_ns->ncap));
  882. __be32 tmp_len = cpu_to_be32(lba_length);
  883. memcpy(resp, &tmp_cap, sizeof(u64));
  884. /* Bytes 8, 9, 10, 11 are reserved */
  885. memcpy(&resp[12], &tmp_len, sizeof(u32));
  886. }
  887. kfree(id_ns);
  888. return res;
  889. }
  890. static int nvme_trans_fill_control_page(struct nvme_ns *ns,
  891. struct sg_io_hdr *hdr, u8 *resp,
  892. int len)
  893. {
  894. if (len < MODE_PAGE_CONTROL_LEN)
  895. return -EINVAL;
  896. resp[0] = MODE_PAGE_CONTROL;
  897. resp[1] = MODE_PAGE_CONTROL_LEN_FIELD;
  898. resp[2] = 0x0E; /* TST=000b, TMF_ONLY=0, DPICZ=1,
  899. * D_SENSE=1, GLTSD=1, RLEC=0 */
  900. resp[3] = 0x12; /* Q_ALGO_MODIFIER=1h, NUAR=0, QERR=01b */
  901. /* Byte 4: VS=0, RAC=0, UA_INT=0, SWP=0 */
  902. resp[5] = 0x40; /* ATO=0, TAS=1, ATMPE=0, RWWP=0, AUTOLOAD=0 */
  903. /* resp[6] and [7] are obsolete, thus zero */
  904. resp[8] = 0xFF; /* Busy timeout period = 0xffff */
  905. resp[9] = 0xFF;
  906. /* Bytes 10,11: Extended selftest completion time = 0x0000 */
  907. return 0;
  908. }
  909. static int nvme_trans_fill_caching_page(struct nvme_ns *ns,
  910. struct sg_io_hdr *hdr,
  911. u8 *resp, int len)
  912. {
  913. int res = 0;
  914. int nvme_sc;
  915. struct nvme_dev *dev = ns->dev;
  916. u32 feature_resp;
  917. u8 vwc;
  918. if (len < MODE_PAGE_CACHING_LEN)
  919. return -EINVAL;
  920. nvme_sc = nvme_get_features(dev, NVME_FEAT_VOLATILE_WC, 0, 0,
  921. &feature_resp);
  922. res = nvme_trans_status_code(hdr, nvme_sc);
  923. if (res)
  924. return res;
  925. vwc = feature_resp & 0x00000001;
  926. resp[0] = MODE_PAGE_CACHING;
  927. resp[1] = MODE_PAGE_CACHING_LEN_FIELD;
  928. resp[2] = vwc << 2;
  929. return 0;
  930. }
  931. static int nvme_trans_fill_pow_cnd_page(struct nvme_ns *ns,
  932. struct sg_io_hdr *hdr, u8 *resp,
  933. int len)
  934. {
  935. if (len < MODE_PAGE_POW_CND_LEN)
  936. return -EINVAL;
  937. resp[0] = MODE_PAGE_POWER_CONDITION;
  938. resp[1] = MODE_PAGE_POW_CND_LEN_FIELD;
  939. /* All other bytes are zero */
  940. return 0;
  941. }
  942. static int nvme_trans_fill_inf_exc_page(struct nvme_ns *ns,
  943. struct sg_io_hdr *hdr, u8 *resp,
  944. int len)
  945. {
  946. if (len < MODE_PAGE_INF_EXC_LEN)
  947. return -EINVAL;
  948. resp[0] = MODE_PAGE_INFO_EXCEP;
  949. resp[1] = MODE_PAGE_INF_EXC_LEN_FIELD;
  950. resp[2] = 0x88;
  951. /* All other bytes are zero */
  952. return 0;
  953. }
  954. static int nvme_trans_fill_all_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr,
  955. u8 *resp, int len)
  956. {
  957. int res;
  958. u16 mode_pages_offset_1 = 0;
  959. u16 mode_pages_offset_2, mode_pages_offset_3, mode_pages_offset_4;
  960. mode_pages_offset_2 = mode_pages_offset_1 + MODE_PAGE_CACHING_LEN;
  961. mode_pages_offset_3 = mode_pages_offset_2 + MODE_PAGE_CONTROL_LEN;
  962. mode_pages_offset_4 = mode_pages_offset_3 + MODE_PAGE_POW_CND_LEN;
  963. res = nvme_trans_fill_caching_page(ns, hdr, &resp[mode_pages_offset_1],
  964. MODE_PAGE_CACHING_LEN);
  965. if (res)
  966. return res;
  967. res = nvme_trans_fill_control_page(ns, hdr, &resp[mode_pages_offset_2],
  968. MODE_PAGE_CONTROL_LEN);
  969. if (res)
  970. return res;
  971. res = nvme_trans_fill_pow_cnd_page(ns, hdr, &resp[mode_pages_offset_3],
  972. MODE_PAGE_POW_CND_LEN);
  973. if (res)
  974. return res;
  975. return nvme_trans_fill_inf_exc_page(ns, hdr, &resp[mode_pages_offset_4],
  976. MODE_PAGE_INF_EXC_LEN);
  977. }
  978. static inline int nvme_trans_get_blk_desc_len(u8 dbd, u8 llbaa)
  979. {
  980. if (dbd == MODE_SENSE_BLK_DESC_ENABLED) {
  981. /* SPC-4: len = 8 x Num_of_descriptors if llbaa = 0, 16x if 1 */
  982. return 8 * (llbaa + 1) * MODE_SENSE_BLK_DESC_COUNT;
  983. } else {
  984. return 0;
  985. }
  986. }
  987. static int nvme_trans_mode_page_create(struct nvme_ns *ns,
  988. struct sg_io_hdr *hdr, u8 *cmd,
  989. u16 alloc_len, u8 cdb10,
  990. int (*mode_page_fill_func)
  991. (struct nvme_ns *,
  992. struct sg_io_hdr *hdr, u8 *, int),
  993. u16 mode_pages_tot_len)
  994. {
  995. int res;
  996. int xfer_len;
  997. u8 *response;
  998. u8 dbd, llbaa;
  999. u16 resp_size;
  1000. int mph_size;
  1001. u16 mode_pages_offset_1;
  1002. u16 blk_desc_len, blk_desc_offset, mode_data_length;
  1003. dbd = (cmd[1] & MODE_SENSE_DBD_MASK) >> MODE_SENSE_DBD_SHIFT;
  1004. llbaa = (cmd[1] & MODE_SENSE_LLBAA_MASK) >> MODE_SENSE_LLBAA_SHIFT;
  1005. mph_size = cdb10 ? MODE_SENSE10_MPH_SIZE : MODE_SENSE6_MPH_SIZE;
  1006. blk_desc_len = nvme_trans_get_blk_desc_len(dbd, llbaa);
  1007. resp_size = mph_size + blk_desc_len + mode_pages_tot_len;
  1008. /* Refer spc4r34 Table 440 for calculation of Mode data Length field */
  1009. mode_data_length = 3 + (3 * cdb10) + blk_desc_len + mode_pages_tot_len;
  1010. blk_desc_offset = mph_size;
  1011. mode_pages_offset_1 = blk_desc_offset + blk_desc_len;
  1012. response = kzalloc(resp_size, GFP_KERNEL);
  1013. if (response == NULL) {
  1014. res = -ENOMEM;
  1015. goto out_mem;
  1016. }
  1017. res = nvme_trans_fill_mode_parm_hdr(&response[0], mph_size, cdb10,
  1018. llbaa, mode_data_length, blk_desc_len);
  1019. if (res)
  1020. goto out_free;
  1021. if (blk_desc_len > 0) {
  1022. res = nvme_trans_fill_blk_desc(ns, hdr,
  1023. &response[blk_desc_offset],
  1024. blk_desc_len, llbaa);
  1025. if (res)
  1026. goto out_free;
  1027. }
  1028. res = mode_page_fill_func(ns, hdr, &response[mode_pages_offset_1],
  1029. mode_pages_tot_len);
  1030. if (res)
  1031. goto out_free;
  1032. xfer_len = min(alloc_len, resp_size);
  1033. res = nvme_trans_copy_to_user(hdr, response, xfer_len);
  1034. out_free:
  1035. kfree(response);
  1036. out_mem:
  1037. return res;
  1038. }
  1039. /* Read Capacity Helper Functions */
  1040. static void nvme_trans_fill_read_cap(u8 *response, struct nvme_id_ns *id_ns,
  1041. u8 cdb16)
  1042. {
  1043. u8 flbas;
  1044. u32 lba_length;
  1045. u64 rlba;
  1046. u8 prot_en;
  1047. u8 p_type_lut[4] = {0, 0, 1, 2};
  1048. __be64 tmp_rlba;
  1049. __be32 tmp_rlba_32;
  1050. __be32 tmp_len;
  1051. flbas = (id_ns->flbas) & 0x0F;
  1052. lba_length = (1 << (id_ns->lbaf[flbas].ds));
  1053. rlba = le64_to_cpup(&id_ns->nsze) - 1;
  1054. (id_ns->dps) ? (prot_en = 0x01) : (prot_en = 0);
  1055. if (!cdb16) {
  1056. if (rlba > 0xFFFFFFFF)
  1057. rlba = 0xFFFFFFFF;
  1058. tmp_rlba_32 = cpu_to_be32(rlba);
  1059. tmp_len = cpu_to_be32(lba_length);
  1060. memcpy(response, &tmp_rlba_32, sizeof(u32));
  1061. memcpy(&response[4], &tmp_len, sizeof(u32));
  1062. } else {
  1063. tmp_rlba = cpu_to_be64(rlba);
  1064. tmp_len = cpu_to_be32(lba_length);
  1065. memcpy(response, &tmp_rlba, sizeof(u64));
  1066. memcpy(&response[8], &tmp_len, sizeof(u32));
  1067. response[12] = (p_type_lut[id_ns->dps & 0x3] << 1) | prot_en;
  1068. /* P_I_Exponent = 0x0 | LBPPBE = 0x0 */
  1069. /* LBPME = 0 | LBPRZ = 0 | LALBA = 0x00 */
  1070. /* Bytes 16-31 - Reserved */
  1071. }
  1072. }
  1073. /* Start Stop Unit Helper Functions */
  1074. static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
  1075. u8 pc, u8 pcmod, u8 start)
  1076. {
  1077. int res;
  1078. int nvme_sc;
  1079. struct nvme_dev *dev = ns->dev;
  1080. struct nvme_id_ctrl *id_ctrl;
  1081. int lowest_pow_st; /* max npss = lowest power consumption */
  1082. unsigned ps_desired = 0;
  1083. nvme_sc = nvme_identify_ctrl(dev, &id_ctrl);
  1084. res = nvme_trans_status_code(hdr, nvme_sc);
  1085. if (res)
  1086. return res;
  1087. lowest_pow_st = max(POWER_STATE_0, (int)(id_ctrl->npss - 1));
  1088. kfree(id_ctrl);
  1089. switch (pc) {
  1090. case NVME_POWER_STATE_START_VALID:
  1091. /* Action unspecified if POWER CONDITION MODIFIER != 0 */
  1092. if (pcmod == 0 && start == 0x1)
  1093. ps_desired = POWER_STATE_0;
  1094. if (pcmod == 0 && start == 0x0)
  1095. ps_desired = lowest_pow_st;
  1096. break;
  1097. case NVME_POWER_STATE_ACTIVE:
  1098. /* Action unspecified if POWER CONDITION MODIFIER != 0 */
  1099. if (pcmod == 0)
  1100. ps_desired = POWER_STATE_0;
  1101. break;
  1102. case NVME_POWER_STATE_IDLE:
  1103. /* Action unspecified if POWER CONDITION MODIFIER != [0,1,2] */
  1104. if (pcmod == 0x0)
  1105. ps_desired = POWER_STATE_1;
  1106. else if (pcmod == 0x1)
  1107. ps_desired = POWER_STATE_2;
  1108. else if (pcmod == 0x2)
  1109. ps_desired = POWER_STATE_3;
  1110. break;
  1111. case NVME_POWER_STATE_STANDBY:
  1112. /* Action unspecified if POWER CONDITION MODIFIER != [0,1] */
  1113. if (pcmod == 0x0)
  1114. ps_desired = max(POWER_STATE_0, (lowest_pow_st - 2));
  1115. else if (pcmod == 0x1)
  1116. ps_desired = max(POWER_STATE_0, (lowest_pow_st - 1));
  1117. break;
  1118. case NVME_POWER_STATE_LU_CONTROL:
  1119. default:
  1120. res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
  1121. ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
  1122. SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
  1123. break;
  1124. }
  1125. nvme_sc = nvme_set_features(dev, NVME_FEAT_POWER_MGMT, ps_desired, 0,
  1126. NULL);
  1127. return nvme_trans_status_code(hdr, nvme_sc);
  1128. }
  1129. static int nvme_trans_send_activate_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
  1130. u8 buffer_id)
  1131. {
  1132. struct nvme_command c;
  1133. int nvme_sc;
  1134. memset(&c, 0, sizeof(c));
  1135. c.common.opcode = nvme_admin_activate_fw;
  1136. c.common.cdw10[0] = cpu_to_le32(buffer_id | NVME_FWACT_REPL_ACTV);
  1137. nvme_sc = nvme_submit_sync_cmd(ns->queue, &c, NULL, 0);
  1138. return nvme_trans_status_code(hdr, nvme_sc);
  1139. }
  1140. static int nvme_trans_send_download_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
  1141. u8 opcode, u32 tot_len, u32 offset,
  1142. u8 buffer_id)
  1143. {
  1144. int nvme_sc;
  1145. struct nvme_dev *dev = ns->dev;
  1146. struct nvme_command c;
  1147. if (hdr->iovec_count > 0) {
  1148. /* Assuming SGL is not allowed for this command */
  1149. return nvme_trans_completion(hdr,
  1150. SAM_STAT_CHECK_CONDITION,
  1151. ILLEGAL_REQUEST,
  1152. SCSI_ASC_INVALID_CDB,
  1153. SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
  1154. }
  1155. memset(&c, 0, sizeof(c));
  1156. c.common.opcode = nvme_admin_download_fw;
  1157. c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1);
  1158. c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS);
  1159. nvme_sc = __nvme_submit_sync_cmd(dev->admin_q, &c, NULL,
  1160. hdr->dxferp, tot_len, NULL, 0);
  1161. return nvme_trans_status_code(hdr, nvme_sc);
  1162. }
  1163. /* Mode Select Helper Functions */
  1164. static inline void nvme_trans_modesel_get_bd_len(u8 *parm_list, u8 cdb10,
  1165. u16 *bd_len, u8 *llbaa)
  1166. {
  1167. if (cdb10) {
  1168. /* 10 Byte CDB */
  1169. *bd_len = (parm_list[MODE_SELECT_10_BD_OFFSET] << 8) +
  1170. parm_list[MODE_SELECT_10_BD_OFFSET + 1];
  1171. *llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] &
  1172. MODE_SELECT_10_LLBAA_MASK;
  1173. } else {
  1174. /* 6 Byte CDB */
  1175. *bd_len = parm_list[MODE_SELECT_6_BD_OFFSET];
  1176. }
  1177. }
  1178. static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list,
  1179. u16 idx, u16 bd_len, u8 llbaa)
  1180. {
  1181. u16 bd_num;
  1182. bd_num = bd_len / ((llbaa == 0) ?
  1183. SHORT_DESC_BLOCK : LONG_DESC_BLOCK);
  1184. /* Store block descriptor info if a FORMAT UNIT comes later */
  1185. /* TODO Saving 1st BD info; what to do if multiple BD received? */
  1186. if (llbaa == 0) {
  1187. /* Standard Block Descriptor - spc4r34 7.5.5.1 */
  1188. ns->mode_select_num_blocks =
  1189. (parm_list[idx + 1] << 16) +
  1190. (parm_list[idx + 2] << 8) +
  1191. (parm_list[idx + 3]);
  1192. ns->mode_select_block_len =
  1193. (parm_list[idx + 5] << 16) +
  1194. (parm_list[idx + 6] << 8) +
  1195. (parm_list[idx + 7]);
  1196. } else {
  1197. /* Long LBA Block Descriptor - sbc3r27 6.4.2.3 */
  1198. ns->mode_select_num_blocks =
  1199. (((u64)parm_list[idx + 0]) << 56) +
  1200. (((u64)parm_list[idx + 1]) << 48) +
  1201. (((u64)parm_list[idx + 2]) << 40) +
  1202. (((u64)parm_list[idx + 3]) << 32) +
  1203. (((u64)parm_list[idx + 4]) << 24) +
  1204. (((u64)parm_list[idx + 5]) << 16) +
  1205. (((u64)parm_list[idx + 6]) << 8) +
  1206. ((u64)parm_list[idx + 7]);
  1207. ns->mode_select_block_len =
  1208. (parm_list[idx + 12] << 24) +
  1209. (parm_list[idx + 13] << 16) +
  1210. (parm_list[idx + 14] << 8) +
  1211. (parm_list[idx + 15]);
  1212. }
  1213. }
  1214. static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
  1215. u8 *mode_page, u8 page_code)
  1216. {
  1217. int res = 0;
  1218. int nvme_sc;
  1219. struct nvme_dev *dev = ns->dev;
  1220. unsigned dword11;
  1221. switch (page_code) {
  1222. case MODE_PAGE_CACHING:
  1223. dword11 = ((mode_page[2] & CACHING_MODE_PAGE_WCE_MASK) ? 1 : 0);
  1224. nvme_sc = nvme_set_features(dev, NVME_FEAT_VOLATILE_WC, dword11,
  1225. 0, NULL);
  1226. res = nvme_trans_status_code(hdr, nvme_sc);
  1227. break;
  1228. case MODE_PAGE_CONTROL:
  1229. break;
  1230. case MODE_PAGE_POWER_CONDITION:
  1231. /* Verify the OS is not trying to set timers */
  1232. if ((mode_page[2] & 0x01) != 0 || (mode_page[3] & 0x0F) != 0) {
  1233. res = nvme_trans_completion(hdr,
  1234. SAM_STAT_CHECK_CONDITION,
  1235. ILLEGAL_REQUEST,
  1236. SCSI_ASC_INVALID_PARAMETER,
  1237. SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
  1238. break;
  1239. }
  1240. break;
  1241. default:
  1242. res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
  1243. ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
  1244. SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
  1245. break;
  1246. }
  1247. return res;
  1248. }
static int nvme_trans_modesel_data(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					u8 *cmd, u16 parm_list_len, u8 pf,
					u8 sp, u8 cdb10)
{
	int res;
	u8 *parm_list;
	u16 bd_len;
	u8 llbaa = 0;
	u16 index, saved_index;
	u8 page_code;
	u16 mp_size;

	/* Get parm list from data-in/out buffer */
	parm_list = kmalloc(parm_list_len, GFP_KERNEL);
	if (parm_list == NULL) {
		res = -ENOMEM;
		goto out;
	}

	res = nvme_trans_copy_from_user(hdr, parm_list, parm_list_len);
	if (res)
		goto out_mem;

	nvme_trans_modesel_get_bd_len(parm_list, cdb10, &bd_len, &llbaa);
	index = (cdb10) ? (MODE_SELECT_10_MPH_SIZE) : (MODE_SELECT_6_MPH_SIZE);

	if (bd_len != 0) {
		/* Block Descriptors present, parse */
		nvme_trans_modesel_save_bd(ns, parm_list, index, bd_len, llbaa);
		index += bd_len;
	}
	saved_index = index;

	/* Multiple mode pages may be present; iterate through all */
	/* In 1st Iteration, don't do NVME Command, only check for CDB errors */
	do {
		page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK;
		mp_size = parm_list[index + 1] + 2;
		if ((page_code != MODE_PAGE_CACHING) &&
		    (page_code != MODE_PAGE_CONTROL) &&
		    (page_code != MODE_PAGE_POWER_CONDITION)) {
			res = nvme_trans_completion(hdr,
						SAM_STAT_CHECK_CONDITION,
						ILLEGAL_REQUEST,
						SCSI_ASC_INVALID_CDB,
						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
			goto out_mem;
		}
		index += mp_size;
	} while (index < parm_list_len);

	/* In 2nd Iteration, do the NVME Commands */
	index = saved_index;
	do {
		page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK;
		mp_size = parm_list[index + 1] + 2;
		res = nvme_trans_modesel_get_mp(ns, hdr, &parm_list[index],
								page_code);
		if (res)
			break;
		index += mp_size;
	} while (index < parm_list_len);

 out_mem:
	kfree(parm_list);
 out:
	return res;
}
/* Format Unit Helper Functions */

static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns,
							struct sg_io_hdr *hdr)
{
	int res = 0;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	u8 flbas;

	/*
	 * SCSI Expects a MODE SELECT would have been issued prior to
	 * a FORMAT UNIT, and the block size and number would be used
	 * from the block descriptor in it. If a MODE SELECT had not
	 * been issued, FORMAT shall use the current values for both.
	 */

	if (ns->mode_select_num_blocks == 0 || ns->mode_select_block_len == 0) {
		struct nvme_id_ns *id_ns;

		nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns);
		res = nvme_trans_status_code(hdr, nvme_sc);
		if (res)
			return res;

		if (ns->mode_select_num_blocks == 0)
			ns->mode_select_num_blocks = le64_to_cpu(id_ns->ncap);
		if (ns->mode_select_block_len == 0) {
			flbas = (id_ns->flbas) & 0x0F;
			ns->mode_select_block_len =
						(1 << (id_ns->lbaf[flbas].ds));
		}

		kfree(id_ns);
	}

	return 0;
}
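
/*
 * Read the FORMAT UNIT parameter list header and derive the NVMe
 * protection information setting. The IMMED bit and (for the long header)
 * a non-zero protection interval exponent are rejected; otherwise the
 * CDB's FMTPINFO bits and the header's PROTECTION FIELD USAGE bits combine
 * into pf_code, and only the combinations handled below map onto NVMe PI
 * types 0-3.
 */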
static int nvme_trans_fmt_get_parm_header(struct sg_io_hdr *hdr, u8 len,
					u8 format_prot_info, u8 *nvme_pf_code)
{
	int res;
	u8 *parm_list;
	u8 pf_usage, pf_code;

	parm_list = kmalloc(len, GFP_KERNEL);
	if (parm_list == NULL) {
		res = -ENOMEM;
		goto out;
	}
	res = nvme_trans_copy_from_user(hdr, parm_list, len);
	if (res)
		goto out_mem;

	if ((parm_list[FORMAT_UNIT_IMMED_OFFSET] &
				FORMAT_UNIT_IMMED_MASK) != 0) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out_mem;
	}

	if (len == FORMAT_UNIT_LONG_PARM_LIST_LEN &&
	    (parm_list[FORMAT_UNIT_PROT_INT_OFFSET] & 0x0F) != 0) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out_mem;
	}

	pf_usage = parm_list[FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET] &
			FORMAT_UNIT_PROT_FIELD_USAGE_MASK;
	pf_code = (pf_usage << 2) | format_prot_info;
	switch (pf_code) {
	case 0:
		*nvme_pf_code = 0;
		break;
	case 2:
		*nvme_pf_code = 1;
		break;
	case 3:
		*nvme_pf_code = 2;
		break;
	case 7:
		*nvme_pf_code = 3;
		break;
	default:
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		break;
	}

 out_mem:
	kfree(parm_list);
 out:
	return res;
}
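
/*
 * Build and submit the Format NVM admin command. The LBA format index is
 * chosen by matching the block length saved from the last MODE SELECT
 * against the data sizes advertised in Identify Namespace; the protection
 * information code from the parameter header goes into cdw10 bits 7:5 and
 * the LBA format index into bits 3:0.
 */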
static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
				   u8 prot_info)
{
	int res;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	struct nvme_id_ns *id_ns;
	u8 i;
	u8 flbas, nlbaf;
	u8 selected_lbaf = 0xFF;
	u32 cdw10 = 0;
	struct nvme_command c;

	/* Loop through the LBA formats in id_ns to find the required LBAF; put its index in cdw10 */
	nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		return res;

	flbas = (id_ns->flbas) & 0x0F;
	nlbaf = id_ns->nlbaf;

	for (i = 0; i < nlbaf; i++) {
		if (ns->mode_select_block_len == (1 << (id_ns->lbaf[i].ds))) {
			selected_lbaf = i;
			break;
		}
	}
	if (selected_lbaf > 0x0F) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
	}
	if (ns->mode_select_num_blocks != le64_to_cpu(id_ns->ncap)) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
	}

	cdw10 |= prot_info << 5;
	cdw10 |= selected_lbaf & 0x0F;

	memset(&c, 0, sizeof(c));
	c.format.opcode = nvme_admin_format_nvm;
	c.format.nsid = cpu_to_le32(ns->ns_id);
	c.format.cdw10 = cpu_to_le32(cdw10);

	nvme_sc = nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0);
	res = nvme_trans_status_code(hdr, nvme_sc);

	kfree(id_ns);
	return res;
}
static inline u32 nvme_trans_io_get_num_cmds(struct sg_io_hdr *hdr,
					struct nvme_trans_io_cdb *cdb_info,
					u32 max_blocks)
{
	/* If using iovecs, send one nvme command per vector */
	if (hdr->iovec_count > 0)
		return hdr->iovec_count;
	else if (cdb_info->xfer_len > max_blocks)
		return ((cdb_info->xfer_len - 1) / max_blocks) + 1;
	else
		return 1;
}

static u16 nvme_trans_io_get_control(struct nvme_ns *ns,
					struct nvme_trans_io_cdb *cdb_info)
{
	u16 control = 0;

	/* When Protection information support is added, implement here */

	if (cdb_info->fua > 0)
		control |= NVME_RW_FUA;

	return control;
}
static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
				struct nvme_trans_io_cdb *cdb_info, u8 is_write)
{
	int nvme_sc = NVME_SC_SUCCESS;
	u32 num_cmds;
	u64 unit_len;
	u64 unit_num_blocks;	/* Number of blocks to xfer in each nvme cmd */
	u32 retcode;
	u32 i = 0;
	u64 nvme_offset = 0;
	void __user *next_mapping_addr;
	struct nvme_command c;
	u8 opcode = (is_write ? nvme_cmd_write : nvme_cmd_read);
	u16 control;
	u32 max_blocks = queue_max_hw_sectors(ns->queue);

	num_cmds = nvme_trans_io_get_num_cmds(hdr, cdb_info, max_blocks);

	/*
	 * This loop handles two cases.
	 * First, when an SGL is used in the form of an iovec list:
	 *   - Use iov_base as the next mapping address for the nvme command_id
	 *   - Use iov_len as the data transfer length for the command.
	 * Second, when we have a single buffer
	 *   - If larger than max_blocks, split into chunks, offset
	 *     each nvme command accordingly.
	 */
	for (i = 0; i < num_cmds; i++) {
		memset(&c, 0, sizeof(c));
		if (hdr->iovec_count > 0) {
			struct sg_iovec sgl;

			retcode = copy_from_user(&sgl, hdr->dxferp +
					i * sizeof(struct sg_iovec),
					sizeof(struct sg_iovec));
			if (retcode)
				return -EFAULT;
			unit_len = sgl.iov_len;
			unit_num_blocks = unit_len >> ns->lba_shift;
			next_mapping_addr = sgl.iov_base;
		} else {
			unit_num_blocks = min((u64)max_blocks,
					(cdb_info->xfer_len - nvme_offset));
			unit_len = unit_num_blocks << ns->lba_shift;
			next_mapping_addr = hdr->dxferp +
					((1 << ns->lba_shift) * nvme_offset);
		}

		c.rw.opcode = opcode;
		c.rw.nsid = cpu_to_le32(ns->ns_id);
		c.rw.slba = cpu_to_le64(cdb_info->lba + nvme_offset);
		c.rw.length = cpu_to_le16(unit_num_blocks - 1);
		control = nvme_trans_io_get_control(ns, cdb_info);
		c.rw.control = cpu_to_le16(control);

		if (get_capacity(ns->disk) - unit_num_blocks <
				cdb_info->lba + nvme_offset) {
			nvme_sc = NVME_SC_LBA_RANGE;
			break;
		}
		nvme_sc = __nvme_submit_sync_cmd(ns->queue, &c, NULL,
				next_mapping_addr, unit_len, NULL, 0);
		if (nvme_sc)
			break;

		nvme_offset += unit_num_blocks;
	}

	return nvme_trans_status_code(hdr, nvme_sc);
}

/* SCSI Command Translation Functions */
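
/*
 * Translate a SCSI READ/WRITE (6/10/12/16) CDB into one or more NVMe I/O
 * commands. The LBA and transfer length fields sit at different offsets
 * and widths per CDB size, and a 6-byte CDB transfer length of 0 means
 * 256 blocks; the total byte count is cross-checked against the supplied
 * data buffer (or the sum of the iovec lengths) before anything is sent.
 */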
static int nvme_trans_io(struct nvme_ns *ns, struct sg_io_hdr *hdr, u8 is_write,
							u8 *cmd)
{
	int res = 0;
	struct nvme_trans_io_cdb cdb_info = { 0, };
	u8 opcode = cmd[0];
	u64 xfer_bytes;
	u64 sum_iov_len = 0;
	struct sg_iovec sgl;
	int i;
	size_t not_copied;

	/*
	 * The FUA and WPROTECT fields are not supported in 6-byte CDBs,
	 * but always in the same place for all others.
	 */
	switch (opcode) {
	case WRITE_6:
	case READ_6:
		break;
	default:
		cdb_info.fua = cmd[1] & 0x8;
		cdb_info.prot_info = (cmd[1] & 0xe0) >> 5;
		if (cdb_info.prot_info && !ns->pi_type) {
			return nvme_trans_completion(hdr,
					SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST,
					SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		}
	}

	switch (opcode) {
	case WRITE_6:
	case READ_6:
		cdb_info.lba = get_unaligned_be24(&cmd[1]);
		cdb_info.xfer_len = cmd[4];
		if (cdb_info.xfer_len == 0)
			cdb_info.xfer_len = 256;
		break;
	case WRITE_10:
	case READ_10:
		cdb_info.lba = get_unaligned_be32(&cmd[2]);
		cdb_info.xfer_len = get_unaligned_be16(&cmd[7]);
		break;
	case WRITE_12:
	case READ_12:
		cdb_info.lba = get_unaligned_be32(&cmd[2]);
		cdb_info.xfer_len = get_unaligned_be32(&cmd[6]);
		break;
	case WRITE_16:
	case READ_16:
		cdb_info.lba = get_unaligned_be64(&cmd[2]);
		cdb_info.xfer_len = get_unaligned_be32(&cmd[10]);
		break;
	default:
		/* Will never really reach here */
		res = -EIO;
		goto out;
	}

	/* Calculate total length of transfer (in bytes) */
	if (hdr->iovec_count > 0) {
		for (i = 0; i < hdr->iovec_count; i++) {
			not_copied = copy_from_user(&sgl, hdr->dxferp +
						i * sizeof(struct sg_iovec),
						sizeof(struct sg_iovec));
			if (not_copied)
				return -EFAULT;
			sum_iov_len += sgl.iov_len;
			/* IO vector sizes should be multiples of block size */
			if (sgl.iov_len % (1 << ns->lba_shift) != 0) {
				res = nvme_trans_completion(hdr,
						SAM_STAT_CHECK_CONDITION,
						ILLEGAL_REQUEST,
						SCSI_ASC_INVALID_PARAMETER,
						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
				goto out;
			}
		}
	} else {
		sum_iov_len = hdr->dxfer_len;
	}

	/* As per the sg ioctl howto, if the lengths differ, use the lower one */
	xfer_bytes = min(((u64)hdr->dxfer_len), sum_iov_len);

	/* If block count and actual data buffer size don't match, error out */
	if (xfer_bytes != (cdb_info.xfer_len << ns->lba_shift)) {
		res = -EINVAL;
		goto out;
	}

	/* Check for 0 length transfer - it is not illegal */
	if (cdb_info.xfer_len == 0)
		goto out;

	/* Send NVMe IO Command(s) */
	res = nvme_trans_do_nvme_io(ns, hdr, &cdb_info, is_write);
	if (res)
		goto out;

 out:
	return res;
}
static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res = 0;
	u8 evpd;
	u8 page_code;
	int alloc_len;
	u8 *inq_response;

	evpd = cmd[1] & 0x01;
	page_code = cmd[2];
	alloc_len = get_unaligned_be16(&cmd[3]);

	inq_response = kmalloc(max(alloc_len, STANDARD_INQUIRY_LENGTH),
				GFP_KERNEL);
	if (inq_response == NULL) {
		res = -ENOMEM;
		goto out_mem;
	}

	if (evpd == 0) {
		if (page_code == INQ_STANDARD_INQUIRY_PAGE) {
			res = nvme_trans_standard_inquiry_page(ns, hdr,
						inq_response, alloc_len);
		} else {
			res = nvme_trans_completion(hdr,
						SAM_STAT_CHECK_CONDITION,
						ILLEGAL_REQUEST,
						SCSI_ASC_INVALID_CDB,
						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		}
	} else {
		switch (page_code) {
		case VPD_SUPPORTED_PAGES:
			res = nvme_trans_supported_vpd_pages(ns, hdr,
						inq_response, alloc_len);
			break;
		case VPD_SERIAL_NUMBER:
			res = nvme_trans_unit_serial_page(ns, hdr, inq_response,
								alloc_len);
			break;
		case VPD_DEVICE_IDENTIFIERS:
			res = nvme_trans_device_id_page(ns, hdr, inq_response,
								alloc_len);
			break;
		case VPD_EXTENDED_INQUIRY:
			res = nvme_trans_ext_inq_page(ns, hdr, alloc_len);
			break;
		case VPD_BLOCK_LIMITS:
			res = nvme_trans_bdev_limits_page(ns, hdr, inq_response,
								alloc_len);
			break;
		case VPD_BLOCK_DEV_CHARACTERISTICS:
			res = nvme_trans_bdev_char_page(ns, hdr, alloc_len);
			break;
		default:
			res = nvme_trans_completion(hdr,
						SAM_STAT_CHECK_CONDITION,
						ILLEGAL_REQUEST,
						SCSI_ASC_INVALID_CDB,
						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
			break;
		}
	}
	kfree(inq_response);
 out_mem:
	return res;
}
static int nvme_trans_log_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res;
	u16 alloc_len;
	u8 pc;
	u8 page_code;

	if (cmd[1] != LOG_SENSE_CDB_SP_NOT_ENABLED) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out;
	}

	page_code = cmd[2] & LOG_SENSE_CDB_PAGE_CODE_MASK;
	pc = (cmd[2] & LOG_SENSE_CDB_PC_MASK) >> LOG_SENSE_CDB_PC_SHIFT;
	if (pc != LOG_SENSE_CDB_PC_CUMULATIVE_VALUES) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out;
	}
	alloc_len = get_unaligned_be16(&cmd[7]);
	switch (page_code) {
	case LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE:
		res = nvme_trans_log_supp_pages(ns, hdr, alloc_len);
		break;
	case LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE:
		res = nvme_trans_log_info_exceptions(ns, hdr, alloc_len);
		break;
	case LOG_PAGE_TEMPERATURE_PAGE:
		res = nvme_trans_log_temperature(ns, hdr, alloc_len);
		break;
	default:
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		break;
	}

 out:
	return res;
}
static int nvme_trans_mode_select(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	u8 cdb10 = 0;
	u16 parm_list_len;
	u8 page_format;
	u8 save_pages;

	page_format = cmd[1] & MODE_SELECT_CDB_PAGE_FORMAT_MASK;
	save_pages = cmd[1] & MODE_SELECT_CDB_SAVE_PAGES_MASK;

	if (cmd[0] == MODE_SELECT) {
		parm_list_len = cmd[4];
	} else {
		/* MODE SELECT(10) carries a two-byte parameter list length */
		parm_list_len = get_unaligned_be16(&cmd[7]);
		cdb10 = 1;
	}

	if (parm_list_len != 0) {
		/*
		 * According to SPC-4 r24, a parameter list length field of 0
		 * shall not be considered an error
		 */
		return nvme_trans_modesel_data(ns, hdr, cmd, parm_list_len,
						page_format, save_pages, cdb10);
	}

	return 0;
}
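
/*
 * MODE SENSE translation: only reporting of current values is supported;
 * the requested page (Caching, Control, Power Condition, Informational
 * Exceptions, or Return All) is synthesized by nvme_trans_mode_page_create
 * with the matching fill callback and fixed page length.
 */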
static int nvme_trans_mode_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res = 0;
	u16 alloc_len;
	u8 cdb10 = 0;

	if (cmd[0] == MODE_SENSE) {
		alloc_len = cmd[4];
	} else {
		alloc_len = get_unaligned_be16(&cmd[7]);
		cdb10 = 1;
	}

	if ((cmd[2] & MODE_SENSE_PAGE_CONTROL_MASK) !=
			MODE_SENSE_PC_CURRENT_VALUES) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out;
	}

	switch (cmd[2] & MODE_SENSE_PAGE_CODE_MASK) {
	case MODE_PAGE_CACHING:
		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
						cdb10,
						&nvme_trans_fill_caching_page,
						MODE_PAGE_CACHING_LEN);
		break;
	case MODE_PAGE_CONTROL:
		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
						cdb10,
						&nvme_trans_fill_control_page,
						MODE_PAGE_CONTROL_LEN);
		break;
	case MODE_PAGE_POWER_CONDITION:
		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
						cdb10,
						&nvme_trans_fill_pow_cnd_page,
						MODE_PAGE_POW_CND_LEN);
		break;
	case MODE_PAGE_INFO_EXCEP:
		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
						cdb10,
						&nvme_trans_fill_inf_exc_page,
						MODE_PAGE_INF_EXC_LEN);
		break;
	case MODE_PAGE_RETURN_ALL:
		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
						cdb10,
						&nvme_trans_fill_all_pages,
						MODE_PAGE_ALL_LEN);
		break;
	default:
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		break;
	}

 out:
	return res;
}
static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd, u8 cdb16)
{
	int res;
	int nvme_sc;
	u32 alloc_len;
	u32 resp_size;
	u32 xfer_len;
	struct nvme_dev *dev = ns->dev;
	struct nvme_id_ns *id_ns;
	u8 *response;

	if (cdb16) {
		alloc_len = get_unaligned_be32(&cmd[10]);
		resp_size = READ_CAP_16_RESP_SIZE;
	} else {
		alloc_len = READ_CAP_10_RESP_SIZE;
		resp_size = READ_CAP_10_RESP_SIZE;
	}

	nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		return res;

	response = kzalloc(resp_size, GFP_KERNEL);
	if (response == NULL) {
		res = -ENOMEM;
		goto out_free_id;
	}
	nvme_trans_fill_read_cap(response, id_ns, cdb16);

	xfer_len = min(alloc_len, resp_size);
	res = nvme_trans_copy_to_user(hdr, response, xfer_len);

	kfree(response);
 out_free_id:
	kfree(id_ns);
	return res;
}
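
/*
 * REPORT LUNS: the response is sized from the controller's namespace count
 * (Identify Controller NN field), with one 8-byte LUN entry per namespace
 * after the header, numbered from 0 upwards.
 */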
static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res;
	int nvme_sc;
	u32 alloc_len, xfer_len, resp_size;
	u8 *response;
	struct nvme_dev *dev = ns->dev;
	struct nvme_id_ctrl *id_ctrl;
	u32 ll_length, lun_id;
	u8 lun_id_offset = REPORT_LUNS_FIRST_LUN_OFFSET;
	__be32 tmp_len;

	switch (cmd[2]) {
	default:
		return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
	case ALL_LUNS_RETURNED:
	case ALL_WELL_KNOWN_LUNS_RETURNED:
	case RESTRICTED_LUNS_RETURNED:
		nvme_sc = nvme_identify_ctrl(dev, &id_ctrl);
		res = nvme_trans_status_code(hdr, nvme_sc);
		if (res)
			return res;

		ll_length = le32_to_cpu(id_ctrl->nn) * LUN_ENTRY_SIZE;
		resp_size = ll_length + LUN_DATA_HEADER_SIZE;

		alloc_len = get_unaligned_be32(&cmd[6]);
		if (alloc_len < resp_size) {
			res = nvme_trans_completion(hdr,
					SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
			goto out_free_id;
		}

		response = kzalloc(resp_size, GFP_KERNEL);
		if (response == NULL) {
			res = -ENOMEM;
			goto out_free_id;
		}

		/* The first LUN ID will always be 0 per the SAM spec */
		for (lun_id = 0; lun_id < le32_to_cpu(id_ctrl->nn); lun_id++) {
			/*
			 * Set the LUN Id and then increment to the next LUN
			 * location in the parameter data.
			 */
			__be64 tmp_id = cpu_to_be64(lun_id);
			memcpy(&response[lun_id_offset], &tmp_id, sizeof(u64));
			lun_id_offset += LUN_ENTRY_SIZE;
		}
		tmp_len = cpu_to_be32(ll_length);
		memcpy(response, &tmp_len, sizeof(u32));
	}

	xfer_len = min(alloc_len, resp_size);
	res = nvme_trans_copy_to_user(hdr, response, xfer_len);

	kfree(response);
 out_free_id:
	kfree(id_ctrl);
	return res;
}
static int nvme_trans_request_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res;
	u8 alloc_len, xfer_len, resp_size;
	u8 desc_format;
	u8 *response;

	desc_format = cmd[1] & 0x01;
	alloc_len = cmd[4];

	resp_size = ((desc_format) ? (DESC_FMT_SENSE_DATA_SIZE) :
					(FIXED_FMT_SENSE_DATA_SIZE));
	response = kzalloc(resp_size, GFP_KERNEL);
	if (response == NULL) {
		res = -ENOMEM;
		goto out;
	}

	if (desc_format) {
		/* Descriptor Format Sense Data */
		response[0] = DESC_FORMAT_SENSE_DATA;
		response[1] = NO_SENSE;
		/* TODO How is LOW POWER CONDITION ON handled? (byte 2) */
		response[2] = SCSI_ASC_NO_SENSE;
		response[3] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		/* SDAT_OVFL = 0 | Additional Sense Length = 0 */
	} else {
		/* Fixed Format Sense Data */
		response[0] = FIXED_SENSE_DATA;
		/* Byte 1 = Obsolete */
		response[2] = NO_SENSE;	/* FM, EOM, ILI, SDAT_OVFL = 0 */
		/* Bytes 3-6 - Information - set to zero */
		response[7] = FIXED_SENSE_DATA_ADD_LENGTH;
		/* Bytes 8-11 - Cmd Specific Information - set to zero */
		response[12] = SCSI_ASC_NO_SENSE;
		response[13] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		/* Byte 14 = Field Replaceable Unit Code = 0 */
		/* Bytes 15-17 - SKSV=0; Sense Key Specific = 0 */
	}

	xfer_len = min(alloc_len, resp_size);
	res = nvme_trans_copy_to_user(hdr, response, xfer_len);

	kfree(response);
 out:
	return res;
}
static int nvme_trans_security_protocol(struct nvme_ns *ns,
					struct sg_io_hdr *hdr,
					u8 *cmd)
{
	return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
}

static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
					struct sg_io_hdr *hdr)
{
	int nvme_sc;
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_cmd_flush;
	c.common.nsid = cpu_to_le32(ns->ns_id);

	nvme_sc = nvme_submit_sync_cmd(ns->queue, &c, NULL, 0);
	return nvme_trans_status_code(hdr, nvme_sc);
}
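
/*
 * START STOP UNIT: the IMMED bit is not supported. Unless NO_FLUSH is set,
 * the volatile cache is flushed (NVMe Flush) before the power condition
 * transition is handed off to nvme_trans_power_state().
 */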
static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	u8 immed, pcmod, pc, no_flush, start;

	immed = cmd[1] & 0x01;
	pcmod = cmd[3] & 0x0f;
	pc = (cmd[4] & 0xf0) >> 4;
	no_flush = cmd[4] & 0x04;
	start = cmd[4] & 0x01;

	if (immed != 0) {
		return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
	} else {
		if (no_flush == 0) {
			/* Issue NVME FLUSH command prior to START STOP UNIT */
			int res = nvme_trans_synchronize_cache(ns, hdr);
			if (res)
				return res;
		}

		/* Setup the expected power state transition */
		return nvme_trans_power_state(ns, hdr, pc, pcmod, start);
	}
}
static int nvme_trans_format_unit(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res;
	u8 parm_hdr_len = 0;
	u8 nvme_pf_code = 0;
	u8 format_prot_info, long_list, format_data;

	format_prot_info = (cmd[1] & 0xc0) >> 6;
	long_list = cmd[1] & 0x20;
	format_data = cmd[1] & 0x10;

	if (format_data != 0) {
		if (format_prot_info != 0) {
			if (long_list == 0)
				parm_hdr_len = FORMAT_UNIT_SHORT_PARM_LIST_LEN;
			else
				parm_hdr_len = FORMAT_UNIT_LONG_PARM_LIST_LEN;
		}
	} else if (format_data == 0 && format_prot_info != 0) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out;
	}

	/* Get parm header from data-in/out buffer */
	/*
	 * According to the translation spec, the only fields in the parameter
	 * list we are concerned with are in the header. So allocate only that.
	 */
	if (parm_hdr_len > 0) {
		res = nvme_trans_fmt_get_parm_header(hdr, parm_hdr_len,
					format_prot_info, &nvme_pf_code);
		if (res)
			goto out;
	}

	/* Attempt to activate any previously downloaded firmware image */
	res = nvme_trans_send_activate_fw_cmd(ns, hdr, 0);

	/* Determine Block size and count and send format command */
	res = nvme_trans_fmt_set_blk_size_count(ns, hdr);
	if (res)
		goto out;

	res = nvme_trans_fmt_send_cmd(ns, hdr, nvme_pf_code);
 out:
	return res;
}
static int nvme_trans_test_unit_ready(struct nvme_ns *ns,
					struct sg_io_hdr *hdr,
					u8 *cmd)
{
	struct nvme_dev *dev = ns->dev;

	if (!(readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_RDY))
		return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					    NOT_READY, SCSI_ASC_LUN_NOT_READY,
					    SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
	else
		return nvme_trans_completion(hdr, SAM_STAT_GOOD, NO_SENSE, 0, 0);
}
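
/*
 * WRITE BUFFER is mapped onto the NVMe firmware commands: the buffer id
 * selects the firmware slot, the parameter list (which must be a whole
 * number of dwords) is downloaded with Firmware Image Download, and the
 * mode decides whether a Firmware Activate follows immediately, is
 * deferred, or is issued on its own.
 */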
static int nvme_trans_write_buffer(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res = 0;
	u32 buffer_offset, parm_list_length;
	u8 buffer_id, mode;

	parm_list_length = get_unaligned_be24(&cmd[6]);
	if (parm_list_length % BYTES_TO_DWORDS != 0) {
		/* NVMe expects Firmware file to be a whole number of DWORDS */
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out;
	}

	buffer_id = cmd[2];
	if (buffer_id > NVME_MAX_FIRMWARE_SLOT) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out;
	}

	mode = cmd[1] & 0x1f;
	buffer_offset = get_unaligned_be24(&cmd[3]);

	switch (mode) {
	case DOWNLOAD_SAVE_ACTIVATE:
		res = nvme_trans_send_download_fw_cmd(ns, hdr, nvme_admin_download_fw,
						parm_list_length, buffer_offset,
						buffer_id);
		if (res)
			goto out;
		res = nvme_trans_send_activate_fw_cmd(ns, hdr, buffer_id);
		break;
	case DOWNLOAD_SAVE_DEFER_ACTIVATE:
		res = nvme_trans_send_download_fw_cmd(ns, hdr, nvme_admin_download_fw,
						parm_list_length, buffer_offset,
						buffer_id);
		break;
	case ACTIVATE_DEFERRED_MICROCODE:
		res = nvme_trans_send_activate_fw_cmd(ns, hdr, buffer_id);
		break;
	default:
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		break;
	}

 out:
	return res;
}
struct scsi_unmap_blk_desc {
	__be64	slba;
	__be32	nlb;
	u32	resv;
};

struct scsi_unmap_parm_list {
	__be16	unmap_data_len;
	__be16	unmap_blk_desc_data_len;
	u32	resv;
	struct scsi_unmap_blk_desc desc[0];
};
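
/*
 * UNMAP translation: copy in the SCSI unmap parameter list, convert each
 * block descriptor into an NVMe DSM range (at most 256 descriptors), and
 * submit a single Dataset Management command with the deallocate attribute
 * set.
 */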
static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	struct scsi_unmap_parm_list *plist;
	struct nvme_dsm_range *range;
	struct nvme_command c;
	int i, nvme_sc, res;
	u16 ndesc, list_len;

	list_len = get_unaligned_be16(&cmd[7]);
	if (!list_len)
		return -EINVAL;

	plist = kmalloc(list_len, GFP_KERNEL);
	if (!plist)
		return -ENOMEM;

	res = nvme_trans_copy_from_user(hdr, plist, list_len);
	if (res)
		goto out;

	ndesc = be16_to_cpu(plist->unmap_blk_desc_data_len) >> 4;
	if (!ndesc || ndesc > 256) {
		res = -EINVAL;
		goto out;
	}

	range = kcalloc(ndesc, sizeof(*range), GFP_KERNEL);
	if (!range) {
		res = -ENOMEM;
		goto out;
	}

	for (i = 0; i < ndesc; i++) {
		range[i].nlb = cpu_to_le32(be32_to_cpu(plist->desc[i].nlb));
		range[i].slba = cpu_to_le64(be64_to_cpu(plist->desc[i].slba));
		range[i].cattr = 0;
	}

	memset(&c, 0, sizeof(c));
	c.dsm.opcode = nvme_cmd_dsm;
	c.dsm.nsid = cpu_to_le32(ns->ns_id);
	c.dsm.nr = cpu_to_le32(ndesc - 1);
	c.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	nvme_sc = nvme_submit_sync_cmd(ns->queue, &c, range,
			ndesc * sizeof(*range));
	res = nvme_trans_status_code(hdr, nvme_sc);

	kfree(range);
 out:
	kfree(plist);
	return res;
}
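
/*
 * Top-level dispatcher: copy the CDB out of the sg_io_hdr, prime the header
 * with GOOD status, then hand the command to the matching translation
 * routine based on the SCSI opcode. Unsupported opcodes complete with
 * ILLEGAL REQUEST sense data.
 */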
static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
{
	u8 cmd[BLK_MAX_CDB];
	int retcode;
	unsigned int opcode;

	if (hdr->cmdp == NULL)
		return -EMSGSIZE;
	if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
		return -EFAULT;

	/*
	 * Prime the hdr with good status for scsi commands that don't require
	 * an nvme command for translation.
	 */
	retcode = nvme_trans_status_code(hdr, NVME_SC_SUCCESS);
	if (retcode)
		return retcode;

	opcode = cmd[0];

	switch (opcode) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
		retcode = nvme_trans_io(ns, hdr, 0, cmd);
		break;
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
		retcode = nvme_trans_io(ns, hdr, 1, cmd);
		break;
	case INQUIRY:
		retcode = nvme_trans_inquiry(ns, hdr, cmd);
		break;
	case LOG_SENSE:
		retcode = nvme_trans_log_sense(ns, hdr, cmd);
		break;
	case MODE_SELECT:
	case MODE_SELECT_10:
		retcode = nvme_trans_mode_select(ns, hdr, cmd);
		break;
	case MODE_SENSE:
	case MODE_SENSE_10:
		retcode = nvme_trans_mode_sense(ns, hdr, cmd);
		break;
	case READ_CAPACITY:
		retcode = nvme_trans_read_capacity(ns, hdr, cmd, 0);
		break;
	case SERVICE_ACTION_IN_16:
		switch (cmd[1]) {
		case SAI_READ_CAPACITY_16:
			retcode = nvme_trans_read_capacity(ns, hdr, cmd, 1);
			break;
		default:
			goto out;
		}
		break;
	case REPORT_LUNS:
		retcode = nvme_trans_report_luns(ns, hdr, cmd);
		break;
	case REQUEST_SENSE:
		retcode = nvme_trans_request_sense(ns, hdr, cmd);
		break;
	case SECURITY_PROTOCOL_IN:
	case SECURITY_PROTOCOL_OUT:
		retcode = nvme_trans_security_protocol(ns, hdr, cmd);
		break;
	case START_STOP:
		retcode = nvme_trans_start_stop(ns, hdr, cmd);
		break;
	case SYNCHRONIZE_CACHE:
		retcode = nvme_trans_synchronize_cache(ns, hdr);
		break;
	case FORMAT_UNIT:
		retcode = nvme_trans_format_unit(ns, hdr, cmd);
		break;
	case TEST_UNIT_READY:
		retcode = nvme_trans_test_unit_ready(ns, hdr, cmd);
		break;
	case WRITE_BUFFER:
		retcode = nvme_trans_write_buffer(ns, hdr, cmd);
		break;
	case UNMAP:
		retcode = nvme_trans_unmap(ns, hdr, cmd);
		break;
	default:
 out:
		retcode = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		break;
	}
	return retcode;
}
int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr)
{
	struct sg_io_hdr hdr;
	int retcode;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&hdr, u_hdr, sizeof(hdr)))
		return -EFAULT;
	if (hdr.interface_id != 'S')
		return -EINVAL;
	if (hdr.cmd_len > BLK_MAX_CDB)
		return -EINVAL;

	/*
	 * A positive return code means a NVMe status, which has been
	 * translated to sense data.
	 */
	retcode = nvme_scsi_translate(ns, &hdr);
	if (retcode < 0)
		return retcode;
	if (copy_to_user(u_hdr, &hdr, sizeof(sg_io_hdr_t)) > 0)
		return -EFAULT;
	return 0;
}
int nvme_sg_get_version_num(int __user *ip)
{
	return put_user(sg_version_num, ip);
}