skd_main.c

/*
 * Driver for sTec s1120 PCIe SSDs. sTec was acquired in 2013 by HGST and HGST
 * was acquired by Western Digital in 2012.
 *
 * Copyright 2012 sTec, Inc.
 * Copyright (c) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is part of the Linux kernel, and is made available under
 * the terms of the GNU General Public License version 2.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/hdreg.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/version.h>
#include <linux/err.h>
#include <linux/aer.h>
#include <linux/wait.h>
#include <linux/stringify.h>
#include <scsi/scsi.h>
#include <scsi/sg.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include "skd_s1120.h"

static int skd_dbg_level;
static int skd_isr_comp_limit = 4;

#define SKD_ASSERT(expr) \
	do { \
		if (unlikely(!(expr))) { \
			pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
			       # expr, __FILE__, __func__, __LINE__); \
		} \
	} while (0)

#define DRV_NAME "skd"
#define PFX DRV_NAME ": "

MODULE_LICENSE("GPL");

MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver");

#define PCI_VENDOR_ID_STEC 0x1B39
#define PCI_DEVICE_ID_S1120 0x0001

#define SKD_FUA_NV (1 << 1)
#define SKD_MINORS_PER_DEVICE 16

#define SKD_MAX_QUEUE_DEPTH 200u

#define SKD_PAUSE_TIMEOUT (5 * 1000)

#define SKD_N_FITMSG_BYTES (512u)
#define SKD_MAX_REQ_PER_MSG 14

#define SKD_N_SPECIAL_FITMSG_BYTES (128u)

/* SG elements are 32 bytes, so we can make this 4096 and still be under the
 * 128KB limit. That allows 4096*4K = 16M xfer size
 */
#define SKD_N_SG_PER_REQ_DEFAULT 256u

#define SKD_N_COMPLETION_ENTRY 256u
#define SKD_N_READ_CAP_BYTES (8u)

#define SKD_N_INTERNAL_BYTES (512u)

#define SKD_SKCOMP_SIZE \
	((sizeof(struct fit_completion_entry_v1) + \
	  sizeof(struct fit_comp_error_info)) * SKD_N_COMPLETION_ENTRY)

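/*
 * Sizing note (derived from the constants above): a FIT message is a
 * 64-byte fit_msg_hdr followed by up to SKD_MAX_REQ_PER_MSG SCSI request
 * entries, and 64 + 14 * 32 == 512 == SKD_N_FITMSG_BYTES assuming 32-byte
 * skd_scsi_request entries, which is what these constants imply (see also
 * the message-size thresholds in skd_send_fitmsg()).  SKD_SKCOMP_SIZE
 * likewise covers SKD_N_COMPLETION_ENTRY completion entries plus the
 * matching fit_comp_error_info entries, kept as skcomp_table/skerr_table
 * behind cq_dma_address in struct skd_device.
 */
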
/* 5 bits of uniquifier, 0xF800 */
#define SKD_ID_TABLE_MASK (3u << 8u)
#define SKD_ID_RW_REQUEST (0u << 8u)
#define SKD_ID_INTERNAL (1u << 8u)
#define SKD_ID_FIT_MSG (3u << 8u)
#define SKD_ID_SLOT_MASK 0x00FFu
#define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu

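/*
 * Per the masks above, a request id is a 16-bit value whose low byte is the
 * blk-mq tag ("slot", SKD_ID_SLOT_MASK) and whose bits 9:8 select the table
 * the id belongs to (SKD_ID_TABLE_MASK).  For example, skd_mq_queue_rq()
 * below builds a read/write id as tag + SKD_ID_RW_REQUEST, so tag 0x2a
 * becomes id 0x002a, and the completion path recovers the tag with
 * req_id & SKD_ID_SLOT_AND_TABLE_MASK.
 */
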
#define SKD_N_MAX_SECTORS 2048u

#define SKD_MAX_RETRIES 2u

#define SKD_TIMER_SECONDS(seconds) (seconds)
#define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))

#define INQ_STD_NBYTES 36

enum skd_drvr_state {
	SKD_DRVR_STATE_LOAD,
	SKD_DRVR_STATE_IDLE,
	SKD_DRVR_STATE_BUSY,
	SKD_DRVR_STATE_STARTING,
	SKD_DRVR_STATE_ONLINE,
	SKD_DRVR_STATE_PAUSING,
	SKD_DRVR_STATE_PAUSED,
	SKD_DRVR_STATE_RESTARTING,
	SKD_DRVR_STATE_RESUMING,
	SKD_DRVR_STATE_STOPPING,
	SKD_DRVR_STATE_FAULT,
	SKD_DRVR_STATE_DISAPPEARED,
	SKD_DRVR_STATE_PROTOCOL_MISMATCH,
	SKD_DRVR_STATE_BUSY_ERASE,
	SKD_DRVR_STATE_BUSY_SANITIZE,
	SKD_DRVR_STATE_BUSY_IMMINENT,
	SKD_DRVR_STATE_WAIT_BOOT,
	SKD_DRVR_STATE_SYNCING,
};

#define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u)
#define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u)
#define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u)
#define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u)
#define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u)
#define SKD_START_WAIT_SECONDS 90u

enum skd_req_state {
	SKD_REQ_STATE_IDLE,
	SKD_REQ_STATE_SETUP,
	SKD_REQ_STATE_BUSY,
	SKD_REQ_STATE_COMPLETED,
	SKD_REQ_STATE_TIMEOUT,
};

enum skd_check_status_action {
	SKD_CHECK_STATUS_REPORT_GOOD,
	SKD_CHECK_STATUS_REPORT_SMART_ALERT,
	SKD_CHECK_STATUS_REQUEUE_REQUEST,
	SKD_CHECK_STATUS_REPORT_ERROR,
	SKD_CHECK_STATUS_BUSY_IMMINENT,
};

struct skd_msg_buf {
	struct fit_msg_hdr fmh;
	struct skd_scsi_request scsi[SKD_MAX_REQ_PER_MSG];
};

struct skd_fitmsg_context {
	u32 id;

	u32 length;

	struct skd_msg_buf *msg_buf;
	dma_addr_t mb_dma_address;
};

struct skd_request_context {
	enum skd_req_state state;

	u16 id;
	u32 fitmsg_id;

	u8 flush_cmd;

	enum dma_data_direction data_dir;
	struct scatterlist *sg;
	u32 n_sg;
	u32 sg_byte_count;

	struct fit_sg_descriptor *sksg_list;
	dma_addr_t sksg_dma_address;

	struct fit_completion_entry_v1 completion;

	struct fit_comp_error_info err_info;
	blk_status_t status;
};

struct skd_special_context {
	struct skd_request_context req;

	void *data_buf;
	dma_addr_t db_dma_address;

	struct skd_msg_buf *msg_buf;
	dma_addr_t mb_dma_address;
};

typedef enum skd_irq_type {
	SKD_IRQ_LEGACY,
	SKD_IRQ_MSI,
	SKD_IRQ_MSIX
} skd_irq_type_t;

#define SKD_MAX_BARS 2

struct skd_device {
	void __iomem *mem_map[SKD_MAX_BARS];
	resource_size_t mem_phys[SKD_MAX_BARS];
	u32 mem_size[SKD_MAX_BARS];

	struct skd_msix_entry *msix_entries;

	struct pci_dev *pdev;
	int pcie_error_reporting_is_enabled;

	spinlock_t lock;
	struct gendisk *disk;
	struct blk_mq_tag_set tag_set;
	struct request_queue *queue;
	struct skd_fitmsg_context *skmsg;
	struct device *class_dev;
	int gendisk_on;
	int sync_done;

	u32 devno;
	u32 major;
	char isr_name[30];

	enum skd_drvr_state state;
	u32 drive_state;

	u32 cur_max_queue_depth;
	u32 queue_low_water_mark;
	u32 dev_max_queue_depth;

	u32 num_fitmsg_context;
	u32 num_req_context;

	struct skd_fitmsg_context *skmsg_table;

	struct skd_special_context internal_skspcl;
	u32 read_cap_blocksize;
	u32 read_cap_last_lba;
	int read_cap_is_valid;
	int inquiry_is_valid;
	u8 inq_serial_num[13];	/* 12 chars plus null term */

	u8 skcomp_cycle;
	u32 skcomp_ix;
	struct kmem_cache *msgbuf_cache;
	struct kmem_cache *sglist_cache;
	struct kmem_cache *databuf_cache;
	struct fit_completion_entry_v1 *skcomp_table;
	struct fit_comp_error_info *skerr_table;
	dma_addr_t cq_dma_address;

	wait_queue_head_t waitq;

	struct timer_list timer;
	u32 timer_countdown;
	u32 timer_substate;

	int sgs_per_request;

	u32 last_mtd;

	u32 proto_ver;

	int dbg_level;

	u32 connect_time_stamp;
	int connect_retries;
#define SKD_MAX_CONNECT_RETRIES 16
	u32 drive_jiffies;

	u32 timo_slot;

	struct work_struct start_queue;
	struct work_struct completion_worker;
};

#define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
#define SKD_READL(DEV, OFF) skd_reg_read32(DEV, OFF)
#define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)

static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
{
	u32 val = readl(skdev->mem_map[1] + offset);

	if (unlikely(skdev->dbg_level >= 2))
		dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
	return val;
}

static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
				   u32 offset)
{
	writel(val, skdev->mem_map[1] + offset);
	if (unlikely(skdev->dbg_level >= 2))
		dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
}

static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
				   u32 offset)
{
	writeq(val, skdev->mem_map[1] + offset);
	if (unlikely(skdev->dbg_level >= 2))
		dev_dbg(&skdev->pdev->dev, "offset %x = %016llx\n", offset,
			val);
}

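/*
 * All register access goes through BAR 1 (mem_map[1]).  As one example of
 * how these helpers are used, skd_timer_tick() below polls the drive state
 * once a second with:
 *
 *	state = SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
 */
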
#define SKD_IRQ_DEFAULT SKD_IRQ_MSIX
static int skd_isr_type = SKD_IRQ_DEFAULT;
module_param(skd_isr_type, int, 0444);
MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
		 " (0==legacy, 1==MSI, 2==MSI-X, default==2)");

#define SKD_MAX_REQ_PER_MSG_DEFAULT 1
static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
module_param(skd_max_req_per_msg, int, 0444);
MODULE_PARM_DESC(skd_max_req_per_msg,
		 "Maximum SCSI requests packed in a single message."
		 " (1-" __stringify(SKD_MAX_REQ_PER_MSG) ", default==1)");

#define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
#define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
module_param(skd_max_queue_depth, int, 0444);
MODULE_PARM_DESC(skd_max_queue_depth,
		 "Maximum SCSI requests issued to s1120."
		 " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");

static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
module_param(skd_sgs_per_request, int, 0444);
MODULE_PARM_DESC(skd_sgs_per_request,
		 "Maximum SG elements per block request."
		 " (1-4096, default==256)");

static int skd_max_pass_thru = 1;
module_param(skd_max_pass_thru, int, 0444);
MODULE_PARM_DESC(skd_max_pass_thru,
		 "Maximum SCSI pass-thru at a time. IGNORED");

module_param(skd_dbg_level, int, 0444);
MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");

module_param(skd_isr_comp_limit, int, 0444);
MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");

/* Major device number dynamically assigned. */
static u32 skd_major;

static void skd_destruct(struct skd_device *skdev);
static const struct block_device_operations skd_blockdev_ops;
static void skd_send_fitmsg(struct skd_device *skdev,
			    struct skd_fitmsg_context *skmsg);
static void skd_send_special_fitmsg(struct skd_device *skdev,
				    struct skd_special_context *skspcl);
static bool skd_preop_sg_list(struct skd_device *skdev,
			      struct skd_request_context *skreq);
static void skd_postop_sg_list(struct skd_device *skdev,
			       struct skd_request_context *skreq);
static void skd_restart_device(struct skd_device *skdev);
static int skd_quiesce_dev(struct skd_device *skdev);
static int skd_unquiesce_dev(struct skd_device *skdev);
static void skd_disable_interrupts(struct skd_device *skdev);
static void skd_isr_fwstate(struct skd_device *skdev);
static void skd_recover_requests(struct skd_device *skdev);
static void skd_soft_reset(struct skd_device *skdev);

const char *skd_drive_state_to_str(int state);
const char *skd_skdev_state_to_str(enum skd_drvr_state state);
static void skd_log_skdev(struct skd_device *skdev, const char *event);
static void skd_log_skreq(struct skd_device *skdev,
			  struct skd_request_context *skreq, const char *event);

/*
 *****************************************************************************
 * READ/WRITE REQUESTS
 *****************************************************************************
 */

static void skd_inc_in_flight(struct request *rq, void *data, bool reserved)
{
	int *count = data;

	(*count)++;
}

static int skd_in_flight(struct skd_device *skdev)
{
	int count = 0;

	blk_mq_tagset_busy_iter(&skdev->tag_set, skd_inc_in_flight, &count);

	return count;
}

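/*
 * blk_mq_tagset_busy_iter() invokes skd_inc_in_flight() once per in-flight
 * request, so the callback has to bump the counter through the pointer it is
 * handed (incrementing the local pointer itself would count nothing); the
 * result is only used for the "busy=%d" debug messages below.
 */
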
static void
skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
		int data_dir, unsigned lba,
		unsigned count)
{
	if (data_dir == READ)
		scsi_req->cdb[0] = READ_10;
	else
		scsi_req->cdb[0] = WRITE_10;

	scsi_req->cdb[1] = 0;
	scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
	scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
	scsi_req->cdb[4] = (lba & 0xff00) >> 8;
	scsi_req->cdb[5] = (lba & 0xff);
	scsi_req->cdb[6] = 0;
	scsi_req->cdb[7] = (count & 0xff00) >> 8;
	scsi_req->cdb[8] = count & 0xff;
	scsi_req->cdb[9] = 0;
}

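/*
 * For example, a READ of 8 sectors at lba 0x12345678 produces the 10-byte
 * CDB 28 00 12 34 56 78 00 00 08 00: opcode READ_10 (0x28), a big-endian
 * 32-bit LBA in bytes 2-5, and a big-endian 16-bit transfer length in
 * bytes 7-8.
 */
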
static void
skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
			    struct skd_request_context *skreq)
{
	skreq->flush_cmd = 1;

	scsi_req->cdb[0] = SYNCHRONIZE_CACHE;
	scsi_req->cdb[1] = 0;
	scsi_req->cdb[2] = 0;
	scsi_req->cdb[3] = 0;
	scsi_req->cdb[4] = 0;
	scsi_req->cdb[5] = 0;
	scsi_req->cdb[6] = 0;
	scsi_req->cdb[7] = 0;
	scsi_req->cdb[8] = 0;
	scsi_req->cdb[9] = 0;
}

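/*
 * Flush handling: skd_mq_queue_rq() maps REQ_OP_FLUSH to this zero-length
 * SYNCHRONIZE CACHE command, and a REQ_FUA request additionally sets the
 * SKD_FUA_NV bit in cdb[1] of whichever CDB was prepared.
 */
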
/*
 * Return true if and only if all pending requests should be failed.
 */
static bool skd_fail_all(struct request_queue *q)
{
	struct skd_device *skdev = q->queuedata;

	SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);

	skd_log_skdev(skdev, "req_not_online");
	switch (skdev->state) {
	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
	case SKD_DRVR_STATE_STARTING:
	case SKD_DRVR_STATE_RESTARTING:
	case SKD_DRVR_STATE_WAIT_BOOT:
		/* In case of starting, we haven't started the queue,
		 * so we can't get here... but requests are
		 * possibly hanging out waiting for us because we
		 * reported the dev/skd0 already. They'll wait
		 * forever if connect doesn't complete.
		 * What to do??? delay dev/skd0 ??
		 */
	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
		return false;

	case SKD_DRVR_STATE_BUSY_SANITIZE:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		return true;
	}
}

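/*
 * The caller (skd_mq_queue_rq() below) turns the return value into a block
 * layer status: true becomes BLK_STS_IOERR, false becomes BLK_STS_RESOURCE
 * so that blk-mq retries the request later instead of failing it.
 */
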
static blk_status_t skd_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *mqd)
{
	struct request *const req = mqd->rq;
	struct request_queue *const q = req->q;
	struct skd_device *skdev = q->queuedata;
	struct skd_fitmsg_context *skmsg;
	struct fit_msg_hdr *fmh;
	const u32 tag = blk_mq_unique_tag(req);
	struct skd_request_context *const skreq = blk_mq_rq_to_pdu(req);
	struct skd_scsi_request *scsi_req;
	unsigned long flags = 0;
	const u32 lba = blk_rq_pos(req);
	const u32 count = blk_rq_sectors(req);
	const int data_dir = rq_data_dir(req);

	if (unlikely(skdev->state != SKD_DRVR_STATE_ONLINE))
		return skd_fail_all(q) ? BLK_STS_IOERR : BLK_STS_RESOURCE;

	blk_mq_start_request(req);

	WARN_ONCE(tag >= skd_max_queue_depth, "%#x > %#x (nr_requests = %lu)\n",
		  tag, skd_max_queue_depth, q->nr_requests);

	SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);

	dev_dbg(&skdev->pdev->dev,
		"new req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba,
		lba, count, count, data_dir);

	skreq->id = tag + SKD_ID_RW_REQUEST;
	skreq->flush_cmd = 0;
	skreq->n_sg = 0;
	skreq->sg_byte_count = 0;

	skreq->fitmsg_id = 0;

	skreq->data_dir = data_dir == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	if (req->bio && !skd_preop_sg_list(skdev, skreq)) {
		dev_dbg(&skdev->pdev->dev, "error Out\n");
		skreq->status = BLK_STS_RESOURCE;
		blk_mq_complete_request(req);
		return BLK_STS_OK;
	}

	dma_sync_single_for_device(&skdev->pdev->dev, skreq->sksg_dma_address,
				   skreq->n_sg *
				   sizeof(struct fit_sg_descriptor),
				   DMA_TO_DEVICE);

	/* Either a FIT msg is in progress or we have to start one. */
	if (skd_max_req_per_msg == 1) {
		skmsg = NULL;
	} else {
		spin_lock_irqsave(&skdev->lock, flags);
		skmsg = skdev->skmsg;
	}
	if (!skmsg) {
		skmsg = &skdev->skmsg_table[tag];
		skdev->skmsg = skmsg;

		/* Initialize the FIT msg header */
		fmh = &skmsg->msg_buf->fmh;
		memset(fmh, 0, sizeof(*fmh));
		fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
		skmsg->length = sizeof(*fmh);
	} else {
		fmh = &skmsg->msg_buf->fmh;
	}

	skreq->fitmsg_id = skmsg->id;

	scsi_req = &skmsg->msg_buf->scsi[fmh->num_protocol_cmds_coalesced];
	memset(scsi_req, 0, sizeof(*scsi_req));

	scsi_req->hdr.tag = skreq->id;
	scsi_req->hdr.sg_list_dma_address =
		cpu_to_be64(skreq->sksg_dma_address);

	if (req_op(req) == REQ_OP_FLUSH) {
		skd_prep_zerosize_flush_cdb(scsi_req, skreq);
		SKD_ASSERT(skreq->flush_cmd == 1);
	} else {
		skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
	}

	if (req->cmd_flags & REQ_FUA)
		scsi_req->cdb[1] |= SKD_FUA_NV;

	scsi_req->hdr.sg_list_len_bytes = cpu_to_be32(skreq->sg_byte_count);

	/* Complete resource allocations. */
	skreq->state = SKD_REQ_STATE_BUSY;

	skmsg->length += sizeof(struct skd_scsi_request);
	fmh->num_protocol_cmds_coalesced++;

	dev_dbg(&skdev->pdev->dev, "req=0x%x busy=%d\n", skreq->id,
		skd_in_flight(skdev));

	/*
	 * If the FIT msg buffer is full send it.
	 */
	if (skd_max_req_per_msg == 1) {
		skd_send_fitmsg(skdev, skmsg);
	} else {
		if (mqd->last ||
		    fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
			skd_send_fitmsg(skdev, skmsg);
			skdev->skmsg = NULL;
		}
		spin_unlock_irqrestore(&skdev->lock, flags);
	}

	return BLK_STS_OK;
}

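/*
 * Note on locking: skdev->lock is only taken when skd_max_req_per_msg > 1,
 * because only then is a partially filled FIT message (skdev->skmsg) shared
 * between calls.  The coalesced message is flushed either when it is full or
 * when blk-mq marks the request as the last one of the current batch
 * (mqd->last).
 */
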
static enum blk_eh_timer_return skd_timed_out(struct request *req,
					      bool reserved)
{
	struct skd_device *skdev = req->q->queuedata;

	dev_err(&skdev->pdev->dev, "request with tag %#x timed out\n",
		blk_mq_unique_tag(req));

	return BLK_EH_RESET_TIMER;
}

static void skd_complete_rq(struct request *req)
{
	struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);

	blk_mq_end_request(req, skreq->status);
}

static bool skd_preop_sg_list(struct skd_device *skdev,
			      struct skd_request_context *skreq)
{
	struct request *req = blk_mq_rq_from_pdu(skreq);
	struct scatterlist *sgl = &skreq->sg[0], *sg;
	int n_sg;
	int i;

	skreq->sg_byte_count = 0;

	WARN_ON_ONCE(skreq->data_dir != DMA_TO_DEVICE &&
		     skreq->data_dir != DMA_FROM_DEVICE);

	n_sg = blk_rq_map_sg(skdev->queue, req, sgl);
	if (n_sg <= 0)
		return false;

	/*
	 * Map scatterlist to PCI bus addresses.
	 * Note PCI might change the number of entries.
	 */
	n_sg = dma_map_sg(&skdev->pdev->dev, sgl, n_sg, skreq->data_dir);
	if (n_sg <= 0)
		return false;

	SKD_ASSERT(n_sg <= skdev->sgs_per_request);

	skreq->n_sg = n_sg;

	for_each_sg(sgl, sg, n_sg, i) {
		struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
		u32 cnt = sg_dma_len(sg);
		uint64_t dma_addr = sg_dma_address(sg);

		sgd->control = FIT_SGD_CONTROL_NOT_LAST;
		sgd->byte_count = cnt;
		skreq->sg_byte_count += cnt;
		sgd->host_side_addr = dma_addr;
		sgd->dev_side_addr = 0;
	}

	skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
	skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;

	if (unlikely(skdev->dbg_level > 1)) {
		dev_dbg(&skdev->pdev->dev,
			"skreq=%x sksg_list=%p sksg_dma=%pad\n",
			skreq->id, skreq->sksg_list, &skreq->sksg_dma_address);
		for (i = 0; i < n_sg; i++) {
			struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];

			dev_dbg(&skdev->pdev->dev,
				" sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
				i, sgd->byte_count, sgd->control,
				sgd->host_side_addr, sgd->next_desc_ptr);
		}
	}

	return true;
}

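/*
 * Each fit_sg_descriptor describes one mapped DMA segment; the byte_count
 * values accumulate into skreq->sg_byte_count, which skd_mq_queue_rq() sends
 * to the drive as sg_list_len_bytes.  Marking the final descriptor
 * FIT_SGD_CONTROL_LAST with a zero next pointer terminates the chain, and
 * skd_postop_sg_list() below restores the link so the preallocated list can
 * be reused by the next request.
 */
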
static void skd_postop_sg_list(struct skd_device *skdev,
			       struct skd_request_context *skreq)
{
	/*
	 * restore the next ptr for next IO request so we
	 * don't have to set it every time.
	 */
	skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
		skreq->sksg_dma_address +
		((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
	dma_unmap_sg(&skdev->pdev->dev, &skreq->sg[0], skreq->n_sg,
		     skreq->data_dir);
}

/*
 *****************************************************************************
 * TIMER
 *****************************************************************************
 */

static void skd_timer_tick_not_online(struct skd_device *skdev);

static void skd_start_queue(struct work_struct *work)
{
	struct skd_device *skdev = container_of(work, typeof(*skdev),
						start_queue);

	/*
	 * Although it is safe to call blk_start_queue() from interrupt
	 * context, blk_mq_start_hw_queues() must not be called from
	 * interrupt context.
	 */
	blk_mq_start_hw_queues(skdev->queue);
}

static void skd_timer_tick(struct timer_list *t)
{
	struct skd_device *skdev = from_timer(skdev, t, timer);
	unsigned long reqflags;
	u32 state;

	if (skdev->state == SKD_DRVR_STATE_FAULT)
		/* The driver has declared fault, and we want it to
		 * stay that way until driver is reloaded.
		 */
		return;

	spin_lock_irqsave(&skdev->lock, reqflags);

	state = SKD_READL(skdev, FIT_STATUS);
	state &= FIT_SR_DRIVE_STATE_MASK;
	if (state != skdev->drive_state)
		skd_isr_fwstate(skdev);

	if (skdev->state != SKD_DRVR_STATE_ONLINE)
		skd_timer_tick_not_online(skdev);

	mod_timer(&skdev->timer, (jiffies + HZ));

	spin_unlock_irqrestore(&skdev->lock, reqflags);
}

static void skd_timer_tick_not_online(struct skd_device *skdev)
{
	switch (skdev->state) {
	case SKD_DRVR_STATE_IDLE:
	case SKD_DRVR_STATE_LOAD:
		break;
	case SKD_DRVR_STATE_BUSY_SANITIZE:
		dev_dbg(&skdev->pdev->dev,
			"drive busy sanitize[%x], driver[%x]\n",
			skdev->drive_state, skdev->state);
		/* If we've been in sanitize for 3 seconds, we figure we're not
		 * going to get any more completions, so recover requests now
		 */
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		skd_recover_requests(skdev);
		break;

	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
		dev_dbg(&skdev->pdev->dev, "busy[%x], countdown=%d\n",
			skdev->state, skdev->timer_countdown);
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		dev_dbg(&skdev->pdev->dev,
			"busy[%x], timedout=%d, restarting device.",
			skdev->state, skdev->timer_countdown);
		skd_restart_device(skdev);
		break;

	case SKD_DRVR_STATE_WAIT_BOOT:
	case SKD_DRVR_STATE_STARTING:
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		/* For now, we fault the drive. Could attempt resets to
		 * recover at some point. */
		skdev->state = SKD_DRVR_STATE_FAULT;

		dev_err(&skdev->pdev->dev, "DriveFault Connect Timeout (%x)\n",
			skdev->drive_state);

		/* start the queue so we can respond with error to requests */
		/* wakeup anyone waiting for startup complete */
		schedule_work(&skdev->start_queue);
		skdev->gendisk_on = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	case SKD_DRVR_STATE_ONLINE:
		/* shouldn't get here. */
		break;

	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
		break;

	case SKD_DRVR_STATE_RESTARTING:
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		/* For now, we fault the drive. Could attempt resets to
		 * recover at some point. */
		skdev->state = SKD_DRVR_STATE_FAULT;
		dev_err(&skdev->pdev->dev,
			"DriveFault Reconnect Timeout (%x)\n",
			skdev->drive_state);

		/*
		 * Recovering does two things:
		 * 1. completes IO with error
		 * 2. reclaims dma resources
		 * When is it safe to recover requests?
		 * - if the drive state is faulted
		 * - if the state is still soft reset after our timeout
		 * - if the drive registers are dead (state = FF)
		 * If it is "unsafe", we still need to recover, so we will
		 * disable pci bus mastering and disable our interrupts.
		 */
		if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
		    (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
		    (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
			/* It never came out of soft reset. Try to
			 * recover the requests and then let them
			 * fail. This is to mitigate hung processes. */
			skd_recover_requests(skdev);
		else {
			dev_err(&skdev->pdev->dev, "Disable BusMaster (%x)\n",
				skdev->drive_state);
			pci_disable_device(skdev->pdev);
			skd_disable_interrupts(skdev);
			skd_recover_requests(skdev);
		}

		/* start the queue so we can respond with error to requests */
		/* wakeup anyone waiting for startup complete */
		schedule_work(&skdev->start_queue);
		skdev->gendisk_on = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	case SKD_DRVR_STATE_RESUMING:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		break;
	}
}

static int skd_start_timer(struct skd_device *skdev)
{
	int rc;

	timer_setup(&skdev->timer, skd_timer_tick, 0);

	rc = mod_timer(&skdev->timer, (jiffies + HZ));
	if (rc)
		dev_err(&skdev->pdev->dev, "failed to start timer %d\n", rc);
	return rc;
}

static void skd_kill_timer(struct skd_device *skdev)
{
	del_timer_sync(&skdev->timer);
}

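/*
 * The timer re-arms itself in skd_timer_tick() with jiffies + HZ, i.e. it
 * fires roughly once a second, so the timer_countdown values used by
 * skd_timer_tick_not_online() count seconds spent in a given driver state.
 */
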
/*
 *****************************************************************************
 * INTERNAL REQUESTS -- generated by driver itself
 *****************************************************************************
 */

static int skd_format_internal_skspcl(struct skd_device *skdev)
{
	struct skd_special_context *skspcl = &skdev->internal_skspcl;
	struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
	struct fit_msg_hdr *fmh;
	uint64_t dma_address;
	struct skd_scsi_request *scsi;

	fmh = &skspcl->msg_buf->fmh;
	fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
	fmh->num_protocol_cmds_coalesced = 1;

	scsi = &skspcl->msg_buf->scsi[0];
	memset(scsi, 0, sizeof(*scsi));
	dma_address = skspcl->req.sksg_dma_address;
	scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
	skspcl->req.n_sg = 1;
	sgd->control = FIT_SGD_CONTROL_LAST;
	sgd->byte_count = 0;
	sgd->host_side_addr = skspcl->db_dma_address;
	sgd->dev_side_addr = 0;
	sgd->next_desc_ptr = 0LL;

	return 1;
}

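/*
 * The internal special context carries exactly one SG descriptor pointing at
 * its data buffer (db_dma_address); the buffer commands below use
 * WR_BUF_SIZE == SKD_N_INTERNAL_BYTES of it.  Each internal command only
 * adjusts sgd->byte_count and the CDB before the message is resent, see
 * skd_send_internal_skspcl().
 */
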
#define WR_BUF_SIZE SKD_N_INTERNAL_BYTES

static void skd_send_internal_skspcl(struct skd_device *skdev,
				     struct skd_special_context *skspcl,
				     u8 opcode)
{
	struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
	struct skd_scsi_request *scsi;
	unsigned char *buf = skspcl->data_buf;
	int i;

	if (skspcl->req.state != SKD_REQ_STATE_IDLE)
		/*
		 * A refresh is already in progress.
		 * Just wait for it to finish.
		 */
		return;

	skspcl->req.state = SKD_REQ_STATE_BUSY;

	scsi = &skspcl->msg_buf->scsi[0];
	scsi->hdr.tag = skspcl->req.id;

	memset(scsi->cdb, 0, sizeof(scsi->cdb));

	switch (opcode) {
	case TEST_UNIT_READY:
		scsi->cdb[0] = TEST_UNIT_READY;
		sgd->byte_count = 0;
		scsi->hdr.sg_list_len_bytes = 0;
		break;

	case READ_CAPACITY:
		scsi->cdb[0] = READ_CAPACITY;
		sgd->byte_count = SKD_N_READ_CAP_BYTES;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		break;

	case INQUIRY:
		scsi->cdb[0] = INQUIRY;
		scsi->cdb[1] = 0x01;	/* evpd */
		scsi->cdb[2] = 0x80;	/* serial number page */
		scsi->cdb[4] = 0x10;
		sgd->byte_count = 16;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		break;

	case SYNCHRONIZE_CACHE:
		scsi->cdb[0] = SYNCHRONIZE_CACHE;
		sgd->byte_count = 0;
		scsi->hdr.sg_list_len_bytes = 0;
		break;

	case WRITE_BUFFER:
		scsi->cdb[0] = WRITE_BUFFER;
		scsi->cdb[1] = 0x02;
		scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
		scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
		sgd->byte_count = WR_BUF_SIZE;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);

		/* fill incrementing byte pattern */
		for (i = 0; i < sgd->byte_count; i++)
			buf[i] = i & 0xFF;
		break;

	case READ_BUFFER:
		scsi->cdb[0] = READ_BUFFER;
		scsi->cdb[1] = 0x02;
		scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
		scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
		sgd->byte_count = WR_BUF_SIZE;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);

		memset(skspcl->data_buf, 0, sgd->byte_count);
		break;

	default:
		SKD_ASSERT("Don't know what to send");
		return;
	}

	skd_send_special_fitmsg(skdev, skspcl);
}

static void skd_refresh_device_data(struct skd_device *skdev)
{
	struct skd_special_context *skspcl = &skdev->internal_skspcl;

	skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
}

static int skd_chk_read_buf(struct skd_device *skdev,
			    struct skd_special_context *skspcl)
{
	unsigned char *buf = skspcl->data_buf;
	int i;

	/* check for incrementing byte pattern */
	for (i = 0; i < WR_BUF_SIZE; i++)
		if (buf[i] != (i & 0xFF))
			return 1;

	return 0;
}

static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
				 u8 code, u8 qual, u8 fruc)
{
	/* If the check condition is of special interest, log a message */
	if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
	    && (code == 0x04) && (qual == 0x06)) {
		dev_err(&skdev->pdev->dev,
			"*** LOST_WRITE_DATA ERROR *** key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
			key, code, qual, fruc);
	}
}

static void skd_complete_internal(struct skd_device *skdev,
				  struct fit_completion_entry_v1 *skcomp,
				  struct fit_comp_error_info *skerr,
				  struct skd_special_context *skspcl)
{
	u8 *buf = skspcl->data_buf;
	u8 status;
	int i;
	struct skd_scsi_request *scsi = &skspcl->msg_buf->scsi[0];

	lockdep_assert_held(&skdev->lock);

	SKD_ASSERT(skspcl == &skdev->internal_skspcl);

	dev_dbg(&skdev->pdev->dev, "complete internal %x\n", scsi->cdb[0]);

	dma_sync_single_for_cpu(&skdev->pdev->dev,
				skspcl->db_dma_address,
				skspcl->req.sksg_list[0].byte_count,
				DMA_BIDIRECTIONAL);

	skspcl->req.completion = *skcomp;
	skspcl->req.state = SKD_REQ_STATE_IDLE;

	status = skspcl->req.completion.status;

	skd_log_check_status(skdev, status, skerr->key, skerr->code,
			     skerr->qual, skerr->fruc);

	switch (scsi->cdb[0]) {
	case TEST_UNIT_READY:
		if (status == SAM_STAT_GOOD)
			skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
		else if ((status == SAM_STAT_CHECK_CONDITION) &&
			 (skerr->key == MEDIUM_ERROR))
			skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
		else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				dev_dbg(&skdev->pdev->dev,
					"TUR failed, don't send anymore state 0x%x\n",
					skdev->state);
				return;
			}
			dev_dbg(&skdev->pdev->dev,
				"**** TUR failed, retry skerr\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case WRITE_BUFFER:
		if (status == SAM_STAT_GOOD)
			skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
		else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				dev_dbg(&skdev->pdev->dev,
					"write buffer failed, don't send anymore state 0x%x\n",
					skdev->state);
				return;
			}
			dev_dbg(&skdev->pdev->dev,
				"**** write buffer failed, retry skerr\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case READ_BUFFER:
		if (status == SAM_STAT_GOOD) {
			if (skd_chk_read_buf(skdev, skspcl) == 0)
				skd_send_internal_skspcl(skdev, skspcl,
							 READ_CAPACITY);
			else {
				dev_err(&skdev->pdev->dev,
					"*** W/R Buffer mismatch %d ***\n",
					skdev->connect_retries);
				if (skdev->connect_retries <
				    SKD_MAX_CONNECT_RETRIES) {
					skdev->connect_retries++;
					skd_soft_reset(skdev);
				} else {
					dev_err(&skdev->pdev->dev,
						"W/R Buffer Connect Error\n");
					return;
				}
			}
		} else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				dev_dbg(&skdev->pdev->dev,
					"read buffer failed, don't send anymore state 0x%x\n",
					skdev->state);
				return;
			}
			dev_dbg(&skdev->pdev->dev,
				"**** read buffer failed, retry skerr\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case READ_CAPACITY:
		skdev->read_cap_is_valid = 0;
		if (status == SAM_STAT_GOOD) {
			skdev->read_cap_last_lba =
				(buf[0] << 24) | (buf[1] << 16) |
				(buf[2] << 8) | buf[3];
			skdev->read_cap_blocksize =
				(buf[4] << 24) | (buf[5] << 16) |
				(buf[6] << 8) | buf[7];

			dev_dbg(&skdev->pdev->dev, "last lba %d, bs %d\n",
				skdev->read_cap_last_lba,
				skdev->read_cap_blocksize);

			set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);

			skdev->read_cap_is_valid = 1;

			skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
		} else if ((status == SAM_STAT_CHECK_CONDITION) &&
			   (skerr->key == MEDIUM_ERROR)) {
			skdev->read_cap_last_lba = ~0;
			set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
			dev_dbg(&skdev->pdev->dev, "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n");
			skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
		} else {
			dev_dbg(&skdev->pdev->dev, "**** READCAP failed, retry TUR\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case INQUIRY:
		skdev->inquiry_is_valid = 0;
		if (status == SAM_STAT_GOOD) {
			skdev->inquiry_is_valid = 1;

			for (i = 0; i < 12; i++)
				skdev->inq_serial_num[i] = buf[i + 4];
			skdev->inq_serial_num[12] = 0;
		}

		if (skd_unquiesce_dev(skdev) < 0)
			dev_dbg(&skdev->pdev->dev, "**** failed, to ONLINE device\n");
		/* connection is complete */
		skdev->connect_retries = 0;
		break;

	case SYNCHRONIZE_CACHE:
		if (status == SAM_STAT_GOOD)
			skdev->sync_done = 1;
		else
			skdev->sync_done = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	default:
		SKD_ASSERT("we didn't send this");
	}
}

/*
 *****************************************************************************
 * FIT MESSAGES
 *****************************************************************************
 */

static void skd_send_fitmsg(struct skd_device *skdev,
			    struct skd_fitmsg_context *skmsg)
{
	u64 qcmd;

	dev_dbg(&skdev->pdev->dev, "dma address %pad, busy=%d\n",
		&skmsg->mb_dma_address, skd_in_flight(skdev));
	dev_dbg(&skdev->pdev->dev, "msg_buf %p\n", skmsg->msg_buf);

	qcmd = skmsg->mb_dma_address;
	qcmd |= FIT_QCMD_QID_NORMAL;

	if (unlikely(skdev->dbg_level > 1)) {
		u8 *bp = (u8 *)skmsg->msg_buf;
		int i;

		for (i = 0; i < skmsg->length; i += 8) {
			dev_dbg(&skdev->pdev->dev, "msg[%2d] %8ph\n", i,
				&bp[i]);
			if (i == 0)
				i = 64 - 8;
		}
	}

	if (skmsg->length > 256)
		qcmd |= FIT_QCMD_MSGSIZE_512;
	else if (skmsg->length > 128)
		qcmd |= FIT_QCMD_MSGSIZE_256;
	else if (skmsg->length > 64)
		qcmd |= FIT_QCMD_MSGSIZE_128;
	else
		/*
		 * This makes no sense because the FIT msg header is
		 * 64 bytes. If the msg is only 64 bytes long it has
		 * no payload.
		 */
		qcmd |= FIT_QCMD_MSGSIZE_64;

	dma_sync_single_for_device(&skdev->pdev->dev, skmsg->mb_dma_address,
				   skmsg->length, DMA_TO_DEVICE);

	/* Make sure skd_msg_buf is written before the doorbell is triggered. */
	smp_wmb();

	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}

static void skd_send_special_fitmsg(struct skd_device *skdev,
				    struct skd_special_context *skspcl)
{
	u64 qcmd;

	WARN_ON_ONCE(skspcl->req.n_sg != 1);

	if (unlikely(skdev->dbg_level > 1)) {
		u8 *bp = (u8 *)skspcl->msg_buf;
		int i;

		for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
			dev_dbg(&skdev->pdev->dev, " spcl[%2d] %8ph\n", i,
				&bp[i]);
			if (i == 0)
				i = 64 - 8;
		}

		dev_dbg(&skdev->pdev->dev,
			"skspcl=%p id=%04x sksg_list=%p sksg_dma=%pad\n",
			skspcl, skspcl->req.id, skspcl->req.sksg_list,
			&skspcl->req.sksg_dma_address);
		for (i = 0; i < skspcl->req.n_sg; i++) {
			struct fit_sg_descriptor *sgd =
				&skspcl->req.sksg_list[i];

			dev_dbg(&skdev->pdev->dev,
				" sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
				i, sgd->byte_count, sgd->control,
				sgd->host_side_addr, sgd->next_desc_ptr);
		}
	}

	/*
	 * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
	 * and one 64-byte SSDI command.
	 */
	qcmd = skspcl->mb_dma_address;
	qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;

	dma_sync_single_for_device(&skdev->pdev->dev, skspcl->mb_dma_address,
				   SKD_N_SPECIAL_FITMSG_BYTES, DMA_TO_DEVICE);
	dma_sync_single_for_device(&skdev->pdev->dev,
				   skspcl->req.sksg_dma_address,
				   1 * sizeof(struct fit_sg_descriptor),
				   DMA_TO_DEVICE);
	dma_sync_single_for_device(&skdev->pdev->dev,
				   skspcl->db_dma_address,
				   skspcl->req.sksg_list[0].byte_count,
				   DMA_BIDIRECTIONAL);

	/* Make sure skd_msg_buf is written before the doorbell is triggered. */
	smp_wmb();

	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}

/*
 *****************************************************************************
 * COMPLETION QUEUE
 *****************************************************************************
 */

static void skd_complete_other(struct skd_device *skdev,
			       struct fit_completion_entry_v1 *skcomp,
			       struct fit_comp_error_info *skerr);

struct sns_info {
	u8 type;
	u8 stat;
	u8 key;
	u8 asc;
	u8 ascq;
	u8 mask;
	enum skd_check_status_action action;
};

static struct sns_info skd_chkstat_table[] = {
	/* Good */
	{ 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c,
	  SKD_CHECK_STATUS_REPORT_GOOD },

	/* Smart alerts */
	{ 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E,	/* warnings */
	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },
	{ 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E,	/* thresholds */
	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },
	{ 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */
	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },

	/* Retry (with limits) */
	{ 0x70, 0x02, 0x0B, 0, 0, 0x1C,		/* This one is for DMA ERROR */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
	{ 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E,	/* warnings */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
	{ 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E,	/* thresholds */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
	{ 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F,	/* backup power */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },

	/* Busy (or about to be) */
	{ 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F,	/* fw changed */
	  SKD_CHECK_STATUS_BUSY_IMMINENT },
};

/*
 * Look up status and sense data to decide how to handle the error
 * from the device.
 * mask says which fields must match e.g., mask=0x18 means check
 * type and stat, ignore key, asc, ascq.
 */
static enum skd_check_status_action
skd_check_status(struct skd_device *skdev,
		 u8 cmp_status, struct fit_comp_error_info *skerr)
{
	int i;

	dev_err(&skdev->pdev->dev, "key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
		skerr->key, skerr->code, skerr->qual, skerr->fruc);

	dev_dbg(&skdev->pdev->dev,
		"stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
		skerr->type, cmp_status, skerr->key, skerr->code, skerr->qual,
		skerr->fruc);

	/* Does the info match an entry in the good category? */
	for (i = 0; i < ARRAY_SIZE(skd_chkstat_table); i++) {
		struct sns_info *sns = &skd_chkstat_table[i];

		if (sns->mask & 0x10)
			if (skerr->type != sns->type)
				continue;

		if (sns->mask & 0x08)
			if (cmp_status != sns->stat)
				continue;

		if (sns->mask & 0x04)
			if (skerr->key != sns->key)
				continue;

		if (sns->mask & 0x02)
			if (skerr->code != sns->asc)
				continue;

		if (sns->mask & 0x01)
			if (skerr->qual != sns->ascq)
				continue;

		if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
			dev_err(&skdev->pdev->dev,
				"SMART Alert: sense key/asc/ascq %02x/%02x/%02x\n",
				skerr->key, skerr->code, skerr->qual);
		}
		return sns->action;
	}

	/* No other match, so nonzero status means error,
	 * zero status means good
	 */
	if (cmp_status) {
		dev_dbg(&skdev->pdev->dev, "status check: error\n");
		return SKD_CHECK_STATUS_REPORT_ERROR;
	}

	dev_dbg(&skdev->pdev->dev, "status check good default\n");
	return SKD_CHECK_STATUS_REPORT_GOOD;
}

  1154. static void skd_resolve_req_exception(struct skd_device *skdev,
  1155. struct skd_request_context *skreq,
  1156. struct request *req)
  1157. {
  1158. u8 cmp_status = skreq->completion.status;
  1159. switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
  1160. case SKD_CHECK_STATUS_REPORT_GOOD:
  1161. case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
  1162. skreq->status = BLK_STS_OK;
  1163. blk_mq_complete_request(req);
  1164. break;
  1165. case SKD_CHECK_STATUS_BUSY_IMMINENT:
  1166. skd_log_skreq(skdev, skreq, "retry(busy)");
  1167. blk_mq_requeue_request(req, true);
  1168. dev_info(&skdev->pdev->dev, "drive BUSY imminent\n");
  1169. skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
  1170. skdev->timer_countdown = SKD_TIMER_MINUTES(20);
  1171. skd_quiesce_dev(skdev);
  1172. break;
  1173. case SKD_CHECK_STATUS_REQUEUE_REQUEST:
  1174. if ((unsigned long) ++req->special < SKD_MAX_RETRIES) {
  1175. skd_log_skreq(skdev, skreq, "retry");
  1176. blk_mq_requeue_request(req, true);
  1177. break;
  1178. }
  1179. /* fall through */
  1180. case SKD_CHECK_STATUS_REPORT_ERROR:
  1181. default:
  1182. skreq->status = BLK_STS_IOERR;
  1183. blk_mq_complete_request(req);
  1184. break;
  1185. }
  1186. }
  1187. static void skd_release_skreq(struct skd_device *skdev,
  1188. struct skd_request_context *skreq)
  1189. {
  1190. /*
  1191. * Reclaim the skd_request_context
  1192. */
  1193. skreq->state = SKD_REQ_STATE_IDLE;
  1194. }
  1195. static int skd_isr_completion_posted(struct skd_device *skdev,
  1196. int limit, int *enqueued)
  1197. {
  1198. struct fit_completion_entry_v1 *skcmp;
  1199. struct fit_comp_error_info *skerr;
  1200. u16 req_id;
  1201. u32 tag;
  1202. u16 hwq = 0;
  1203. struct request *rq;
  1204. struct skd_request_context *skreq;
  1205. u16 cmp_cntxt;
  1206. u8 cmp_status;
  1207. u8 cmp_cycle;
  1208. u32 cmp_bytes;
  1209. int rc = 0;
  1210. int processed = 0;
  1211. lockdep_assert_held(&skdev->lock);
  1212. for (;; ) {
  1213. SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
  1214. skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
  1215. cmp_cycle = skcmp->cycle;
  1216. cmp_cntxt = skcmp->tag;
  1217. cmp_status = skcmp->status;
  1218. cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
  1219. skerr = &skdev->skerr_table[skdev->skcomp_ix];
  1220. dev_dbg(&skdev->pdev->dev,
  1221. "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d busy=%d rbytes=0x%x proto=%d\n",
  1222. skdev->skcomp_cycle, skdev->skcomp_ix, cmp_cycle,
  1223. cmp_cntxt, cmp_status, skd_in_flight(skdev),
  1224. cmp_bytes, skdev->proto_ver);
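/*
 * A completion entry is valid only when its cycle field matches the
 * cycle value the driver expects for the current pass over the ring;
 * a mismatch means the firmware has not written this slot yet.
 */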
  1225. if (cmp_cycle != skdev->skcomp_cycle) {
  1226. dev_dbg(&skdev->pdev->dev, "end of completions\n");
  1227. break;
  1228. }
  1229. /*
  1230. * Update the completion queue head index and possibly
  1231. * the completion cycle count. 8-bit wrap-around.
  1232. */
  1233. skdev->skcomp_ix++;
  1234. if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
  1235. skdev->skcomp_ix = 0;
  1236. skdev->skcomp_cycle++;
  1237. }
  1238. /*
  1239. * The command context is a unique 32-bit ID. The low order
  1240. * bits help locate the request. The request is usually a
  1241. * r/w request (see skd_start() above) or a special request.
  1242. */
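/*
 * The id encodes both a table and a slot; skd_complete_other() below
 * picks those fields apart with SKD_ID_TABLE_MASK and SKD_ID_SLOT_MASK.
 */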
  1243. req_id = cmp_cntxt;
  1244. tag = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
  1245. /* Is this other than a r/w request? */
  1246. if (tag >= skdev->num_req_context) {
  1247. /*
  1248. * This is not a completion for a r/w request.
  1249. */
  1250. WARN_ON_ONCE(blk_mq_tag_to_rq(skdev->tag_set.tags[hwq],
  1251. tag));
  1252. skd_complete_other(skdev, skcmp, skerr);
  1253. continue;
  1254. }
  1255. rq = blk_mq_tag_to_rq(skdev->tag_set.tags[hwq], tag);
  1256. if (WARN(!rq, "No request for tag %#x -> %#x\n", cmp_cntxt,
  1257. tag))
  1258. continue;
  1259. skreq = blk_mq_rq_to_pdu(rq);
  1260. /*
  1261. * Make sure the request ID for the slot matches.
  1262. */
  1263. if (skreq->id != req_id) {
  1264. dev_err(&skdev->pdev->dev,
  1265. "Completion mismatch comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
  1266. req_id, skreq->id, cmp_cntxt);
  1267. continue;
  1268. }
  1269. SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
  1270. skreq->completion = *skcmp;
  1271. if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
  1272. skreq->err_info = *skerr;
  1273. skd_log_check_status(skdev, cmp_status, skerr->key,
  1274. skerr->code, skerr->qual,
  1275. skerr->fruc);
  1276. }
  1277. /* Release DMA resources for the request. */
  1278. if (skreq->n_sg > 0)
  1279. skd_postop_sg_list(skdev, skreq);
  1280. skd_release_skreq(skdev, skreq);
  1281. /*
  1282. * Capture the outcome and post it back to the native request.
  1283. */
  1284. if (likely(cmp_status == SAM_STAT_GOOD)) {
  1285. skreq->status = BLK_STS_OK;
  1286. blk_mq_complete_request(rq);
  1287. } else {
  1288. skd_resolve_req_exception(skdev, skreq, rq);
  1289. }
1290. /* skd_isr_comp_limit equal to zero means no limit */
  1291. if (limit) {
  1292. if (++processed >= limit) {
  1293. rc = 1;
  1294. break;
  1295. }
  1296. }
  1297. }
  1298. if (skdev->state == SKD_DRVR_STATE_PAUSING &&
  1299. skd_in_flight(skdev) == 0) {
  1300. skdev->state = SKD_DRVR_STATE_PAUSED;
  1301. wake_up_interruptible(&skdev->waitq);
  1302. }
  1303. return rc;
  1304. }
  1305. static void skd_complete_other(struct skd_device *skdev,
  1306. struct fit_completion_entry_v1 *skcomp,
  1307. struct fit_comp_error_info *skerr)
  1308. {
  1309. u32 req_id = 0;
  1310. u32 req_table;
  1311. u32 req_slot;
  1312. struct skd_special_context *skspcl;
  1313. lockdep_assert_held(&skdev->lock);
  1314. req_id = skcomp->tag;
  1315. req_table = req_id & SKD_ID_TABLE_MASK;
  1316. req_slot = req_id & SKD_ID_SLOT_MASK;
  1317. dev_dbg(&skdev->pdev->dev, "table=0x%x id=0x%x slot=%d\n", req_table,
  1318. req_id, req_slot);
  1319. /*
  1320. * Based on the request id, determine how to dispatch this completion.
1321. * This switch/case finds the good cases and forwards the
  1322. * completion entry. Errors are reported below the switch.
  1323. */
  1324. switch (req_table) {
  1325. case SKD_ID_RW_REQUEST:
  1326. /*
  1327. * The caller, skd_isr_completion_posted() above,
  1328. * handles r/w requests. The only way we get here
  1329. * is if the req_slot is out of bounds.
  1330. */
  1331. break;
  1332. case SKD_ID_INTERNAL:
  1333. if (req_slot == 0) {
  1334. skspcl = &skdev->internal_skspcl;
  1335. if (skspcl->req.id == req_id &&
  1336. skspcl->req.state == SKD_REQ_STATE_BUSY) {
  1337. skd_complete_internal(skdev,
  1338. skcomp, skerr, skspcl);
  1339. return;
  1340. }
  1341. }
  1342. break;
  1343. case SKD_ID_FIT_MSG:
  1344. /*
  1345. * These id's should never appear in a completion record.
  1346. */
  1347. break;
  1348. default:
  1349. /*
1350. * These id's should never appear anywhere.
  1351. */
  1352. break;
  1353. }
  1354. /*
  1355. * If we get here it is a bad or stale id.
  1356. */
  1357. }
  1358. static void skd_reset_skcomp(struct skd_device *skdev)
  1359. {
  1360. memset(skdev->skcomp_table, 0, SKD_SKCOMP_SIZE);
  1361. skdev->skcomp_ix = 0;
  1362. skdev->skcomp_cycle = 1;
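/*
 * The table is zeroed above, so starting the expected cycle at 1 ensures
 * that stale (all-zero) entries are never mistaken for fresh completions.
 */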
  1363. }
  1364. /*
  1365. *****************************************************************************
  1366. * INTERRUPTS
  1367. *****************************************************************************
  1368. */
  1369. static void skd_completion_worker(struct work_struct *work)
  1370. {
  1371. struct skd_device *skdev =
  1372. container_of(work, struct skd_device, completion_worker);
  1373. unsigned long flags;
  1374. int flush_enqueued = 0;
  1375. spin_lock_irqsave(&skdev->lock, flags);
  1376. /*
1377. * Pass in limit=0, which means no limit:
1378. * process everything in the completion queue.
  1379. */
  1380. skd_isr_completion_posted(skdev, 0, &flush_enqueued);
  1381. schedule_work(&skdev->start_queue);
  1382. spin_unlock_irqrestore(&skdev->lock, flags);
  1383. }
  1384. static void skd_isr_msg_from_dev(struct skd_device *skdev);
  1385. static irqreturn_t
  1386. skd_isr(int irq, void *ptr)
  1387. {
  1388. struct skd_device *skdev = ptr;
  1389. u32 intstat;
  1390. u32 ack;
  1391. int rc = 0;
  1392. int deferred = 0;
  1393. int flush_enqueued = 0;
  1394. spin_lock(&skdev->lock);
  1395. for (;; ) {
  1396. intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
  1397. ack = FIT_INT_DEF_MASK;
  1398. ack &= intstat;
  1399. dev_dbg(&skdev->pdev->dev, "intstat=0x%x ack=0x%x\n", intstat,
  1400. ack);
1401. /* As long as there is an interrupt pending on the device, keep
1402. * running the loop. When none are pending, get out; if we have not
1403. * done any processing yet, still defer to the completion worker.
  1404. */
  1405. if (ack == 0) {
1406. /* No interrupts are pending on the device, but schedule the
1407. * completion processor anyway while the device is online.
  1408. */
  1409. if (rc == 0)
1410. if (likely(skdev->state
  1411. == SKD_DRVR_STATE_ONLINE))
  1412. deferred = 1;
  1413. break;
  1414. }
  1415. rc = IRQ_HANDLED;
  1416. SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
  1417. if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
  1418. (skdev->state != SKD_DRVR_STATE_STOPPING))) {
  1419. if (intstat & FIT_ISH_COMPLETION_POSTED) {
  1420. /*
  1421. * If we have already deferred completion
  1422. * processing, don't bother running it again
  1423. */
  1424. if (deferred == 0)
  1425. deferred =
  1426. skd_isr_completion_posted(skdev,
  1427. skd_isr_comp_limit, &flush_enqueued);
  1428. }
  1429. if (intstat & FIT_ISH_FW_STATE_CHANGE) {
  1430. skd_isr_fwstate(skdev);
  1431. if (skdev->state == SKD_DRVR_STATE_FAULT ||
  1432. skdev->state ==
  1433. SKD_DRVR_STATE_DISAPPEARED) {
  1434. spin_unlock(&skdev->lock);
  1435. return rc;
  1436. }
  1437. }
  1438. if (intstat & FIT_ISH_MSG_FROM_DEV)
  1439. skd_isr_msg_from_dev(skdev);
  1440. }
  1441. }
  1442. if (unlikely(flush_enqueued))
  1443. schedule_work(&skdev->start_queue);
  1444. if (deferred)
  1445. schedule_work(&skdev->completion_worker);
  1446. else if (!flush_enqueued)
  1447. schedule_work(&skdev->start_queue);
  1448. spin_unlock(&skdev->lock);
  1449. return rc;
  1450. }
  1451. static void skd_drive_fault(struct skd_device *skdev)
  1452. {
  1453. skdev->state = SKD_DRVR_STATE_FAULT;
  1454. dev_err(&skdev->pdev->dev, "Drive FAULT\n");
  1455. }
  1456. static void skd_drive_disappeared(struct skd_device *skdev)
  1457. {
  1458. skdev->state = SKD_DRVR_STATE_DISAPPEARED;
  1459. dev_err(&skdev->pdev->dev, "Drive DISAPPEARED\n");
  1460. }
  1461. static void skd_isr_fwstate(struct skd_device *skdev)
  1462. {
  1463. u32 sense;
  1464. u32 state;
  1465. u32 mtd;
  1466. int prev_driver_state = skdev->state;
  1467. sense = SKD_READL(skdev, FIT_STATUS);
  1468. state = sense & FIT_SR_DRIVE_STATE_MASK;
  1469. dev_err(&skdev->pdev->dev, "s1120 state %s(%d)=>%s(%d)\n",
  1470. skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
  1471. skd_drive_state_to_str(state), state);
  1472. skdev->drive_state = state;
  1473. switch (skdev->drive_state) {
  1474. case FIT_SR_DRIVE_INIT:
  1475. if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
  1476. skd_disable_interrupts(skdev);
  1477. break;
  1478. }
  1479. if (skdev->state == SKD_DRVR_STATE_RESTARTING)
  1480. skd_recover_requests(skdev);
  1481. if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
  1482. skdev->timer_countdown = SKD_STARTING_TIMO;
  1483. skdev->state = SKD_DRVR_STATE_STARTING;
  1484. skd_soft_reset(skdev);
  1485. break;
  1486. }
  1487. mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
  1488. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  1489. skdev->last_mtd = mtd;
  1490. break;
  1491. case FIT_SR_DRIVE_ONLINE:
  1492. skdev->cur_max_queue_depth = skd_max_queue_depth;
  1493. if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
  1494. skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
  1495. skdev->queue_low_water_mark =
  1496. skdev->cur_max_queue_depth * 2 / 3 + 1;
  1497. if (skdev->queue_low_water_mark < 1)
  1498. skdev->queue_low_water_mark = 1;
  1499. dev_info(&skdev->pdev->dev,
  1500. "Queue depth limit=%d dev=%d lowat=%d\n",
  1501. skdev->cur_max_queue_depth,
  1502. skdev->dev_max_queue_depth,
  1503. skdev->queue_low_water_mark);
  1504. skd_refresh_device_data(skdev);
  1505. break;
  1506. case FIT_SR_DRIVE_BUSY:
  1507. skdev->state = SKD_DRVR_STATE_BUSY;
  1508. skdev->timer_countdown = SKD_BUSY_TIMO;
  1509. skd_quiesce_dev(skdev);
  1510. break;
  1511. case FIT_SR_DRIVE_BUSY_SANITIZE:
1512. /* set the timer for 3 seconds; we'll abort any unfinished
1513. * commands after it expires
  1514. */
  1515. skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
  1516. skdev->timer_countdown = SKD_TIMER_SECONDS(3);
  1517. schedule_work(&skdev->start_queue);
  1518. break;
  1519. case FIT_SR_DRIVE_BUSY_ERASE:
  1520. skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
  1521. skdev->timer_countdown = SKD_BUSY_TIMO;
  1522. break;
  1523. case FIT_SR_DRIVE_OFFLINE:
  1524. skdev->state = SKD_DRVR_STATE_IDLE;
  1525. break;
  1526. case FIT_SR_DRIVE_SOFT_RESET:
  1527. switch (skdev->state) {
  1528. case SKD_DRVR_STATE_STARTING:
  1529. case SKD_DRVR_STATE_RESTARTING:
  1530. /* Expected by a caller of skd_soft_reset() */
  1531. break;
  1532. default:
  1533. skdev->state = SKD_DRVR_STATE_RESTARTING;
  1534. break;
  1535. }
  1536. break;
  1537. case FIT_SR_DRIVE_FW_BOOTING:
  1538. dev_dbg(&skdev->pdev->dev, "ISR FIT_SR_DRIVE_FW_BOOTING\n");
  1539. skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
  1540. skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
  1541. break;
  1542. case FIT_SR_DRIVE_DEGRADED:
  1543. case FIT_SR_PCIE_LINK_DOWN:
  1544. case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
  1545. break;
  1546. case FIT_SR_DRIVE_FAULT:
  1547. skd_drive_fault(skdev);
  1548. skd_recover_requests(skdev);
  1549. schedule_work(&skdev->start_queue);
  1550. break;
  1551. /* PCIe bus returned all Fs? */
  1552. case 0xFF:
  1553. dev_info(&skdev->pdev->dev, "state=0x%x sense=0x%x\n", state,
  1554. sense);
  1555. skd_drive_disappeared(skdev);
  1556. skd_recover_requests(skdev);
  1557. schedule_work(&skdev->start_queue);
  1558. break;
  1559. default:
  1560. /*
1561. * Unknown FW state. Wait for a state we recognize.
  1562. */
  1563. break;
  1564. }
  1565. dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
  1566. skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
  1567. skd_skdev_state_to_str(skdev->state), skdev->state);
  1568. }
  1569. static void skd_recover_request(struct request *req, void *data, bool reserved)
  1570. {
  1571. struct skd_device *const skdev = data;
  1572. struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);
  1573. if (skreq->state != SKD_REQ_STATE_BUSY)
  1574. return;
  1575. skd_log_skreq(skdev, skreq, "recover");
  1576. /* Release DMA resources for the request. */
  1577. if (skreq->n_sg > 0)
  1578. skd_postop_sg_list(skdev, skreq);
  1579. skreq->state = SKD_REQ_STATE_IDLE;
  1580. skreq->status = BLK_STS_IOERR;
  1581. blk_mq_complete_request(req);
  1582. }
  1583. static void skd_recover_requests(struct skd_device *skdev)
  1584. {
  1585. blk_mq_tagset_busy_iter(&skdev->tag_set, skd_recover_request, skdev);
  1586. }
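/*
 * Device initialization handshake, driven one step at a time by messages
 * from the device: FITFW_INIT -> GET_CMDQ_DEPTH -> SET_COMPQ_DEPTH ->
 * SET_COMPQ_ADDR -> CMD_LOG_HOST_ID -> CMD_LOG_TIME_STAMP_LO/HI ->
 * ARM_QUEUE, after which the drive is expected to report ONLINE.
 */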
  1587. static void skd_isr_msg_from_dev(struct skd_device *skdev)
  1588. {
  1589. u32 mfd;
  1590. u32 mtd;
  1591. u32 data;
  1592. mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
  1593. dev_dbg(&skdev->pdev->dev, "mfd=0x%x last_mtd=0x%x\n", mfd,
  1594. skdev->last_mtd);
  1595. /* ignore any mtd that is an ack for something we didn't send */
  1596. if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
  1597. return;
  1598. switch (FIT_MXD_TYPE(mfd)) {
  1599. case FIT_MTD_FITFW_INIT:
  1600. skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
  1601. if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
  1602. dev_err(&skdev->pdev->dev, "protocol mismatch\n");
  1603. dev_err(&skdev->pdev->dev, " got=%d support=%d\n",
  1604. skdev->proto_ver, FIT_PROTOCOL_VERSION_1);
  1605. dev_err(&skdev->pdev->dev, " please upgrade driver\n");
  1606. skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
  1607. skd_soft_reset(skdev);
  1608. break;
  1609. }
  1610. mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
  1611. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  1612. skdev->last_mtd = mtd;
  1613. break;
  1614. case FIT_MTD_GET_CMDQ_DEPTH:
  1615. skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
  1616. mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
  1617. SKD_N_COMPLETION_ENTRY);
  1618. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  1619. skdev->last_mtd = mtd;
  1620. break;
  1621. case FIT_MTD_SET_COMPQ_DEPTH:
  1622. SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
  1623. mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
  1624. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  1625. skdev->last_mtd = mtd;
  1626. break;
  1627. case FIT_MTD_SET_COMPQ_ADDR:
  1628. skd_reset_skcomp(skdev);
  1629. mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
  1630. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  1631. skdev->last_mtd = mtd;
  1632. break;
  1633. case FIT_MTD_CMD_LOG_HOST_ID:
  1634. /* hardware interface overflows in y2106 */
  1635. skdev->connect_time_stamp = (u32)ktime_get_real_seconds();
  1636. data = skdev->connect_time_stamp & 0xFFFF;
  1637. mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
  1638. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  1639. skdev->last_mtd = mtd;
  1640. break;
  1641. case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
  1642. skdev->drive_jiffies = FIT_MXD_DATA(mfd);
  1643. data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
  1644. mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
  1645. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  1646. skdev->last_mtd = mtd;
  1647. break;
  1648. case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
  1649. skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
  1650. mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
  1651. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  1652. skdev->last_mtd = mtd;
  1653. dev_err(&skdev->pdev->dev, "Time sync driver=0x%x device=0x%x\n",
  1654. skdev->connect_time_stamp, skdev->drive_jiffies);
  1655. break;
  1656. case FIT_MTD_ARM_QUEUE:
  1657. skdev->last_mtd = 0;
  1658. /*
  1659. * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
  1660. */
  1661. break;
  1662. default:
  1663. break;
  1664. }
  1665. }
  1666. static void skd_disable_interrupts(struct skd_device *skdev)
  1667. {
  1668. u32 sense;
  1669. sense = SKD_READL(skdev, FIT_CONTROL);
  1670. sense &= ~FIT_CR_ENABLE_INTERRUPTS;
  1671. SKD_WRITEL(skdev, sense, FIT_CONTROL);
  1672. dev_dbg(&skdev->pdev->dev, "sense 0x%x\n", sense);
1673. /* Note that all 1s are written. A 1 bit means
  1674. * disable, a 0 means enable.
  1675. */
  1676. SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
  1677. }
  1678. static void skd_enable_interrupts(struct skd_device *skdev)
  1679. {
  1680. u32 val;
  1681. /* unmask interrupts first */
  1682. val = FIT_ISH_FW_STATE_CHANGE +
  1683. FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;
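/*
 * The FIT_ISH_* values are used here as distinct single-bit interrupt
 * flags, so adding them builds the same mask as OR-ing them together.
 */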
1684. /* Note that the complement of the mask is written. A 1 bit means
  1685. * disable, a 0 means enable. */
  1686. SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
  1687. dev_dbg(&skdev->pdev->dev, "interrupt mask=0x%x\n", ~val);
  1688. val = SKD_READL(skdev, FIT_CONTROL);
  1689. val |= FIT_CR_ENABLE_INTERRUPTS;
  1690. dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
  1691. SKD_WRITEL(skdev, val, FIT_CONTROL);
  1692. }
  1693. /*
  1694. *****************************************************************************
  1695. * START, STOP, RESTART, QUIESCE, UNQUIESCE
  1696. *****************************************************************************
  1697. */
  1698. static void skd_soft_reset(struct skd_device *skdev)
  1699. {
  1700. u32 val;
  1701. val = SKD_READL(skdev, FIT_CONTROL);
  1702. val |= (FIT_CR_SOFT_RESET);
  1703. dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
  1704. SKD_WRITEL(skdev, val, FIT_CONTROL);
  1705. }
  1706. static void skd_start_device(struct skd_device *skdev)
  1707. {
  1708. unsigned long flags;
  1709. u32 sense;
  1710. u32 state;
  1711. spin_lock_irqsave(&skdev->lock, flags);
  1712. /* ack all ghost interrupts */
  1713. SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
  1714. sense = SKD_READL(skdev, FIT_STATUS);
  1715. dev_dbg(&skdev->pdev->dev, "initial status=0x%x\n", sense);
  1716. state = sense & FIT_SR_DRIVE_STATE_MASK;
  1717. skdev->drive_state = state;
  1718. skdev->last_mtd = 0;
  1719. skdev->state = SKD_DRVR_STATE_STARTING;
  1720. skdev->timer_countdown = SKD_STARTING_TIMO;
  1721. skd_enable_interrupts(skdev);
  1722. switch (skdev->drive_state) {
  1723. case FIT_SR_DRIVE_OFFLINE:
  1724. dev_err(&skdev->pdev->dev, "Drive offline...\n");
  1725. break;
  1726. case FIT_SR_DRIVE_FW_BOOTING:
  1727. dev_dbg(&skdev->pdev->dev, "FIT_SR_DRIVE_FW_BOOTING\n");
  1728. skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
  1729. skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
  1730. break;
  1731. case FIT_SR_DRIVE_BUSY_SANITIZE:
  1732. dev_info(&skdev->pdev->dev, "Start: BUSY_SANITIZE\n");
  1733. skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
  1734. skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
  1735. break;
  1736. case FIT_SR_DRIVE_BUSY_ERASE:
  1737. dev_info(&skdev->pdev->dev, "Start: BUSY_ERASE\n");
  1738. skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
  1739. skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
  1740. break;
  1741. case FIT_SR_DRIVE_INIT:
  1742. case FIT_SR_DRIVE_ONLINE:
  1743. skd_soft_reset(skdev);
  1744. break;
  1745. case FIT_SR_DRIVE_BUSY:
  1746. dev_err(&skdev->pdev->dev, "Drive Busy...\n");
  1747. skdev->state = SKD_DRVR_STATE_BUSY;
  1748. skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
  1749. break;
  1750. case FIT_SR_DRIVE_SOFT_RESET:
  1751. dev_err(&skdev->pdev->dev, "drive soft reset in prog\n");
  1752. break;
  1753. case FIT_SR_DRIVE_FAULT:
1754. /* The fault state is bad: a soft reset won't clear it, and it is
1755. * unclear whether a hard reset would work on this device.
  1756. * For now, just fault so the system doesn't hang.
  1757. */
  1758. skd_drive_fault(skdev);
1759. /* start the queue so we can respond to requests with errors */
  1760. dev_dbg(&skdev->pdev->dev, "starting queue\n");
  1761. schedule_work(&skdev->start_queue);
  1762. skdev->gendisk_on = -1;
  1763. wake_up_interruptible(&skdev->waitq);
  1764. break;
  1765. case 0xFF:
  1766. /* Most likely the device isn't there or isn't responding
  1767. * to the BAR1 addresses. */
  1768. skd_drive_disappeared(skdev);
1769. /* start the queue so we can respond to requests with errors */
  1770. dev_dbg(&skdev->pdev->dev,
  1771. "starting queue to error-out reqs\n");
  1772. schedule_work(&skdev->start_queue);
  1773. skdev->gendisk_on = -1;
  1774. wake_up_interruptible(&skdev->waitq);
  1775. break;
  1776. default:
  1777. dev_err(&skdev->pdev->dev, "Start: unknown state %x\n",
  1778. skdev->drive_state);
  1779. break;
  1780. }
  1781. state = SKD_READL(skdev, FIT_CONTROL);
  1782. dev_dbg(&skdev->pdev->dev, "FIT Control Status=0x%x\n", state);
  1783. state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
  1784. dev_dbg(&skdev->pdev->dev, "Intr Status=0x%x\n", state);
  1785. state = SKD_READL(skdev, FIT_INT_MASK_HOST);
  1786. dev_dbg(&skdev->pdev->dev, "Intr Mask=0x%x\n", state);
  1787. state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
  1788. dev_dbg(&skdev->pdev->dev, "Msg from Dev=0x%x\n", state);
  1789. state = SKD_READL(skdev, FIT_HW_VERSION);
  1790. dev_dbg(&skdev->pdev->dev, "HW version=0x%x\n", state);
  1791. spin_unlock_irqrestore(&skdev->lock, flags);
  1792. }
  1793. static void skd_stop_device(struct skd_device *skdev)
  1794. {
  1795. unsigned long flags;
  1796. struct skd_special_context *skspcl = &skdev->internal_skspcl;
  1797. u32 dev_state;
  1798. int i;
  1799. spin_lock_irqsave(&skdev->lock, flags);
  1800. if (skdev->state != SKD_DRVR_STATE_ONLINE) {
  1801. dev_err(&skdev->pdev->dev, "%s not online no sync\n", __func__);
  1802. goto stop_out;
  1803. }
  1804. if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
  1805. dev_err(&skdev->pdev->dev, "%s no special\n", __func__);
  1806. goto stop_out;
  1807. }
  1808. skdev->state = SKD_DRVR_STATE_SYNCING;
  1809. skdev->sync_done = 0;
  1810. skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
  1811. spin_unlock_irqrestore(&skdev->lock, flags);
  1812. wait_event_interruptible_timeout(skdev->waitq,
  1813. (skdev->sync_done), (10 * HZ));
  1814. spin_lock_irqsave(&skdev->lock, flags);
  1815. switch (skdev->sync_done) {
  1816. case 0:
  1817. dev_err(&skdev->pdev->dev, "%s no sync\n", __func__);
  1818. break;
  1819. case 1:
  1820. dev_err(&skdev->pdev->dev, "%s sync done\n", __func__);
  1821. break;
  1822. default:
  1823. dev_err(&skdev->pdev->dev, "%s sync error\n", __func__);
  1824. }
  1825. stop_out:
  1826. skdev->state = SKD_DRVR_STATE_STOPPING;
  1827. spin_unlock_irqrestore(&skdev->lock, flags);
  1828. skd_kill_timer(skdev);
  1829. spin_lock_irqsave(&skdev->lock, flags);
  1830. skd_disable_interrupts(skdev);
  1831. /* ensure all ints on device are cleared */
  1832. /* soft reset the device to unload with a clean slate */
  1833. SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
  1834. SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
  1835. spin_unlock_irqrestore(&skdev->lock, flags);
  1836. /* poll every 100ms, 1 second timeout */
  1837. for (i = 0; i < 10; i++) {
  1838. dev_state =
  1839. SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
  1840. if (dev_state == FIT_SR_DRIVE_INIT)
  1841. break;
  1842. set_current_state(TASK_INTERRUPTIBLE);
  1843. schedule_timeout(msecs_to_jiffies(100));
  1844. }
  1845. if (dev_state != FIT_SR_DRIVE_INIT)
  1846. dev_err(&skdev->pdev->dev, "%s state error 0x%02x\n", __func__,
  1847. dev_state);
  1848. }
  1849. /* assume spinlock is held */
  1850. static void skd_restart_device(struct skd_device *skdev)
  1851. {
  1852. u32 state;
  1853. /* ack all ghost interrupts */
  1854. SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
  1855. state = SKD_READL(skdev, FIT_STATUS);
  1856. dev_dbg(&skdev->pdev->dev, "drive status=0x%x\n", state);
  1857. state &= FIT_SR_DRIVE_STATE_MASK;
  1858. skdev->drive_state = state;
  1859. skdev->last_mtd = 0;
  1860. skdev->state = SKD_DRVR_STATE_RESTARTING;
  1861. skdev->timer_countdown = SKD_RESTARTING_TIMO;
  1862. skd_soft_reset(skdev);
  1863. }
  1864. /* assume spinlock is held */
  1865. static int skd_quiesce_dev(struct skd_device *skdev)
  1866. {
  1867. int rc = 0;
  1868. switch (skdev->state) {
  1869. case SKD_DRVR_STATE_BUSY:
  1870. case SKD_DRVR_STATE_BUSY_IMMINENT:
  1871. dev_dbg(&skdev->pdev->dev, "stopping queue\n");
  1872. blk_mq_stop_hw_queues(skdev->queue);
  1873. break;
  1874. case SKD_DRVR_STATE_ONLINE:
  1875. case SKD_DRVR_STATE_STOPPING:
  1876. case SKD_DRVR_STATE_SYNCING:
  1877. case SKD_DRVR_STATE_PAUSING:
  1878. case SKD_DRVR_STATE_PAUSED:
  1879. case SKD_DRVR_STATE_STARTING:
  1880. case SKD_DRVR_STATE_RESTARTING:
  1881. case SKD_DRVR_STATE_RESUMING:
  1882. default:
  1883. rc = -EINVAL;
  1884. dev_dbg(&skdev->pdev->dev, "state [%d] not implemented\n",
  1885. skdev->state);
  1886. }
  1887. return rc;
  1888. }
  1889. /* assume spinlock is held */
  1890. static int skd_unquiesce_dev(struct skd_device *skdev)
  1891. {
  1892. int prev_driver_state = skdev->state;
  1893. skd_log_skdev(skdev, "unquiesce");
  1894. if (skdev->state == SKD_DRVR_STATE_ONLINE) {
  1895. dev_dbg(&skdev->pdev->dev, "**** device already ONLINE\n");
  1896. return 0;
  1897. }
  1898. if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
  1899. /*
1900. * If there has been a state change to something other than
1901. * ONLINE, we will rely on a controller state change
1902. * to come back online and restart the queue.
1903. * The BUSY state means that the driver is ready to
1904. * continue normal processing but is waiting for the controller
  1905. * to become available.
  1906. */
  1907. skdev->state = SKD_DRVR_STATE_BUSY;
  1908. dev_dbg(&skdev->pdev->dev, "drive BUSY state\n");
  1909. return 0;
  1910. }
  1911. /*
1912. * The drive has just come online; the driver is either in startup,
1913. * paused performing a task, or busy waiting for hardware.
  1914. */
  1915. switch (skdev->state) {
  1916. case SKD_DRVR_STATE_PAUSED:
  1917. case SKD_DRVR_STATE_BUSY:
  1918. case SKD_DRVR_STATE_BUSY_IMMINENT:
  1919. case SKD_DRVR_STATE_BUSY_ERASE:
  1920. case SKD_DRVR_STATE_STARTING:
  1921. case SKD_DRVR_STATE_RESTARTING:
  1922. case SKD_DRVR_STATE_FAULT:
  1923. case SKD_DRVR_STATE_IDLE:
  1924. case SKD_DRVR_STATE_LOAD:
  1925. skdev->state = SKD_DRVR_STATE_ONLINE;
  1926. dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
  1927. skd_skdev_state_to_str(prev_driver_state),
  1928. prev_driver_state, skd_skdev_state_to_str(skdev->state),
  1929. skdev->state);
  1930. dev_dbg(&skdev->pdev->dev,
  1931. "**** device ONLINE...starting block queue\n");
  1932. dev_dbg(&skdev->pdev->dev, "starting queue\n");
  1933. dev_info(&skdev->pdev->dev, "STEC s1120 ONLINE\n");
  1934. schedule_work(&skdev->start_queue);
  1935. skdev->gendisk_on = 1;
  1936. wake_up_interruptible(&skdev->waitq);
  1937. break;
  1938. case SKD_DRVR_STATE_DISAPPEARED:
  1939. default:
  1940. dev_dbg(&skdev->pdev->dev,
  1941. "**** driver state %d, not implemented\n",
  1942. skdev->state);
  1943. return -EBUSY;
  1944. }
  1945. return 0;
  1946. }
  1947. /*
  1948. *****************************************************************************
  1949. * PCIe MSI/MSI-X INTERRUPT HANDLERS
  1950. *****************************************************************************
  1951. */
  1952. static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
  1953. {
  1954. struct skd_device *skdev = skd_host_data;
  1955. unsigned long flags;
  1956. spin_lock_irqsave(&skdev->lock, flags);
  1957. dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
  1958. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  1959. dev_err(&skdev->pdev->dev, "MSIX reserved irq %d = 0x%x\n", irq,
  1960. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  1961. SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
  1962. spin_unlock_irqrestore(&skdev->lock, flags);
  1963. return IRQ_HANDLED;
  1964. }
  1965. static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
  1966. {
  1967. struct skd_device *skdev = skd_host_data;
  1968. unsigned long flags;
  1969. spin_lock_irqsave(&skdev->lock, flags);
  1970. dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
  1971. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  1972. SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
  1973. skd_isr_fwstate(skdev);
  1974. spin_unlock_irqrestore(&skdev->lock, flags);
  1975. return IRQ_HANDLED;
  1976. }
  1977. static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
  1978. {
  1979. struct skd_device *skdev = skd_host_data;
  1980. unsigned long flags;
  1981. int flush_enqueued = 0;
  1982. int deferred;
  1983. spin_lock_irqsave(&skdev->lock, flags);
  1984. dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
  1985. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  1986. SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
  1987. deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
  1988. &flush_enqueued);
  1989. if (flush_enqueued)
  1990. schedule_work(&skdev->start_queue);
  1991. if (deferred)
  1992. schedule_work(&skdev->completion_worker);
  1993. else if (!flush_enqueued)
  1994. schedule_work(&skdev->start_queue);
  1995. spin_unlock_irqrestore(&skdev->lock, flags);
  1996. return IRQ_HANDLED;
  1997. }
  1998. static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
  1999. {
  2000. struct skd_device *skdev = skd_host_data;
  2001. unsigned long flags;
  2002. spin_lock_irqsave(&skdev->lock, flags);
  2003. dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
  2004. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  2005. SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
  2006. skd_isr_msg_from_dev(skdev);
  2007. spin_unlock_irqrestore(&skdev->lock, flags);
  2008. return IRQ_HANDLED;
  2009. }
  2010. static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
  2011. {
  2012. struct skd_device *skdev = skd_host_data;
  2013. unsigned long flags;
  2014. spin_lock_irqsave(&skdev->lock, flags);
  2015. dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
  2016. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  2017. SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
  2018. spin_unlock_irqrestore(&skdev->lock, flags);
  2019. return IRQ_HANDLED;
  2020. }
  2021. /*
  2022. *****************************************************************************
  2023. * PCIe MSI/MSI-X SETUP
  2024. *****************************************************************************
  2025. */
  2026. struct skd_msix_entry {
  2027. char isr_name[30];
  2028. };
  2029. struct skd_init_msix_entry {
  2030. const char *name;
  2031. irq_handler_t handler;
  2032. };
  2033. #define SKD_MAX_MSIX_COUNT 13
  2034. #define SKD_MIN_MSIX_COUNT 7
  2035. #define SKD_BASE_MSIX_IRQ 4
  2036. static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
  2037. { "(DMA 0)", skd_reserved_isr },
  2038. { "(DMA 1)", skd_reserved_isr },
  2039. { "(DMA 2)", skd_reserved_isr },
  2040. { "(DMA 3)", skd_reserved_isr },
  2041. { "(State Change)", skd_statec_isr },
  2042. { "(COMPL_Q)", skd_comp_q },
  2043. { "(MSG)", skd_msg_isr },
  2044. { "(Reserved)", skd_reserved_isr },
  2045. { "(Reserved)", skd_reserved_isr },
  2046. { "(Queue Full 0)", skd_qfull_isr },
  2047. { "(Queue Full 1)", skd_qfull_isr },
  2048. { "(Queue Full 2)", skd_qfull_isr },
  2049. { "(Queue Full 3)", skd_qfull_isr },
  2050. };
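/*
 * The position of each entry above is the MSI-X vector it is registered
 * on: skd_acquire_msix() requests one IRQ per entry in table order, so
 * vector 4 handles state changes, vector 5 the completion queue and
 * vector 6 messages from the device.
 */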
  2051. static int skd_acquire_msix(struct skd_device *skdev)
  2052. {
  2053. int i, rc;
  2054. struct pci_dev *pdev = skdev->pdev;
  2055. rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT,
  2056. PCI_IRQ_MSIX);
  2057. if (rc < 0) {
  2058. dev_err(&skdev->pdev->dev, "failed to enable MSI-X %d\n", rc);
  2059. goto out;
  2060. }
  2061. skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT,
  2062. sizeof(struct skd_msix_entry), GFP_KERNEL);
  2063. if (!skdev->msix_entries) {
  2064. rc = -ENOMEM;
  2065. dev_err(&skdev->pdev->dev, "msix table allocation error\n");
  2066. goto out;
  2067. }
  2068. /* Enable MSI-X vectors for the base queue */
  2069. for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
  2070. struct skd_msix_entry *qentry = &skdev->msix_entries[i];
  2071. snprintf(qentry->isr_name, sizeof(qentry->isr_name),
  2072. "%s%d-msix %s", DRV_NAME, skdev->devno,
  2073. msix_entries[i].name);
  2074. rc = devm_request_irq(&skdev->pdev->dev,
  2075. pci_irq_vector(skdev->pdev, i),
  2076. msix_entries[i].handler, 0,
  2077. qentry->isr_name, skdev);
  2078. if (rc) {
  2079. dev_err(&skdev->pdev->dev,
  2080. "Unable to register(%d) MSI-X handler %d: %s\n",
  2081. rc, i, qentry->isr_name);
  2082. goto msix_out;
  2083. }
  2084. }
  2085. dev_dbg(&skdev->pdev->dev, "%d msix irq(s) enabled\n",
  2086. SKD_MAX_MSIX_COUNT);
  2087. return 0;
  2088. msix_out:
  2089. while (--i >= 0)
  2090. devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev);
  2091. out:
  2092. kfree(skdev->msix_entries);
  2093. skdev->msix_entries = NULL;
  2094. return rc;
  2095. }
  2096. static int skd_acquire_irq(struct skd_device *skdev)
  2097. {
  2098. struct pci_dev *pdev = skdev->pdev;
  2099. unsigned int irq_flag = PCI_IRQ_LEGACY;
  2100. int rc;
  2101. if (skd_isr_type == SKD_IRQ_MSIX) {
  2102. rc = skd_acquire_msix(skdev);
  2103. if (!rc)
  2104. return 0;
  2105. dev_err(&skdev->pdev->dev,
  2106. "failed to enable MSI-X, re-trying with MSI %d\n", rc);
  2107. }
  2108. snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
  2109. skdev->devno);
  2110. if (skd_isr_type != SKD_IRQ_LEGACY)
  2111. irq_flag |= PCI_IRQ_MSI;
  2112. rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag);
  2113. if (rc < 0) {
  2114. dev_err(&skdev->pdev->dev,
  2115. "failed to allocate the MSI interrupt %d\n", rc);
  2116. return rc;
  2117. }
  2118. rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
  2119. pdev->msi_enabled ? 0 : IRQF_SHARED,
  2120. skdev->isr_name, skdev);
  2121. if (rc) {
  2122. pci_free_irq_vectors(pdev);
  2123. dev_err(&skdev->pdev->dev, "failed to allocate interrupt %d\n",
  2124. rc);
  2125. return rc;
  2126. }
  2127. return 0;
  2128. }
  2129. static void skd_release_irq(struct skd_device *skdev)
  2130. {
  2131. struct pci_dev *pdev = skdev->pdev;
  2132. if (skdev->msix_entries) {
  2133. int i;
  2134. for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
  2135. devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
  2136. skdev);
  2137. }
  2138. kfree(skdev->msix_entries);
  2139. skdev->msix_entries = NULL;
  2140. } else {
  2141. devm_free_irq(&pdev->dev, pdev->irq, skdev);
  2142. }
  2143. pci_free_irq_vectors(pdev);
  2144. }
  2145. /*
  2146. *****************************************************************************
  2147. * CONSTRUCT
  2148. *****************************************************************************
  2149. */
  2150. static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s,
  2151. dma_addr_t *dma_handle, gfp_t gfp,
  2152. enum dma_data_direction dir)
  2153. {
  2154. struct device *dev = &skdev->pdev->dev;
  2155. void *buf;
  2156. buf = kmem_cache_alloc(s, gfp);
  2157. if (!buf)
  2158. return NULL;
  2159. *dma_handle = dma_map_single(dev, buf,
  2160. kmem_cache_size(s), dir);
  2161. if (dma_mapping_error(dev, *dma_handle)) {
  2162. kmem_cache_free(s, buf);
  2163. buf = NULL;
  2164. }
  2165. return buf;
  2166. }
  2167. static void skd_free_dma(struct skd_device *skdev, struct kmem_cache *s,
  2168. void *vaddr, dma_addr_t dma_handle,
  2169. enum dma_data_direction dir)
  2170. {
  2171. if (!vaddr)
  2172. return;
  2173. dma_unmap_single(&skdev->pdev->dev, dma_handle,
  2174. kmem_cache_size(s), dir);
  2175. kmem_cache_free(s, vaddr);
  2176. }
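/*
 * skd_alloc_dma() and skd_free_dma() must be given the same kmem_cache,
 * since kmem_cache_size() supplies the DMA mapping length on both the
 * map and the unmap side.
 */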
  2177. static int skd_cons_skcomp(struct skd_device *skdev)
  2178. {
  2179. int rc = 0;
  2180. struct fit_completion_entry_v1 *skcomp;
  2181. dev_dbg(&skdev->pdev->dev,
  2182. "comp pci_alloc, total bytes %zd entries %d\n",
  2183. SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY);
  2184. skcomp = dma_zalloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
  2185. &skdev->cq_dma_address, GFP_KERNEL);
  2186. if (skcomp == NULL) {
  2187. rc = -ENOMEM;
  2188. goto err_out;
  2189. }
  2190. skdev->skcomp_table = skcomp;
  2191. skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
  2192. sizeof(*skcomp) *
  2193. SKD_N_COMPLETION_ENTRY);
  2194. err_out:
  2195. return rc;
  2196. }
  2197. static int skd_cons_skmsg(struct skd_device *skdev)
  2198. {
  2199. int rc = 0;
  2200. u32 i;
  2201. dev_dbg(&skdev->pdev->dev,
  2202. "skmsg_table kcalloc, struct %lu, count %u total %lu\n",
  2203. sizeof(struct skd_fitmsg_context), skdev->num_fitmsg_context,
  2204. sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
  2205. skdev->skmsg_table = kcalloc(skdev->num_fitmsg_context,
  2206. sizeof(struct skd_fitmsg_context),
  2207. GFP_KERNEL);
  2208. if (skdev->skmsg_table == NULL) {
  2209. rc = -ENOMEM;
  2210. goto err_out;
  2211. }
  2212. for (i = 0; i < skdev->num_fitmsg_context; i++) {
  2213. struct skd_fitmsg_context *skmsg;
  2214. skmsg = &skdev->skmsg_table[i];
  2215. skmsg->id = i + SKD_ID_FIT_MSG;
  2216. skmsg->msg_buf = dma_alloc_coherent(&skdev->pdev->dev,
  2217. SKD_N_FITMSG_BYTES,
  2218. &skmsg->mb_dma_address,
  2219. GFP_KERNEL);
  2220. if (skmsg->msg_buf == NULL) {
  2221. rc = -ENOMEM;
  2222. goto err_out;
  2223. }
  2224. WARN(((uintptr_t)skmsg->msg_buf | skmsg->mb_dma_address) &
  2225. (FIT_QCMD_ALIGN - 1),
  2226. "not aligned: msg_buf %p mb_dma_address %pad\n",
  2227. skmsg->msg_buf, &skmsg->mb_dma_address);
  2228. memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
  2229. }
  2230. err_out:
  2231. return rc;
  2232. }
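/*
 * Build a chained list of FIT SG descriptors in DMA-able memory: each
 * descriptor's next_desc_ptr holds the bus address of the following
 * descriptor, and the final descriptor is terminated with a null pointer.
 */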
  2233. static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
  2234. u32 n_sg,
  2235. dma_addr_t *ret_dma_addr)
  2236. {
  2237. struct fit_sg_descriptor *sg_list;
  2238. sg_list = skd_alloc_dma(skdev, skdev->sglist_cache, ret_dma_addr,
  2239. GFP_DMA | __GFP_ZERO, DMA_TO_DEVICE);
  2240. if (sg_list != NULL) {
  2241. uint64_t dma_address = *ret_dma_addr;
  2242. u32 i;
  2243. for (i = 0; i < n_sg - 1; i++) {
  2244. uint64_t ndp_off;
  2245. ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
  2246. sg_list[i].next_desc_ptr = dma_address + ndp_off;
  2247. }
  2248. sg_list[i].next_desc_ptr = 0LL;
  2249. }
  2250. return sg_list;
  2251. }
  2252. static void skd_free_sg_list(struct skd_device *skdev,
  2253. struct fit_sg_descriptor *sg_list,
  2254. dma_addr_t dma_addr)
  2255. {
  2256. if (WARN_ON_ONCE(!sg_list))
  2257. return;
  2258. skd_free_dma(skdev, skdev->sglist_cache, sg_list, dma_addr,
  2259. DMA_TO_DEVICE);
  2260. }
  2261. static int skd_init_request(struct blk_mq_tag_set *set, struct request *rq,
  2262. unsigned int hctx_idx, unsigned int numa_node)
  2263. {
  2264. struct skd_device *skdev = set->driver_data;
  2265. struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);
  2266. skreq->state = SKD_REQ_STATE_IDLE;
  2267. skreq->sg = (void *)(skreq + 1);
  2268. sg_init_table(skreq->sg, skd_sgs_per_request);
  2269. skreq->sksg_list = skd_cons_sg_list(skdev, skd_sgs_per_request,
  2270. &skreq->sksg_dma_address);
  2271. return skreq->sksg_list ? 0 : -ENOMEM;
  2272. }
  2273. static void skd_exit_request(struct blk_mq_tag_set *set, struct request *rq,
  2274. unsigned int hctx_idx)
  2275. {
  2276. struct skd_device *skdev = set->driver_data;
  2277. struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);
  2278. skd_free_sg_list(skdev, skreq->sksg_list, skreq->sksg_dma_address);
  2279. }
  2280. static int skd_cons_sksb(struct skd_device *skdev)
  2281. {
  2282. int rc = 0;
  2283. struct skd_special_context *skspcl;
  2284. skspcl = &skdev->internal_skspcl;
  2285. skspcl->req.id = 0 + SKD_ID_INTERNAL;
  2286. skspcl->req.state = SKD_REQ_STATE_IDLE;
  2287. skspcl->data_buf = skd_alloc_dma(skdev, skdev->databuf_cache,
  2288. &skspcl->db_dma_address,
  2289. GFP_DMA | __GFP_ZERO,
  2290. DMA_BIDIRECTIONAL);
  2291. if (skspcl->data_buf == NULL) {
  2292. rc = -ENOMEM;
  2293. goto err_out;
  2294. }
  2295. skspcl->msg_buf = skd_alloc_dma(skdev, skdev->msgbuf_cache,
  2296. &skspcl->mb_dma_address,
  2297. GFP_DMA | __GFP_ZERO, DMA_TO_DEVICE);
  2298. if (skspcl->msg_buf == NULL) {
  2299. rc = -ENOMEM;
  2300. goto err_out;
  2301. }
  2302. skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
  2303. &skspcl->req.sksg_dma_address);
  2304. if (skspcl->req.sksg_list == NULL) {
  2305. rc = -ENOMEM;
  2306. goto err_out;
  2307. }
  2308. if (!skd_format_internal_skspcl(skdev)) {
  2309. rc = -EINVAL;
  2310. goto err_out;
  2311. }
  2312. err_out:
  2313. return rc;
  2314. }
  2315. static const struct blk_mq_ops skd_mq_ops = {
  2316. .queue_rq = skd_mq_queue_rq,
  2317. .complete = skd_complete_rq,
  2318. .timeout = skd_timed_out,
  2319. .init_request = skd_init_request,
  2320. .exit_request = skd_exit_request,
  2321. };
  2322. static int skd_cons_disk(struct skd_device *skdev)
  2323. {
  2324. int rc = 0;
  2325. struct gendisk *disk;
  2326. struct request_queue *q;
  2327. unsigned long flags;
  2328. disk = alloc_disk(SKD_MINORS_PER_DEVICE);
  2329. if (!disk) {
  2330. rc = -ENOMEM;
  2331. goto err_out;
  2332. }
  2333. skdev->disk = disk;
  2334. sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
  2335. disk->major = skdev->major;
  2336. disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
  2337. disk->fops = &skd_blockdev_ops;
  2338. disk->private_data = skdev;
  2339. memset(&skdev->tag_set, 0, sizeof(skdev->tag_set));
  2340. skdev->tag_set.ops = &skd_mq_ops;
  2341. skdev->tag_set.nr_hw_queues = 1;
  2342. skdev->tag_set.queue_depth = skd_max_queue_depth;
  2343. skdev->tag_set.cmd_size = sizeof(struct skd_request_context) +
  2344. skdev->sgs_per_request * sizeof(struct scatterlist);
  2345. skdev->tag_set.numa_node = NUMA_NO_NODE;
  2346. skdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
  2347. BLK_MQ_F_SG_MERGE |
  2348. BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_FIFO);
  2349. skdev->tag_set.driver_data = skdev;
  2350. rc = blk_mq_alloc_tag_set(&skdev->tag_set);
  2351. if (rc)
  2352. goto err_out;
  2353. q = blk_mq_init_queue(&skdev->tag_set);
  2354. if (IS_ERR(q)) {
  2355. blk_mq_free_tag_set(&skdev->tag_set);
  2356. rc = PTR_ERR(q);
  2357. goto err_out;
  2358. }
  2359. q->queuedata = skdev;
  2360. skdev->queue = q;
  2361. disk->queue = q;
  2362. blk_queue_write_cache(q, true, true);
  2363. blk_queue_max_segments(q, skdev->sgs_per_request);
  2364. blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
  2365. /* set optimal I/O size to 8KB */
  2366. blk_queue_io_opt(q, 8192);
  2367. blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
  2368. blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
  2369. blk_queue_rq_timeout(q, 8 * HZ);
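/*
 * Requests that do not complete within 8 seconds are handed to
 * skd_timed_out() via the .timeout callback installed in skd_mq_ops.
 */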
  2370. spin_lock_irqsave(&skdev->lock, flags);
  2371. dev_dbg(&skdev->pdev->dev, "stopping queue\n");
  2372. blk_mq_stop_hw_queues(skdev->queue);
  2373. spin_unlock_irqrestore(&skdev->lock, flags);
  2374. err_out:
  2375. return rc;
  2376. }
  2377. #define SKD_N_DEV_TABLE 16u
  2378. static u32 skd_next_devno;
  2379. static struct skd_device *skd_construct(struct pci_dev *pdev)
  2380. {
  2381. struct skd_device *skdev;
  2382. int blk_major = skd_major;
  2383. size_t size;
  2384. int rc;
  2385. skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
  2386. if (!skdev) {
  2387. dev_err(&pdev->dev, "memory alloc failure\n");
  2388. return NULL;
  2389. }
  2390. skdev->state = SKD_DRVR_STATE_LOAD;
  2391. skdev->pdev = pdev;
  2392. skdev->devno = skd_next_devno++;
  2393. skdev->major = blk_major;
  2394. skdev->dev_max_queue_depth = 0;
  2395. skdev->num_req_context = skd_max_queue_depth;
  2396. skdev->num_fitmsg_context = skd_max_queue_depth;
  2397. skdev->cur_max_queue_depth = 1;
  2398. skdev->queue_low_water_mark = 1;
  2399. skdev->proto_ver = 99;
  2400. skdev->sgs_per_request = skd_sgs_per_request;
  2401. skdev->dbg_level = skd_dbg_level;
  2402. spin_lock_init(&skdev->lock);
  2403. INIT_WORK(&skdev->start_queue, skd_start_queue);
  2404. INIT_WORK(&skdev->completion_worker, skd_completion_worker);
  2405. size = max(SKD_N_FITMSG_BYTES, SKD_N_SPECIAL_FITMSG_BYTES);
  2406. skdev->msgbuf_cache = kmem_cache_create("skd-msgbuf", size, 0,
  2407. SLAB_HWCACHE_ALIGN, NULL);
  2408. if (!skdev->msgbuf_cache)
  2409. goto err_out;
  2410. WARN_ONCE(kmem_cache_size(skdev->msgbuf_cache) < size,
  2411. "skd-msgbuf: %d < %zd\n",
  2412. kmem_cache_size(skdev->msgbuf_cache), size);
  2413. size = skd_sgs_per_request * sizeof(struct fit_sg_descriptor);
  2414. skdev->sglist_cache = kmem_cache_create("skd-sglist", size, 0,
  2415. SLAB_HWCACHE_ALIGN, NULL);
  2416. if (!skdev->sglist_cache)
  2417. goto err_out;
  2418. WARN_ONCE(kmem_cache_size(skdev->sglist_cache) < size,
  2419. "skd-sglist: %d < %zd\n",
  2420. kmem_cache_size(skdev->sglist_cache), size);
  2421. size = SKD_N_INTERNAL_BYTES;
  2422. skdev->databuf_cache = kmem_cache_create("skd-databuf", size, 0,
  2423. SLAB_HWCACHE_ALIGN, NULL);
  2424. if (!skdev->databuf_cache)
  2425. goto err_out;
  2426. WARN_ONCE(kmem_cache_size(skdev->databuf_cache) < size,
  2427. "skd-databuf: %d < %zd\n",
  2428. kmem_cache_size(skdev->databuf_cache), size);
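/*
 * The WARN_ONCE checks above guard the assumption made by skd_alloc_dma():
 * the DMA mapping length comes from kmem_cache_size(), so each cache must
 * be at least as large as the object it backs.
 */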
  2429. dev_dbg(&skdev->pdev->dev, "skcomp\n");
  2430. rc = skd_cons_skcomp(skdev);
  2431. if (rc < 0)
  2432. goto err_out;
  2433. dev_dbg(&skdev->pdev->dev, "skmsg\n");
  2434. rc = skd_cons_skmsg(skdev);
  2435. if (rc < 0)
  2436. goto err_out;
  2437. dev_dbg(&skdev->pdev->dev, "sksb\n");
  2438. rc = skd_cons_sksb(skdev);
  2439. if (rc < 0)
  2440. goto err_out;
  2441. dev_dbg(&skdev->pdev->dev, "disk\n");
  2442. rc = skd_cons_disk(skdev);
  2443. if (rc < 0)
  2444. goto err_out;
  2445. dev_dbg(&skdev->pdev->dev, "VICTORY\n");
  2446. return skdev;
  2447. err_out:
  2448. dev_dbg(&skdev->pdev->dev, "construct failed\n");
  2449. skd_destruct(skdev);
  2450. return NULL;
  2451. }
  2452. /*
  2453. *****************************************************************************
  2454. * DESTRUCT (FREE)
  2455. *****************************************************************************
  2456. */
  2457. static void skd_free_skcomp(struct skd_device *skdev)
  2458. {
  2459. if (skdev->skcomp_table)
  2460. dma_free_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
  2461. skdev->skcomp_table, skdev->cq_dma_address);
  2462. skdev->skcomp_table = NULL;
  2463. skdev->cq_dma_address = 0;
  2464. }
  2465. static void skd_free_skmsg(struct skd_device *skdev)
  2466. {
  2467. u32 i;
  2468. if (skdev->skmsg_table == NULL)
  2469. return;
  2470. for (i = 0; i < skdev->num_fitmsg_context; i++) {
  2471. struct skd_fitmsg_context *skmsg;
  2472. skmsg = &skdev->skmsg_table[i];
  2473. if (skmsg->msg_buf != NULL) {
  2474. dma_free_coherent(&skdev->pdev->dev, SKD_N_FITMSG_BYTES,
  2475. skmsg->msg_buf,
  2476. skmsg->mb_dma_address);
  2477. }
  2478. skmsg->msg_buf = NULL;
  2479. skmsg->mb_dma_address = 0;
  2480. }
  2481. kfree(skdev->skmsg_table);
  2482. skdev->skmsg_table = NULL;
  2483. }
  2484. static void skd_free_sksb(struct skd_device *skdev)
  2485. {
  2486. struct skd_special_context *skspcl = &skdev->internal_skspcl;
  2487. skd_free_dma(skdev, skdev->databuf_cache, skspcl->data_buf,
  2488. skspcl->db_dma_address, DMA_BIDIRECTIONAL);
  2489. skspcl->data_buf = NULL;
  2490. skspcl->db_dma_address = 0;
  2491. skd_free_dma(skdev, skdev->msgbuf_cache, skspcl->msg_buf,
  2492. skspcl->mb_dma_address, DMA_TO_DEVICE);
  2493. skspcl->msg_buf = NULL;
  2494. skspcl->mb_dma_address = 0;
  2495. skd_free_sg_list(skdev, skspcl->req.sksg_list,
  2496. skspcl->req.sksg_dma_address);
  2497. skspcl->req.sksg_list = NULL;
  2498. skspcl->req.sksg_dma_address = 0;
  2499. }
  2500. static void skd_free_disk(struct skd_device *skdev)
  2501. {
  2502. struct gendisk *disk = skdev->disk;
  2503. if (disk && (disk->flags & GENHD_FL_UP))
  2504. del_gendisk(disk);
  2505. if (skdev->queue) {
  2506. blk_cleanup_queue(skdev->queue);
  2507. skdev->queue = NULL;
  2508. if (disk)
  2509. disk->queue = NULL;
  2510. }
  2511. if (skdev->tag_set.tags)
  2512. blk_mq_free_tag_set(&skdev->tag_set);
  2513. put_disk(disk);
  2514. skdev->disk = NULL;
  2515. }
  2516. static void skd_destruct(struct skd_device *skdev)
  2517. {
  2518. if (skdev == NULL)
  2519. return;
  2520. cancel_work_sync(&skdev->start_queue);
  2521. dev_dbg(&skdev->pdev->dev, "disk\n");
  2522. skd_free_disk(skdev);
  2523. dev_dbg(&skdev->pdev->dev, "sksb\n");
  2524. skd_free_sksb(skdev);
  2525. dev_dbg(&skdev->pdev->dev, "skmsg\n");
  2526. skd_free_skmsg(skdev);
  2527. dev_dbg(&skdev->pdev->dev, "skcomp\n");
  2528. skd_free_skcomp(skdev);
  2529. kmem_cache_destroy(skdev->databuf_cache);
  2530. kmem_cache_destroy(skdev->sglist_cache);
  2531. kmem_cache_destroy(skdev->msgbuf_cache);
  2532. dev_dbg(&skdev->pdev->dev, "skdev\n");
  2533. kfree(skdev);
  2534. }
  2535. /*
  2536. *****************************************************************************
  2537. * BLOCK DEVICE (BDEV) GLUE
  2538. *****************************************************************************
  2539. */
  2540. static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
  2541. {
  2542. struct skd_device *skdev;
  2543. u64 capacity;
  2544. skdev = bdev->bd_disk->private_data;
  2545. dev_dbg(&skdev->pdev->dev, "%s: CMD[%s] getgeo device\n",
  2546. bdev->bd_disk->disk_name, current->comm);
  2547. if (skdev->read_cap_is_valid) {
  2548. capacity = get_capacity(skdev->disk);
  2549. geo->heads = 64;
  2550. geo->sectors = 255;
  2551. geo->cylinders = (capacity) / (255 * 64);
  2552. return 0;
  2553. }
  2554. return -EIO;
  2555. }
  2556. static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
  2557. {
  2558. dev_dbg(&skdev->pdev->dev, "add_disk\n");
  2559. device_add_disk(parent, skdev->disk, NULL);
  2560. return 0;
  2561. }
  2562. static const struct block_device_operations skd_blockdev_ops = {
  2563. .owner = THIS_MODULE,
  2564. .getgeo = skd_bdev_getgeo,
  2565. };
  2566. /*
  2567. *****************************************************************************
  2568. * PCIe DRIVER GLUE
  2569. *****************************************************************************
  2570. */
  2571. static const struct pci_device_id skd_pci_tbl[] = {
  2572. { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
  2573. PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
  2574. { 0 } /* terminate list */
  2575. };
  2576. MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
  2577. static char *skd_pci_info(struct skd_device *skdev, char *str)
  2578. {
  2579. int pcie_reg;
  2580. strcpy(str, "PCIe (");
  2581. pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);
  2582. if (pcie_reg) {
  2583. char lwstr[6];
  2584. uint16_t pcie_lstat, lspeed, lwidth;
  2585. pcie_reg += 0x12;
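/* 0x12 is the offset of the Link Status register in the PCIe capability */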
  2586. pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
  2587. lspeed = pcie_lstat & (0xF);
  2588. lwidth = (pcie_lstat & 0x3F0) >> 4;
  2589. if (lspeed == 1)
  2590. strcat(str, "2.5GT/s ");
  2591. else if (lspeed == 2)
  2592. strcat(str, "5.0GT/s ");
  2593. else
  2594. strcat(str, "<unknown> ");
  2595. snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
  2596. strcat(str, lwstr);
  2597. }
  2598. return str;
  2599. }
  2600. static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  2601. {
  2602. int i;
  2603. int rc = 0;
  2604. char pci_str[32];
  2605. struct skd_device *skdev;
  2606. dev_dbg(&pdev->dev, "vendor=%04X device=%04x\n", pdev->vendor,
  2607. pdev->device);
  2608. rc = pci_enable_device(pdev);
  2609. if (rc)
  2610. return rc;
  2611. rc = pci_request_regions(pdev, DRV_NAME);
  2612. if (rc)
  2613. goto err_out;
  2614. rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
  2615. if (rc)
  2616. rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
  2617. if (rc) {
  2618. dev_err(&pdev->dev, "DMA mask error %d\n", rc);
  2619. goto err_out_regions;
  2620. }
  2621. if (!skd_major) {
  2622. rc = register_blkdev(0, DRV_NAME);
  2623. if (rc < 0)
  2624. goto err_out_regions;
  2625. BUG_ON(!rc);
  2626. skd_major = rc;
  2627. }
  2628. skdev = skd_construct(pdev);
  2629. if (skdev == NULL) {
  2630. rc = -ENOMEM;
  2631. goto err_out_regions;
  2632. }
  2633. skd_pci_info(skdev, pci_str);
  2634. dev_info(&pdev->dev, "%s 64bit\n", pci_str);
  2635. pci_set_master(pdev);
  2636. rc = pci_enable_pcie_error_reporting(pdev);
  2637. if (rc) {
  2638. dev_err(&pdev->dev,
  2639. "bad enable of PCIe error reporting rc=%d\n", rc);
  2640. skdev->pcie_error_reporting_is_enabled = 0;
  2641. } else
  2642. skdev->pcie_error_reporting_is_enabled = 1;
  2643. pci_set_drvdata(pdev, skdev);
  2644. for (i = 0; i < SKD_MAX_BARS; i++) {
  2645. skdev->mem_phys[i] = pci_resource_start(pdev, i);
  2646. skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
  2647. skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
  2648. skdev->mem_size[i]);
  2649. if (!skdev->mem_map[i]) {
  2650. dev_err(&pdev->dev,
  2651. "Unable to map adapter memory!\n");
  2652. rc = -ENODEV;
  2653. goto err_out_iounmap;
  2654. }
  2655. dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
  2656. skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
  2657. skdev->mem_size[i]);
  2658. }
  2659. rc = skd_acquire_irq(skdev);
  2660. if (rc) {
  2661. dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
  2662. goto err_out_iounmap;
  2663. }
  2664. rc = skd_start_timer(skdev);
  2665. if (rc)
  2666. goto err_out_timer;
  2667. init_waitqueue_head(&skdev->waitq);
  2668. skd_start_device(skdev);
  2669. rc = wait_event_interruptible_timeout(skdev->waitq,
  2670. (skdev->gendisk_on),
  2671. (SKD_START_WAIT_SECONDS * HZ));
  2672. if (skdev->gendisk_on > 0) {
  2673. /* device came on-line after reset */
  2674. skd_bdev_attach(&pdev->dev, skdev);
  2675. rc = 0;
  2676. } else {
2677. /* we timed out; something is wrong with the device,
2678. * so don't add the disk structure */
  2679. dev_err(&pdev->dev, "error: waiting for s1120 timed out %d!\n",
  2680. rc);
2681. /* if there was no other error, we timed out, so return ENXIO */
  2682. if (!rc)
  2683. rc = -ENXIO;
  2684. goto err_out_timer;
  2685. }
  2686. return rc;
  2687. err_out_timer:
  2688. skd_stop_device(skdev);
  2689. skd_release_irq(skdev);
  2690. err_out_iounmap:
  2691. for (i = 0; i < SKD_MAX_BARS; i++)
  2692. if (skdev->mem_map[i])
  2693. iounmap(skdev->mem_map[i]);
  2694. if (skdev->pcie_error_reporting_is_enabled)
  2695. pci_disable_pcie_error_reporting(pdev);
  2696. skd_destruct(skdev);
  2697. err_out_regions:
  2698. pci_release_regions(pdev);
  2699. err_out:
  2700. pci_disable_device(pdev);
  2701. pci_set_drvdata(pdev, NULL);
  2702. return rc;
  2703. }
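
/*
 * skd_pci_remove() tears the adapter down in roughly the reverse order of
 * probe: stop the device and timer, release the interrupt, unmap the BARs,
 * disable PCIe error reporting, destroy the skd_device, and finally release
 * the PCI regions and disable the device.
 */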
static void skd_pci_remove(struct pci_dev *pdev)
{
	int i;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		dev_err(&pdev->dev, "no device data for PCI\n");
		return;
	}
	skd_stop_device(skdev);
	skd_release_irq(skdev);

	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	skd_destruct(skdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
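
/*
 * Legacy PCI power-management suspend: quiesce the device, free the IRQ and
 * BAR mappings, save PCI config space, and drop into the power state chosen
 * for the requested pm_message_t.
 */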
static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int i;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		dev_err(&pdev->dev, "no device data for PCI\n");
		return -EIO;
	}

	skd_stop_device(skdev);
	skd_release_irq(skdev);

	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
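
/*
 * Resume mirrors probe minus the allocation of the skd_device itself:
 * restore power and PCI state, re-enable the device, reclaim regions,
 * re-apply the DMA mask, remap the BARs, reacquire the interrupt, restart
 * the timer, and restart the device.
 */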
static int skd_pci_resume(struct pci_dev *pdev)
{
	int i;
	int rc = 0;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		dev_err(&pdev->dev, "no device data for PCI\n");
		return -1;
	}

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(&pdev->dev, "DMA mask error %d\n", rc);
		goto err_out_regions;
	}

	pci_set_master(pdev);
	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc) {
		dev_err(&pdev->dev,
			"bad enable of PCIe error reporting rc=%d\n", rc);
		skdev->pcie_error_reporting_is_enabled = 0;
	} else
		skdev->pcie_error_reporting_is_enabled = 1;

	for (i = 0; i < SKD_MAX_BARS; i++) {
		skdev->mem_phys[i] = pci_resource_start(pdev, i);
		skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
		skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
					    skdev->mem_size[i]);
		if (!skdev->mem_map[i]) {
			dev_err(&pdev->dev, "Unable to map adapter memory!\n");
			rc = -ENODEV;
			goto err_out_iounmap;
		}
		dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
			skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
			skdev->mem_size[i]);
	}

	rc = skd_acquire_irq(skdev);
	if (rc) {
		dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
		goto err_out_iounmap;
	}

	rc = skd_start_timer(skdev);
	if (rc)
		goto err_out_timer;

	init_waitqueue_head(&skdev->waitq);

	skd_start_device(skdev);

	return rc;

err_out_timer:
	skd_stop_device(skdev);
	skd_release_irq(skdev);

err_out_iounmap:
	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

err_out_regions:
	pci_release_regions(pdev);

err_out:
	pci_disable_device(pdev);
	return rc;
}
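
/*
 * Shutdown only quiesces the hardware; nothing is freed because the kernel
 * is about to reboot or power off.  The dev_err() calls make the stop
 * visible in the console log.
 */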
static void skd_pci_shutdown(struct pci_dev *pdev)
{
	struct skd_device *skdev;

	dev_err(&pdev->dev, "%s called\n", __func__);

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		dev_err(&pdev->dev, "no device data for PCI\n");
		return;
	}

	dev_err(&pdev->dev, "calling stop\n");
	skd_stop_device(skdev);
}
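
/*
 * Note: this driver still uses the legacy .suspend/.resume hooks in
 * struct pci_driver rather than a dev_pm_ops table.
 */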
static struct pci_driver skd_driver = {
	.name		= DRV_NAME,
	.id_table	= skd_pci_tbl,
	.probe		= skd_pci_probe,
	.remove		= skd_pci_remove,
	.suspend	= skd_pci_suspend,
	.resume		= skd_pci_resume,
	.shutdown	= skd_pci_shutdown,
};

/*
 *****************************************************************************
 * LOGGING SUPPORT
 *****************************************************************************
 */
const char *skd_drive_state_to_str(int state)
{
	switch (state) {
	case FIT_SR_DRIVE_OFFLINE:
		return "OFFLINE";
	case FIT_SR_DRIVE_INIT:
		return "INIT";
	case FIT_SR_DRIVE_ONLINE:
		return "ONLINE";
	case FIT_SR_DRIVE_BUSY:
		return "BUSY";
	case FIT_SR_DRIVE_FAULT:
		return "FAULT";
	case FIT_SR_DRIVE_DEGRADED:
		return "DEGRADED";
	case FIT_SR_PCIE_LINK_DOWN:
		return "LINK_DOWN";
	case FIT_SR_DRIVE_SOFT_RESET:
		return "SOFT_RESET";
	case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
		return "NEED_FW";
	case FIT_SR_DRIVE_INIT_FAULT:
		return "INIT_FAULT";
	case FIT_SR_DRIVE_BUSY_SANITIZE:
		return "BUSY_SANITIZE";
	case FIT_SR_DRIVE_BUSY_ERASE:
		return "BUSY_ERASE";
	case FIT_SR_DRIVE_FW_BOOTING:
		return "FW_BOOTING";
	default:
		return "???";
	}
}

const char *skd_skdev_state_to_str(enum skd_drvr_state state)
{
	switch (state) {
	case SKD_DRVR_STATE_LOAD:
		return "LOAD";
	case SKD_DRVR_STATE_IDLE:
		return "IDLE";
	case SKD_DRVR_STATE_BUSY:
		return "BUSY";
	case SKD_DRVR_STATE_STARTING:
		return "STARTING";
	case SKD_DRVR_STATE_ONLINE:
		return "ONLINE";
	case SKD_DRVR_STATE_PAUSING:
		return "PAUSING";
	case SKD_DRVR_STATE_PAUSED:
		return "PAUSED";
	case SKD_DRVR_STATE_RESTARTING:
		return "RESTARTING";
	case SKD_DRVR_STATE_RESUMING:
		return "RESUMING";
	case SKD_DRVR_STATE_STOPPING:
		return "STOPPING";
	case SKD_DRVR_STATE_SYNCING:
		return "SYNCING";
	case SKD_DRVR_STATE_FAULT:
		return "FAULT";
	case SKD_DRVR_STATE_DISAPPEARED:
		return "DISAPPEARED";
	case SKD_DRVR_STATE_BUSY_ERASE:
		return "BUSY_ERASE";
	case SKD_DRVR_STATE_BUSY_SANITIZE:
		return "BUSY_SANITIZE";
	case SKD_DRVR_STATE_BUSY_IMMINENT:
		return "BUSY_IMMINENT";
	case SKD_DRVR_STATE_WAIT_BOOT:
		return "WAIT_BOOT";
	default:
		return "???";
	}
}

static const char *skd_skreq_state_to_str(enum skd_req_state state)
{
	switch (state) {
	case SKD_REQ_STATE_IDLE:
		return "IDLE";
	case SKD_REQ_STATE_SETUP:
		return "SETUP";
	case SKD_REQ_STATE_BUSY:
		return "BUSY";
	case SKD_REQ_STATE_COMPLETED:
		return "COMPLETED";
	case SKD_REQ_STATE_TIMEOUT:
		return "TIMEOUT";
	default:
		return "???";
	}
}
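
/*
 * The skd_log_*() helpers dump device- and request-level state through
 * dev_dbg(), so they only produce output when dynamic debug (or DEBUG)
 * is enabled for this driver.
 */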
static void skd_log_skdev(struct skd_device *skdev, const char *event)
{
	dev_dbg(&skdev->pdev->dev, "skdev=%p event='%s'\n", skdev, event);
	dev_dbg(&skdev->pdev->dev, " drive_state=%s(%d) driver_state=%s(%d)\n",
		skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
		skd_skdev_state_to_str(skdev->state), skdev->state);
	dev_dbg(&skdev->pdev->dev, " busy=%d limit=%d dev=%d lowat=%d\n",
		skd_in_flight(skdev), skdev->cur_max_queue_depth,
		skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
	dev_dbg(&skdev->pdev->dev, " cycle=%d cycle_ix=%d\n",
		skdev->skcomp_cycle, skdev->skcomp_ix);
}

static void skd_log_skreq(struct skd_device *skdev,
			  struct skd_request_context *skreq, const char *event)
{
	struct request *req = blk_mq_rq_from_pdu(skreq);
	u32 lba = blk_rq_pos(req);
	u32 count = blk_rq_sectors(req);

	dev_dbg(&skdev->pdev->dev, "skreq=%p event='%s'\n", skreq, event);
	dev_dbg(&skdev->pdev->dev, " state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
		skd_skreq_state_to_str(skreq->state), skreq->state, skreq->id,
		skreq->fitmsg_id);
	dev_dbg(&skdev->pdev->dev, " sg_dir=%d n_sg=%d\n",
		skreq->data_dir, skreq->n_sg);
	dev_dbg(&skdev->pdev->dev,
		"req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba, lba,
		count, count, (int)rq_data_dir(req));
}

/*
 *****************************************************************************
 * MODULE GLUE
 *****************************************************************************
 */
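
/*
 * skd_init() sanity-checks the on-the-wire structure sizes at compile time
 * via BUILD_BUG_ON(), clamps any out-of-range module parameters back to
 * their defaults, and then registers the PCI driver.
 */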
static int __init skd_init(void)
{
	BUILD_BUG_ON(sizeof(struct fit_completion_entry_v1) != 8);
	BUILD_BUG_ON(sizeof(struct fit_comp_error_info) != 32);
	BUILD_BUG_ON(sizeof(struct skd_command_header) != 16);
	BUILD_BUG_ON(sizeof(struct skd_scsi_request) != 32);
	BUILD_BUG_ON(sizeof(struct driver_inquiry_data) != 44);
	BUILD_BUG_ON(offsetof(struct skd_msg_buf, fmh) != 0);
	BUILD_BUG_ON(offsetof(struct skd_msg_buf, scsi) != 64);
	BUILD_BUG_ON(sizeof(struct skd_msg_buf) != SKD_N_FITMSG_BYTES);

	switch (skd_isr_type) {
	case SKD_IRQ_LEGACY:
	case SKD_IRQ_MSI:
	case SKD_IRQ_MSIX:
		break;
	default:
		pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
		       skd_isr_type, SKD_IRQ_DEFAULT);
		skd_isr_type = SKD_IRQ_DEFAULT;
	}

	if (skd_max_queue_depth < 1 ||
	    skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
		pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
		       skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
		skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
	}

	if (skd_max_req_per_msg < 1 ||
	    skd_max_req_per_msg > SKD_MAX_REQ_PER_MSG) {
		pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
		       skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
		skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
	}

	if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
		pr_err(PFX "skd_sgs_per_request %d invalid, re-set to %d\n",
		       skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
		skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
	}

	if (skd_dbg_level < 0 || skd_dbg_level > 2) {
		pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
		       skd_dbg_level, 0);
		skd_dbg_level = 0;
	}

	if (skd_isr_comp_limit < 0) {
		pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
		       skd_isr_comp_limit, 0);
		skd_isr_comp_limit = 0;
	}

	return pci_register_driver(&skd_driver);
}

static void __exit skd_exit(void)
{
	pci_unregister_driver(&skd_driver);

	if (skd_major)
		unregister_blkdev(skd_major, DRV_NAME);
}

module_init(skd_init);
module_exit(skd_exit);