/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  commsup.c
 *
 * Abstract: Contain all routines that are required for FSA host/adapter
 *    communication.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/semaphore.h>
#include <linux/bcd.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include "aacraid.h"

/**
 *	fib_map_alloc		-	allocate the fib objects
 *	@dev: Adapter to allocate for
 *
 *	Allocate and map the shared PCI space for the FIB blocks used to
 *	talk to the Adaptec firmware.
 */
static int fib_map_alloc(struct aac_dev *dev)
{
	/* Cap the command size at the native FIB size */
	if (dev->max_fib_size > AAC_MAX_NATIVE_SIZE)
		dev->max_cmd_size = AAC_MAX_NATIVE_SIZE;
	else
		dev->max_cmd_size = dev->max_fib_size;
	dprintk((KERN_INFO
	  "allocate hardware fibs dma_alloc_coherent(%p, %d * (%d + %d), %p)\n",
	  &dev->pdev->dev, dev->max_cmd_size, dev->scsi_host_ptr->can_queue,
	  AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
	dev->hw_fib_va = dma_alloc_coherent(&dev->pdev->dev,
		(dev->max_cmd_size + sizeof(struct aac_fib_xporthdr))
		* (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1),
		&dev->hw_fib_pa, GFP_KERNEL);
	if (dev->hw_fib_va == NULL)
		return -ENOMEM;
	return 0;
}
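
/*
 * Illustrative sketch (comment only, not compiled): the coherent buffer
 * allocated above is carved into one slot per FIB.  Each slot holds a
 * transport header followed by the hardware FIB, and the region carries
 * ALIGN32 - 1 bytes of slack so the first slot can be rounded up to a
 * 32-byte boundary.  Using the fields referenced above, the size works
 * out as:
 *
 *	size_t slot  = dev->max_cmd_size + sizeof(struct aac_fib_xporthdr);
 *	int    nfibs = dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB;
 *	size_t total = slot * nfibs + (ALIGN32 - 1);
 *
 * aac_fib_map_free() below recomputes a matching total when releasing
 * the region.
 */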

/**
 *	aac_fib_map_free	-	free the fib objects
 *	@dev: Adapter to free
 *
 *	Free the PCI mappings and the memory allocated for FIB blocks
 *	on this adapter.
 */
void aac_fib_map_free(struct aac_dev *dev)
{
	size_t alloc_size;
	size_t fib_size;
	int num_fibs;

	if (!dev->hw_fib_va || !dev->max_cmd_size)
		return;

	num_fibs = dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB;
	fib_size = dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
	alloc_size = fib_size * num_fibs + ALIGN32 - 1;

	dma_free_coherent(&dev->pdev->dev, alloc_size, dev->hw_fib_va,
			  dev->hw_fib_pa);

	dev->hw_fib_va = NULL;
	dev->hw_fib_pa = 0;
}

void aac_fib_vector_assign(struct aac_dev *dev)
{
	u32 i = 0;
	u32 vector = 1;
	struct fib *fibptr = NULL;

	for (i = 0, fibptr = &dev->fibs[i];
		i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
		i++, fibptr++) {
		if ((dev->max_msix == 1) ||
		    (i > ((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1)
			- dev->vector_cap))) {
			fibptr->vector_no = 0;
		} else {
			fibptr->vector_no = vector;
			vector++;
			if (vector == dev->max_msix)
				vector = 1;
		}
	}
}
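
/*
 * Illustrative reading of the loop above (assumptions noted): vectors
 * 1 .. max_msix - 1 are handed out round-robin, e.g. with max_msix == 4
 * the covered fibs get 1, 2, 3, 1, 2, 3, ...  Vector 0 is kept in
 * reserve; it is used for every fib when only one MSI-X vector exists,
 * and for the fibs whose index falls past the vector_cap window.
 */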

/**
 *	aac_fib_setup	-	setup the fibs
 *	@dev: Adapter to set up
 *
 *	Allocate the PCI space for the fibs, map it and then initialise the
 *	fib area, the unmapped fib data and also the free list
 */
int aac_fib_setup(struct aac_dev * dev)
{
	struct fib *fibptr;
	struct hw_fib *hw_fib;
	dma_addr_t hw_fib_pa;
	int i;
	u32 max_cmds;

	while (((i = fib_map_alloc(dev)) == -ENOMEM)
	 && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
		max_cmds = (dev->scsi_host_ptr->can_queue+AAC_NUM_MGT_FIB) >> 1;
		dev->scsi_host_ptr->can_queue = max_cmds - AAC_NUM_MGT_FIB;
		if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3)
			dev->init->r7.max_io_commands = cpu_to_le32(max_cmds);
	}
	if (i < 0)
		return -ENOMEM;

	memset(dev->hw_fib_va, 0,
		(dev->max_cmd_size + sizeof(struct aac_fib_xporthdr)) *
		(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));

	/* 32 byte alignment for PMC */
	hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1);
	hw_fib = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
		(hw_fib_pa - dev->hw_fib_pa));

	/* add Xport header */
	hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
		sizeof(struct aac_fib_xporthdr));
	hw_fib_pa += sizeof(struct aac_fib_xporthdr);

	/*
	 *	Initialise the fibs
	 */
	for (i = 0, fibptr = &dev->fibs[i];
		i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
		i++, fibptr++)
	{
		fibptr->flags = 0;
		fibptr->size = sizeof(struct fib);
		fibptr->dev = dev;
		fibptr->hw_fib_va = hw_fib;
		fibptr->data = (void *) fibptr->hw_fib_va->data;
		fibptr->next = fibptr+1;	/* Forward chain the fibs */
		sema_init(&fibptr->event_wait, 0);
		spin_lock_init(&fibptr->event_lock);
		hw_fib->header.XferState = cpu_to_le32(0xffffffff);
		hw_fib->header.SenderSize =
			cpu_to_le16(dev->max_fib_size);	/* ?? max_cmd_size */
		fibptr->hw_fib_pa = hw_fib_pa;
		fibptr->hw_sgl_pa = hw_fib_pa +
			offsetof(struct aac_hba_cmd_req, sge[2]);
		/*
		 * one element is for the ptr to the separate sg list,
		 * second element for 32 byte alignment
		 */
		fibptr->hw_error_pa = hw_fib_pa +
			offsetof(struct aac_native_hba, resp.resp_bytes[0]);

		hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
			dev->max_cmd_size + sizeof(struct aac_fib_xporthdr));
		hw_fib_pa = hw_fib_pa +
			dev->max_cmd_size + sizeof(struct aac_fib_xporthdr);
	}

	/*
	 *	Assign vector numbers to fibs
	 */
	aac_fib_vector_assign(dev);

	/*
	 *	Add the fib chain to the free list
	 */
	dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
	/*
	 *	Set 8 fibs aside for management tools
	 */
	dev->free_fib = &dev->fibs[dev->scsi_host_ptr->can_queue];
	return 0;
}

/**
 *	aac_fib_alloc_tag - allocate a fib using tags
 *	@dev: Adapter to allocate the fib for
 *	@scmd: SCSI command whose blk layer tag selects the fib
 *
 *	Allocate a fib from the adapter fib pool using tags
 *	from the blk layer.
 */
struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
{
	struct fib *fibptr;

	fibptr = &dev->fibs[scmd->request->tag];
	/*
	 *	Null out fields that depend on being zero at the start of
	 *	each I/O
	 */
	fibptr->hw_fib_va->header.XferState = 0;
	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
	fibptr->callback_data = NULL;
	fibptr->callback = NULL;

	return fibptr;
}
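
/*
 * Illustrative use (assumed caller, not part of this file): a
 * queuecommand path can map a SCSI command straight to its preallocated
 * fib via the block layer tag, so no lock or free list walk is needed:
 *
 *	struct fib *cmd_fibcontext = aac_fib_alloc_tag(dev, scmd);
 *
 *	aac_fib_init(cmd_fibcontext);
 *	// ... fill in the command payload, then send it ...
 */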

/**
 *	aac_fib_alloc	-	allocate a fib
 *	@dev: Adapter to allocate the fib for
 *
 *	Allocate a fib from the adapter fib pool. If the pool is empty we
 *	return NULL.
 */
struct fib *aac_fib_alloc(struct aac_dev *dev)
{
	struct fib * fibptr;
	unsigned long flags;

	spin_lock_irqsave(&dev->fib_lock, flags);
	fibptr = dev->free_fib;
	if (!fibptr) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		return fibptr;
	}
	dev->free_fib = fibptr->next;
	spin_unlock_irqrestore(&dev->fib_lock, flags);
	/*
	 *	Set the proper node type code and node byte size
	 */
	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
	fibptr->size = sizeof(struct fib);
	/*
	 *	Null out fields that depend on being zero at the start of
	 *	each I/O
	 */
	fibptr->hw_fib_va->header.XferState = 0;
	fibptr->flags = 0;
	fibptr->callback = NULL;
	fibptr->callback_data = NULL;

	return fibptr;
}
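
/*
 * Illustrative lifecycle sketch (assumed caller): every fib taken from
 * the pool must be returned, normally after aac_fib_complete():
 *
 *	struct fib *fibptr = aac_fib_alloc(dev);
 *
 *	if (!fibptr)
 *		return -ENOMEM;
 *	aac_fib_init(fibptr);
 *	// ... build the request in fib_data(fibptr) and send it ...
 *	aac_fib_complete(fibptr);
 *	aac_fib_free(fibptr);
 */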

/**
 *	aac_fib_free	-	free a fib
 *	@fibptr: fib to free up
 *
 *	Frees up a fib and places it on the appropriate queue
 */
void aac_fib_free(struct fib *fibptr)
{
	unsigned long flags;

	if (fibptr->done == 2)
		return;

	spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
	if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
		aac_config.fib_timeouts++;
	if (!(fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) &&
		fibptr->hw_fib_va->header.XferState != 0) {
		printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
			(void *)fibptr,
			le32_to_cpu(fibptr->hw_fib_va->header.XferState));
	}
	fibptr->next = fibptr->dev->free_fib;
	fibptr->dev->free_fib = fibptr;
	spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
}

/**
 *	aac_fib_init	-	initialise a fib
 *	@fibptr: The fib to initialize
 *
 *	Set up the generic fib fields ready for use
 */
void aac_fib_init(struct fib *fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib_va;

	memset(&hw_fib->header, 0, sizeof(struct aac_fibhdr));
	hw_fib->header.StructType = FIB_MAGIC;
	hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
	hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
	hw_fib->header.u.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
	hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
}

/**
 *	fib_dealloc		-	deallocate a fib
 *	@fibptr: fib to deallocate
 *
 *	Will deallocate and return to the free pool the FIB pointed to by the
 *	caller.
 */
static void fib_dealloc(struct fib * fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib_va;
	hw_fib->header.XferState = 0;
}

/*
 *	Communication primitives define and support the queuing method we use
 *	to support host to adapter communication. All queue accesses happen
 *	through these routines and they are the only routines which have
 *	knowledge of how these queues are implemented.
 */

/**
 *	aac_get_entry		-	get a queue entry
 *	@dev: Adapter
 *	@qid: Queue Number
 *	@entry: Entry return
 *	@index: Index return
 *	@nonotify: notification control
 *
 *	With a priority the routine returns a queue entry if the queue has
 *	free entries. If the queue is full (no free entries) then no entry is
 *	returned and the function returns 0, otherwise 1 is returned.
 */
static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
{
	struct aac_queue * q;
	unsigned long idx;

	/*
	 *	All of the queues wrap when they reach the end, so we check
	 *	to see if they have reached the end and if they have we just
	 *	set the index back to zero. This is a wrap. You could or off
	 *	the high bits in all updates but this is a bit faster I think.
	 */

	q = &dev->queues->queue[qid];

	idx = *index = le32_to_cpu(*(q->headers.producer));
	/* Interrupt Moderation, only interrupt for first two entries */
	if (idx != le32_to_cpu(*(q->headers.consumer))) {
		if (--idx == 0) {
			if (qid == AdapNormCmdQueue)
				idx = ADAP_NORM_CMD_ENTRIES;
			else
				idx = ADAP_NORM_RESP_ENTRIES;
		}
		if (idx != le32_to_cpu(*(q->headers.consumer)))
			*nonotify = 1;
	}

	if (qid == AdapNormCmdQueue) {
		if (*index >= ADAP_NORM_CMD_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	} else {
		if (*index >= ADAP_NORM_RESP_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	}

	/* Queue is full */
	if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) {
		printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
				qid, atomic_read(&q->numpending));
		return 0;
	} else {
		*entry = q->base + *index;
		return 1;
	}
}
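
/*
 * Illustrative ring arithmetic (conceptual, not compiled): the producer
 * index wraps to zero at the queue size, and the queue is declared full
 * when the slot after the producer is the consumer:
 *
 *	next = (*index + 1 == entries) ? 0 : *index + 1;
 *	full = (next == consumer);
 *
 * which is why one slot of the ring is always left unused.
 */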

/**
 *	aac_queue_get		-	get the next free QE
 *	@dev: Adapter
 *	@index: Returned index
 *	@qid: Queue number
 *	@hw_fib: Fib to associate with the queue entry
 *	@wait: Wait if queue full
 *	@fibptr: Driver fib object to go with fib
 *	@nonotify: Don't notify the adapter
 *
 *	Gets the next free QE off the requested priority adapter command
 *	queue and associates the Fib with the QE. The QE represented by
 *	index is ready to insert on the queue when this routine returns
 *	success.
 */
int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
{
	struct aac_entry * entry = NULL;
	int map = 0;

	if (qid == AdapNormCmdQueue) {
		/* if no entries wait for some if caller wants to */
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
			printk(KERN_ERR "GetEntries failed\n");
		}
		/*
		 *	Setup queue entry with a command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		map = 1;
	} else {
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
			/* if no entries wait for some if caller wants to */
		}
		/*
		 *	Setup queue entry with command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		entry->addr = hw_fib->header.SenderFibAddress;
		/* Restore adapters pointer to the FIB */
		hw_fib->header.u.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
		map = 0;
	}
	/*
	 *	If MapFib is true then we need to map the Fib and put pointers
	 *	in the queue entry.
	 */
	if (map)
		entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
	return 0;
}

#ifdef CONFIG_EEH
static inline int aac_check_eeh_failure(struct aac_dev *dev)
{
	/* Check for an EEH failure for the given
	 * device node. Function eeh_dev_check_failure()
	 * returns 0 if there has not been an EEH error,
	 * otherwise returns a non-zero value.
	 *
	 * Need to be called before any PCI operation,
	 * i.e., before aac_adapter_check_health()
	 */
	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev->pdev);

	if (eeh_dev_check_failure(edev)) {
		/* The EEH mechanisms will handle this
		 * error and reset the device if
		 * necessary.
		 */
		return 1;
	}
	return 0;
}
#else
static inline int aac_check_eeh_failure(struct aac_dev *dev)
{
	return 0;
}
#endif

/*
 *	Define the highest level of host to adapter communication routines.
 *	These routines will support host to adapter FS communication. These
 *	routines have no knowledge of the communication method used. This
 *	level sends and receives FIBs. This level has no knowledge of how
 *	these FIBs get passed back and forth.
 */

/**
 *	aac_fib_send	-	send a fib to the adapter
 *	@command: Command to send
 *	@fibptr: The fib
 *	@size: Size of fib data area
 *	@priority: Priority of Fib
 *	@wait: Async/sync select
 *	@reply: True if a reply is wanted
 *	@callback: Called with reply
 *	@callback_data: Passed to callback
 *
 *	Sends the requested FIB to the adapter and optionally will wait for a
 *	response FIB. If the caller does not wish to wait for a response then
 *	an event to wait on must be supplied. This event will be set when a
 *	response FIB is received from the adapter.
 */
int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
		int priority, int wait, int reply, fib_callback callback,
		void *callback_data)
{
	struct aac_dev * dev = fibptr->dev;
	struct hw_fib * hw_fib = fibptr->hw_fib_va;
	unsigned long flags = 0;
	unsigned long mflags = 0;
	unsigned long sflags = 0;

	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
		return -EBUSY;

	if (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))
		return -EINVAL;

	/*
	 *	There are 5 cases with the wait and response requested flags.
	 *	The only invalid cases are if the caller requests to wait and
	 *	does not request a response, and if the caller does not want a
	 *	response and the Fib is not allocated from pool. If a response
	 *	is not requested the Fib will just be deallocated by the DPC
	 *	routine when the response comes back from the adapter. No
	 *	further processing will be done besides deleting the Fib. We
	 *	will have a debug mode where the adapter can notify the host
	 *	it had a problem and the host can log that fact.
	 */
	fibptr->flags = 0;
	if (wait && !reply) {
		return -EINVAL;
	} else if (!wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
	} else if (!wait && !reply) {
		hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
	} else if (wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
	}
	/*
	 *	Map the fib into 32bits by using the fib number
	 */
	hw_fib->header.SenderFibAddress =
		cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);

	/* use the same shifted value for handle to be compatible
	 * with the new native hba command handle
	 */
	hw_fib->header.Handle =
		cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);

	/*
	 *	Set FIB state to indicate where it came from and if we want a
	 *	response from the adapter. Also load the command from the
	 *	caller.
	 *
	 *	Map the hw fib pointer as a 32bit value
	 */
	hw_fib->header.Command = cpu_to_le16(command);
	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
	/*
	 *	Set the size of the Fib we want to send to the adapter
	 */
	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
		return -EMSGSIZE;
	}
	/*
	 *	Get a queue entry, connect the FIB to it and send a notify
	 *	to the adapter that a command is ready.
	 */
	hw_fib->header.XferState |= cpu_to_le32(NormalPriority);

	/*
	 *	Fill in the Callback and CallbackContext if we are not
	 *	going to wait.
	 */
	if (!wait) {
		fibptr->callback = callback;
		fibptr->callback_data = callback_data;
		fibptr->flags = FIB_CONTEXT_FLAG;
	}

	fibptr->done = 0;

	FIB_COUNTER_INCREMENT(aac_config.FibsSent);

	dprintk((KERN_DEBUG "Fib contents:.\n"));
	dprintk((KERN_DEBUG " Command = %d.\n", le32_to_cpu(hw_fib->header.Command)));
	dprintk((KERN_DEBUG " SubCommand = %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
	dprintk((KERN_DEBUG " XferState = %x.\n", le32_to_cpu(hw_fib->header.XferState)));
	dprintk((KERN_DEBUG " hw_fib va being sent=%p\n", fibptr->hw_fib_va));
	dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n", (ulong)fibptr->hw_fib_pa));
	dprintk((KERN_DEBUG " fib being sent=%p\n", fibptr));

	if (!dev->queues)
		return -EBUSY;

	if (wait) {
		spin_lock_irqsave(&dev->manage_lock, mflags);
		if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
			printk(KERN_INFO "No management Fibs Available:%d\n",
						dev->management_fib_count);
			spin_unlock_irqrestore(&dev->manage_lock, mflags);
			return -EBUSY;
		}
		dev->management_fib_count++;
		spin_unlock_irqrestore(&dev->manage_lock, mflags);
		spin_lock_irqsave(&fibptr->event_lock, flags);
	}

	if (dev->sync_mode) {
		if (wait)
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
		spin_lock_irqsave(&dev->sync_lock, sflags);
		if (dev->sync_fib) {
			list_add_tail(&fibptr->fiblink, &dev->sync_fib_list);
			spin_unlock_irqrestore(&dev->sync_lock, sflags);
		} else {
			dev->sync_fib = fibptr;
			spin_unlock_irqrestore(&dev->sync_lock, sflags);
			aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
				(u32)fibptr->hw_fib_pa, 0, 0, 0, 0, 0,
				NULL, NULL, NULL, NULL, NULL);
		}
		if (wait) {
			fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
			if (down_interruptible(&fibptr->event_wait)) {
				fibptr->flags &= ~FIB_CONTEXT_FLAG_WAIT;
				return -EFAULT;
			}
			return 0;
		}
		return -EINPROGRESS;
	}

	if (aac_adapter_deliver(fibptr) != 0) {
		printk(KERN_ERR "aac_fib_send: returned -EBUSY\n");
		if (wait) {
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			spin_lock_irqsave(&dev->manage_lock, mflags);
			dev->management_fib_count--;
			spin_unlock_irqrestore(&dev->manage_lock, mflags);
		}
		return -EBUSY;
	}

	/*
	 *	If the caller wanted us to wait for response wait now.
	 */
	if (wait) {
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		/* Only set for first known interruptible command */
		if (wait < 0) {
			/*
			 * *VERY* Dangerous to time out a command, the
			 * assumption is made that we have no hope of
			 * functioning because an interrupt routing or other
			 * hardware failure has occurred.
			 */
			unsigned long timeout = jiffies + (180 * HZ); /* 3 minutes */
			while (down_trylock(&fibptr->event_wait)) {
				int blink;
				if (time_is_before_eq_jiffies(timeout)) {
					struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
					atomic_dec(&q->numpending);
					if (wait == -1) {
						printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
						  "Usually a result of a PCI interrupt routing problem;\n"
						  "update mother board BIOS or consider utilizing one of\n"
						  "the SAFE mode kernel options (acpi, apic etc)\n");
					}
					return -ETIMEDOUT;
				}

				if (aac_check_eeh_failure(dev))
					return -EFAULT;

				if ((blink = aac_adapter_check_health(dev)) > 0) {
					if (wait == -1) {
						printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
						  "Usually a result of a serious unrecoverable hardware problem\n",
						  blink);
					}
					return -EFAULT;
				}
				/*
				 * Allow other processes / CPUS to use core
				 */
				schedule();
			}
		} else if (down_interruptible(&fibptr->event_wait)) {
			/* Do nothing ... satisfy
			 * down_interruptible must_check */
		}

		spin_lock_irqsave(&fibptr->event_lock, flags);
		if (fibptr->done == 0) {
			fibptr->done = 2; /* Tell interrupt we aborted */
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			return -ERESTARTSYS;
		}
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		BUG_ON(fibptr->done == 0);

		if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
			return -ETIMEDOUT;
		return 0;
	}
	/*
	 *	If the user does not want a response then return success,
	 *	otherwise return pending
	 */
	if (reply)
		return -EINPROGRESS;
	else
		return 0;
}
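
/*
 * Illustrative wait semantics (assumed callers): wait=1 blocks on the
 * fib's event semaphore, wait<0 additionally polls adapter health with
 * a 3-minute timeout, and wait=0 returns immediately and completes via
 * the callback:
 *
 *	// synchronous: wait=1, reply=1
 *	st = aac_fib_send(ContainerCommand, fibptr, sz, FsaNormal,
 *			  1, 1, NULL, NULL);
 *
 *	// asynchronous: wait=0, reply=1; my_done/my_ctx are
 *	// hypothetical names
 *	st = aac_fib_send(ContainerCommand, fibptr, sz, FsaNormal,
 *			  0, 1, my_done, my_ctx);
 */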

/**
 *	aac_hba_send	-	send a native HBA command request
 *	@command: HBA IU type of the request
 *	@fibptr: The fib carrying the native command
 *	@callback: Called on completion (NULL to wait synchronously)
 *	@callback_data: Passed to callback
 */
int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
		void *callback_data)
{
	struct aac_dev *dev = fibptr->dev;
	int wait;
	unsigned long flags = 0;
	unsigned long mflags = 0;

	fibptr->flags = (FIB_CONTEXT_FLAG | FIB_CONTEXT_FLAG_NATIVE_HBA);
	if (callback) {
		wait = 0;
		fibptr->callback = callback;
		fibptr->callback_data = callback_data;
	} else
		wait = 1;

	if (command == HBA_IU_TYPE_SCSI_CMD_REQ) {
		struct aac_hba_cmd_req *hbacmd =
			(struct aac_hba_cmd_req *)fibptr->hw_fib_va;

		hbacmd->iu_type = command;
		/* bit1 of request_id must be 0 */
		hbacmd->request_id =
			cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
	} else
		return -EINVAL;

	if (wait) {
		spin_lock_irqsave(&dev->manage_lock, mflags);
		if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
			spin_unlock_irqrestore(&dev->manage_lock, mflags);
			return -EBUSY;
		}
		dev->management_fib_count++;
		spin_unlock_irqrestore(&dev->manage_lock, mflags);
		spin_lock_irqsave(&fibptr->event_lock, flags);
	}

	if (aac_adapter_deliver(fibptr) != 0) {
		if (wait) {
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			spin_lock_irqsave(&dev->manage_lock, mflags);
			dev->management_fib_count--;
			spin_unlock_irqrestore(&dev->manage_lock, mflags);
		}
		return -EBUSY;
	}
	FIB_COUNTER_INCREMENT(aac_config.NativeSent);

	if (wait) {
		spin_unlock_irqrestore(&fibptr->event_lock, flags);

		if (aac_check_eeh_failure(dev))
			return -EFAULT;

		/* Only set for first known interruptible command */
		if (down_interruptible(&fibptr->event_wait)) {
			fibptr->done = 2;
			up(&fibptr->event_wait);
		}
		spin_lock_irqsave(&fibptr->event_lock, flags);
		if ((fibptr->done == 0) || (fibptr->done == 2)) {
			fibptr->done = 2; /* Tell interrupt we aborted */
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			return -ERESTARTSYS;
		}
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		WARN_ON(fibptr->done == 0);

		if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
			return -ETIMEDOUT;

		return 0;
	}
	return -EINPROGRESS;
}
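
/*
 * Note on the request_id encoding (as read from the code above, decode
 * side assumed): the fib index is shifted left by two and offset by
 * one, so bit 0 is set and bit 1 stays clear, e.g. fib 5 gives
 * (5 << 2) + 1 = 0x15.  A response handler can then recover the fib
 * with plain arithmetic:
 *
 *	index = request_id >> 2;	// illustrative
 *	fibptr = &dev->fibs[index];
 */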

/**
 *	aac_consumer_get	-	get the top of the queue
 *	@dev: Adapter
 *	@q: Queue
 *	@entry: Return entry
 *
 *	Will return a pointer to the entry on the top of the queue requested that
 *	we are a consumer of, and return the address of the queue entry. It does
 *	not change the state of the queue.
 */
int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
{
	u32 index;
	int status;

	if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
		status = 0;
	} else {
		/*
		 *	The consumer index must be wrapped if we have reached
		 *	the end of the queue, else we just use the entry
		 *	pointed to by the header index
		 */
		if (le32_to_cpu(*q->headers.consumer) >= q->entries)
			index = 0;
		else
			index = le32_to_cpu(*q->headers.consumer);
		*entry = q->base + index;
		status = 1;
	}
	return(status);
}
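
/*
 * Illustrative consumer loop (assumed caller, e.g. a response DPC):
 * entries are peeked with aac_consumer_get() and retired with
 * aac_consumer_free(), which advances the consumer index:
 *
 *	while (aac_consumer_get(dev, q, &entry)) {
 *		// ... process entry->addr / entry->size ...
 *		aac_consumer_free(dev, q, HostNormRespQueue);
 *	}
 */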

/**
 *	aac_consumer_free	-	free consumer entry
 *	@dev: Adapter
 *	@q: Queue
 *	@qid: Queue ident
 *
 *	Frees up the current top of the queue we are a consumer of. If the
 *	queue was full notify the producer that the queue is no longer full.
 */
void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
{
	int wasfull = 0;
	u32 notify;

	if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
		wasfull = 1;

	if (le32_to_cpu(*q->headers.consumer) >= q->entries)
		*q->headers.consumer = cpu_to_le32(1);
	else
		le32_add_cpu(q->headers.consumer, 1);

	if (wasfull) {
		switch (qid) {
		case HostNormCmdQueue:
			notify = HostNormCmdNotFull;
			break;
		case HostNormRespQueue:
			notify = HostNormRespNotFull;
			break;
		default:
			BUG();
			return;
		}
		aac_adapter_notify(dev, notify);
	}
}

/**
 *	aac_fib_adapter_complete	-	complete adapter issued fib
 *	@fibptr: fib to complete
 *	@size: size of fib
 *
 *	Will do all necessary work to complete a FIB that was sent from
 *	the adapter.
 */
int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
{
	struct hw_fib * hw_fib = fibptr->hw_fib_va;
	struct aac_dev * dev = fibptr->dev;
	struct aac_queue * q;
	unsigned long nointr = 0;
	unsigned long qflags;

	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
	    dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
	    dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
		kfree(hw_fib);
		return 0;
	}

	if (hw_fib->header.XferState == 0) {
		if (dev->comm_interface == AAC_COMM_MESSAGE)
			kfree(hw_fib);
		return 0;
	}
	/*
	 *	If we plan to do anything check the structure type first.
	 */
	if (hw_fib->header.StructType != FIB_MAGIC &&
	    hw_fib->header.StructType != FIB_MAGIC2 &&
	    hw_fib->header.StructType != FIB_MAGIC2_64) {
		if (dev->comm_interface == AAC_COMM_MESSAGE)
			kfree(hw_fib);
		return -EINVAL;
	}
	/*
	 *	This block handles the case where the adapter had sent us a
	 *	command and we have finished processing the command. We
	 *	call completeFib when we are done processing the command
	 *	and want to send a response back to the adapter. This will
	 *	send the completed cdb to the adapter.
	 */
	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
		if (dev->comm_interface == AAC_COMM_MESSAGE) {
			kfree(hw_fib);
		} else {
			u32 index;
			hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
			if (size) {
				size += sizeof(struct aac_fibhdr);
				if (size > le16_to_cpu(hw_fib->header.SenderSize))
					return -EMSGSIZE;
				hw_fib->header.Size = cpu_to_le16(size);
			}
			q = &dev->queues->queue[AdapNormRespQueue];
			spin_lock_irqsave(q->lock, qflags);
			aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
			*(q->headers.producer) = cpu_to_le32(index + 1);
			spin_unlock_irqrestore(q->lock, qflags);
			if (!(nointr & (int)aac_config.irq_mod))
				aac_adapter_notify(dev, AdapNormRespQueue);
		}
	} else {
		printk(KERN_WARNING "aac_fib_adapter_complete: "
			"Unknown xferstate detected.\n");
		BUG();
	}
	return 0;
}

/**
 *	aac_fib_complete	-	fib completion handler
 *	@fibptr: FIB to complete
 *
 *	Will do all necessary work to complete a FIB.
 */
int aac_fib_complete(struct fib *fibptr)
{
	struct hw_fib * hw_fib = fibptr->hw_fib_va;

	if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) {
		fib_dealloc(fibptr);
		return 0;
	}

	/*
	 *	Check for a fib which has already been completed or with a
	 *	status wait timeout
	 */
	if (hw_fib->header.XferState == 0 || fibptr->done == 2)
		return 0;
	/*
	 *	If we plan to do anything check the structure type first.
	 */
	if (hw_fib->header.StructType != FIB_MAGIC &&
	    hw_fib->header.StructType != FIB_MAGIC2 &&
	    hw_fib->header.StructType != FIB_MAGIC2_64)
		return -EINVAL;

	/*
	 *	This block completes a cdb which originated on the host and we
	 *	just need to deallocate the cdb or reinit it. At this point the
	 *	command we had sent to the adapter is complete and this
	 *	cdb could be reused.
	 */
	if ((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
		(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
	{
		fib_dealloc(fibptr);
	}
	else if (hw_fib->header.XferState & cpu_to_le32(SentFromHost))
	{
		/*
		 *	This handles the case when the host has aborted the I/O
		 *	to the adapter because the adapter is not responding
		 */
		fib_dealloc(fibptr);
	} else if (hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
		fib_dealloc(fibptr);
	} else {
		BUG();
	}
	return 0;
}

/**
 *	aac_printf	-	handle printf from firmware
 *	@dev: Adapter
 *	@val: Message info
 *
 *	Print a message passed to us by the controller firmware on the
 *	Adaptec board
 */
void aac_printf(struct aac_dev *dev, u32 val)
{
	char *cp = dev->printfbuf;
	if (dev->printf_enabled)
	{
		int length = val & 0xffff;
		int level = (val >> 16) & 0xffff;

		/*
		 *	The size of the printfbuf is set in port.c
		 *	There is no variable or define for it
		 */
		if (length > 255)
			length = 255;
		if (cp[length] != 0)
			cp[length] = 0;
		if (level == LOG_AAC_HIGH_ERROR)
			printk(KERN_WARNING "%s:%s", dev->name, cp);
		else
			printk(KERN_INFO "%s:%s", dev->name, cp);
	}
	memset(cp, 0, 256);
}
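
/*
 * Illustrative decoding of @val (from the code above): the low 16 bits
 * carry the message length and the high 16 bits the severity:
 *
 *	int length = val & 0xffff;
 *	int level  = (val >> 16) & 0xffff;
 *
 * A level of LOG_AAC_HIGH_ERROR is logged at KERN_WARNING, anything
 * else at KERN_INFO, and the length is clamped to 255 bytes of the
 * 256-byte printfbuf.
 */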

static inline int aac_aif_data(struct aac_aifcmd *aifcmd, uint32_t index)
{
	return le32_to_cpu(((__le32 *)aifcmd->data)[index]);
}

static void aac_handle_aif_bu(struct aac_dev *dev, struct aac_aifcmd *aifcmd)
{
	switch (aac_aif_data(aifcmd, 1)) {
	case AifBuCacheDataLoss:
		if (aac_aif_data(aifcmd, 2))
			dev_info(&dev->pdev->dev, "Backup unit had cache data loss - [%d]\n",
				aac_aif_data(aifcmd, 2));
		else
			dev_info(&dev->pdev->dev, "Backup Unit had cache data loss\n");
		break;
	case AifBuCacheDataRecover:
		if (aac_aif_data(aifcmd, 2))
			dev_info(&dev->pdev->dev, "DDR cache data recovered successfully - [%d]\n",
				aac_aif_data(aifcmd, 2));
		else
			dev_info(&dev->pdev->dev, "DDR cache data recovered successfully\n");
		break;
	}
}

/**
 *	aac_handle_aif		-	Handle a message from the firmware
 *	@dev: Which adapter this fib is from
 *	@fibptr: Pointer to fibptr from adapter
 *
 *	This routine handles a driver notify fib from the adapter and
 *	dispatches it to the appropriate routine for handling.
 */
#define AIF_SNIFF_TIMEOUT	(500*HZ)
static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
{
	struct hw_fib * hw_fib = fibptr->hw_fib_va;
	struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
	u32 channel, id, lun, container;
	struct scsi_device *device;
	enum {
		NOTHING,
		DELETE,
		ADD,
		CHANGE
	} device_config_needed = NOTHING;

	/* Sniff for container changes */
	if (!dev || !dev->fsa_dev)
		return;
	container = channel = id = lun = (u32)-1;

	/*
	 *	We have set this up to try and minimize the number of
	 *	re-configures that take place. As a result of this when
	 *	certain AIF's come in we will set a flag waiting for another
	 *	type of AIF before setting the re-config flag.
	 */
	switch (le32_to_cpu(aifcmd->command)) {
	case AifCmdDriverNotify:
		switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
		case AifRawDeviceRemove:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if ((container >> 28)) {
				container = (u32)-1;
				break;
			}
			channel = (container >> 24) & 0xF;
			if (channel >= dev->maximum_num_channels) {
				container = (u32)-1;
				break;
			}
			id = container & 0xFFFF;
			if (id >= dev->maximum_num_physicals) {
				container = (u32)-1;
				break;
			}
			lun = (container >> 16) & 0xFF;
			container = (u32)-1;
			channel = aac_phys_to_logical(channel);
			device_config_needed = DELETE;
			break;

		/*
		 *	Morph or Expand complete
		 */
		case AifDenMorphComplete:
		case AifDenVolumeExtendComplete:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;

			/*
			 *	Find the scsi_device associated with the SCSI
			 *	address. Make sure we have the right array, and
			 *	if so set the flag to initiate a new re-config
			 *	once we see an AifEnConfigChange AIF come
			 *	through.
			 */
			if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
				device = scsi_device_lookup(dev->scsi_host_ptr,
					CONTAINER_TO_CHANNEL(container),
					CONTAINER_TO_ID(container),
					CONTAINER_TO_LUN(container));
				if (device) {
					dev->fsa_dev[container].config_needed = CHANGE;
					dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
					dev->fsa_dev[container].config_waiting_stamp = jiffies;
					scsi_device_put(device);
				}
			}
		}

		/*
		 *	If we are waiting on something and this happens to be
		 *	that thing then set the re-configure flag.
		 */
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;

	case AifCmdEventNotify:
		switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
		case AifEnBatteryEvent:
			dev->cache_protected =
				(((__le32 *)aifcmd->data)[1] == cpu_to_le32(3));
			break;
		/*
		 *	Add an Array.
		 */
		case AifEnAddContainer:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			dev->fsa_dev[container].config_needed = ADD;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		/*
		 *	Delete an Array.
		 */
		case AifEnDeleteContainer:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			dev->fsa_dev[container].config_needed = DELETE;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		/*
		 *	Container change detected. If we currently are not
		 *	waiting on something else, setup to wait on a Config
		 *	Change.
		 */
		case AifEnContainerChange:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			if (dev->fsa_dev[container].config_waiting_on &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				break;
			dev->fsa_dev[container].config_needed = CHANGE;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		case AifEnConfigChange:
			break;

		case AifEnAddJBOD:
		case AifEnDeleteJBOD:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if ((container >> 28)) {
				container = (u32)-1;
				break;
			}
			channel = (container >> 24) & 0xF;
			if (channel >= dev->maximum_num_channels) {
				container = (u32)-1;
				break;
			}
			id = container & 0xFFFF;
			if (id >= dev->maximum_num_physicals) {
				container = (u32)-1;
				break;
			}
			lun = (container >> 16) & 0xFF;
			container = (u32)-1;
			channel = aac_phys_to_logical(channel);
			device_config_needed =
			  (((__le32 *)aifcmd->data)[0] ==
			    cpu_to_le32(AifEnAddJBOD)) ? ADD : DELETE;
			if (device_config_needed == ADD) {
				device = scsi_device_lookup(dev->scsi_host_ptr,
					channel, id, lun);
				if (device) {
					scsi_remove_device(device);
					scsi_device_put(device);
				}
			}
			break;

		case AifEnEnclosureManagement:
			/*
			 *	If in JBOD mode, automatic exposure of new
			 *	physical target to be suppressed until
			 *	configured.
			 */
			if (dev->jbod)
				break;
			switch (le32_to_cpu(((__le32 *)aifcmd->data)[3])) {
			case EM_DRIVE_INSERTION:
			case EM_DRIVE_REMOVAL:
			case EM_SES_DRIVE_INSERTION:
			case EM_SES_DRIVE_REMOVAL:
				container = le32_to_cpu(
					((__le32 *)aifcmd->data)[2]);
				if ((container >> 28)) {
					container = (u32)-1;
					break;
				}
				channel = (container >> 24) & 0xF;
				if (channel >= dev->maximum_num_channels) {
					container = (u32)-1;
					break;
				}
				id = container & 0xFFFF;
				lun = (container >> 16) & 0xFF;
				container = (u32)-1;
				if (id >= dev->maximum_num_physicals) {
					/* legacy dev_t ? */
					if ((0x2000 <= id) || lun || channel ||
					  ((channel = (id >> 7) & 0x3F) >=
					  dev->maximum_num_channels))
						break;
					lun = (id >> 4) & 7;
					id &= 0xF;
				}
				channel = aac_phys_to_logical(channel);
				device_config_needed =
				  ((((__le32 *)aifcmd->data)[3]
				    == cpu_to_le32(EM_DRIVE_INSERTION)) ||
				    (((__le32 *)aifcmd->data)[3]
				    == cpu_to_le32(EM_SES_DRIVE_INSERTION))) ?
				  ADD : DELETE;
				break;
			}
			/* stop here; falling through to AifBuManagerEvent
			 * would mis-read the event payload */
			break;

		case AifBuManagerEvent:
			aac_handle_aif_bu(dev, aifcmd);
			break;
		}

		/*
		 *	If we are waiting on something and this happens to be
		 *	that thing then set the re-configure flag.
		 */
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;

	case AifCmdJobProgress:
		/*
		 *	These are job progress AIF's. When a Clear is being
		 *	done on a container it is initially created then hidden
		 *	from the OS. When the clear completes we don't get a
		 *	config change so we monitor the job status complete on
		 *	a clear then wait for a container change.
		 */
		if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
		    (((__le32 *)aifcmd->data)[6] == ((__le32 *)aifcmd->data)[5] ||
		     ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess))) {
			for (container = 0;
			    container < dev->maximum_num_containers;
			    ++container) {
				/*
				 * Stomp on all config sequencing for all
				 * containers?
				 */
				dev->fsa_dev[container].config_waiting_on =
					AifEnContainerChange;
				dev->fsa_dev[container].config_needed = ADD;
				dev->fsa_dev[container].config_waiting_stamp =
					jiffies;
			}
		}
		if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
		    ((__le32 *)aifcmd->data)[6] == 0 &&
		    ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning)) {
			for (container = 0;
			    container < dev->maximum_num_containers;
			    ++container) {
				/*
				 * Stomp on all config sequencing for all
				 * containers?
				 */
				dev->fsa_dev[container].config_waiting_on =
					AifEnContainerChange;
				dev->fsa_dev[container].config_needed = DELETE;
				dev->fsa_dev[container].config_waiting_stamp =
					jiffies;
			}
		}
		break;
	}
	container = 0;
retry_next:
	if (device_config_needed == NOTHING)
		for (; container < dev->maximum_num_containers; ++container) {
			if ((dev->fsa_dev[container].config_waiting_on == 0) &&
			    (dev->fsa_dev[container].config_needed != NOTHING) &&
			    time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
				device_config_needed =
					dev->fsa_dev[container].config_needed;
				dev->fsa_dev[container].config_needed = NOTHING;
				channel = CONTAINER_TO_CHANNEL(container);
				id = CONTAINER_TO_ID(container);
				lun = CONTAINER_TO_LUN(container);
				break;
			}
		}
	if (device_config_needed == NOTHING)
		return;

	/*
	 * If we decided that a re-configuration needs to be done,
	 * schedule it here on the way out the door, please close the door
	 * behind you.
	 */

	/*
	 * Find the scsi_device associated with the SCSI address,
	 * and mark it as changed, invalidating the cache. This deals
	 * with changes to existing device IDs.
	 */

	if (!dev || !dev->scsi_host_ptr)
		return;
	/*
	 * force reload of disk info via aac_probe_container
	 */
	if ((channel == CONTAINER_CHANNEL) &&
	    (device_config_needed != NOTHING)) {
		if (dev->fsa_dev[container].valid == 1)
			dev->fsa_dev[container].valid = 2;
		aac_probe_container(dev, container);
	}
	device = scsi_device_lookup(dev->scsi_host_ptr, channel, id, lun);
	if (device) {
		switch (device_config_needed) {
		case DELETE:
#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
			scsi_remove_device(device);
#else
			if (scsi_device_online(device)) {
				scsi_device_set_state(device, SDEV_OFFLINE);
				sdev_printk(KERN_INFO, device,
					    "Device offlined - %s\n",
					    (channel == CONTAINER_CHANNEL) ?
						"array deleted" :
						"enclosure services event");
			}
#endif
			break;
		case ADD:
			if (!scsi_device_online(device)) {
				sdev_printk(KERN_INFO, device,
					    "Device online - %s\n",
					    (channel == CONTAINER_CHANNEL) ?
						"array created" :
						"enclosure services event");
				scsi_device_set_state(device, SDEV_RUNNING);
			}
			/* FALLTHRU */
		case CHANGE:
			if ((channel == CONTAINER_CHANNEL)
			    && (!dev->fsa_dev[container].valid)) {
#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
				scsi_remove_device(device);
#else
				if (!scsi_device_online(device))
					break;
				scsi_device_set_state(device, SDEV_OFFLINE);
				sdev_printk(KERN_INFO, device,
					    "Device offlined - %s\n",
					    "array failed");
#endif
				break;
			}
			scsi_rescan_device(&device->sdev_gendev);
		default:
			break;
		}
		scsi_device_put(device);
		device_config_needed = NOTHING;
	}
	if (device_config_needed == ADD)
		scsi_add_device(dev->scsi_host_ptr, channel, id, lun);
	if (channel == CONTAINER_CHANNEL) {
		container++;
		device_config_needed = NOTHING;
		goto retry_next;
	}
}
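
/**
 *	_aac_reset_adapter	-	low-level adapter reset
 *	@aac: Adapter to reset
 *	@forced: Non-zero to skip the health check before restarting
 *	@reset_type: IOP/SOFT reset method handed to aac_adapter_restart()
 *
 *	Tears down and re-initializes the adapter: stops the command
 *	thread if we are not running on it, restarts the controller,
 *	completes outstanding synchronous FIBs, rebuilds the comm area,
 *	returns firmware-owned commands to the midlayer and removes any
 *	devices left offline. Called with in_reset already asserted.
 */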
static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
{
	int index, quirks;
	int retval;
	struct Scsi_Host *host;
	struct scsi_device *dev;
	struct scsi_cmnd *command;
	struct scsi_cmnd *command_list;
	int jafo = 0;
	int bled;

	/*
	 * Assumptions:
	 *	- host is locked, unless called by the aacraid thread.
	 *	  (a matter of convenience, due to legacy issues surrounding
	 *	  eh_host_adapter_reset).
	 *	- in_reset is asserted, so no new i/o is getting to the
	 *	  card.
	 *	- The card is dead, or will be very shortly ;-/ so no new
	 *	  commands are completing in the interrupt service.
	 */
	host = aac->scsi_host_ptr;
	scsi_block_requests(host);
	aac_adapter_disable_int(aac);
	if (aac->thread->pid != current->pid) {
		spin_unlock_irq(host->host_lock);
		kthread_stop(aac->thread);
		jafo = 1;
	}

	/*
	 * A positive health value means the adapter is in a known
	 * DEAD PANIC state and can be reset to `try again'.
	 */
	bled = forced ? 0 : aac_adapter_check_health(aac);
	retval = aac_adapter_restart(aac, bled, reset_type);

	if (retval)
		goto out;

	/*
	 * Loop through the fibs, close the synchronous FIBS
	 */
	for (retval = 1, index = 0; index < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); index++) {
		struct fib *fib = &aac->fibs[index];
		if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
		    (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected))) {
			unsigned long flagv;
			spin_lock_irqsave(&fib->event_lock, flagv);
			up(&fib->event_wait);
			spin_unlock_irqrestore(&fib->event_lock, flagv);
			schedule();
			retval = 0;
		}
	}
	/* Give some extra time for ioctls to complete. */
	if (retval == 0)
		ssleep(2);
	index = aac->cardtype;

	/*
	 * Re-initialize the adapter, first free resources, then carefully
	 * apply the initialization sequence to come back again. Only risk
	 * is a change in Firmware dropping cache, it is assumed the caller
	 * will ensure that i/o is quiesced and the card is flushed in that
	 * case.
	 */
	aac_fib_map_free(aac);
	dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr,
			  aac->comm_phys);
	aac->comm_addr = NULL;
	aac->comm_phys = 0;
	kfree(aac->queues);
	aac->queues = NULL;
	aac_free_irq(aac);
	kfree(aac->fsa_dev);
	aac->fsa_dev = NULL;

	quirks = aac_get_driver_ident(index)->quirks;
	if (quirks & AAC_QUIRK_31BIT) {
		if (((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(31)))) ||
		    ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_BIT_MASK(31)))))
			goto out;
	} else {
		if (((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(32)))) ||
		    ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_BIT_MASK(32)))))
			goto out;
	}

	if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
		goto out;

	if (quirks & AAC_QUIRK_31BIT)
		if ((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(32))))
			goto out;

	if (jafo) {
		aac->thread = kthread_run(aac_command_thread, aac, "%s",
					  aac->name);
		if (IS_ERR(aac->thread)) {
			retval = PTR_ERR(aac->thread);
			goto out;
		}
	}
	(void)aac_get_adapter_info(aac);
	if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
		host->sg_tablesize = 34;
		host->max_sectors = (host->sg_tablesize * 8) + 112;
	}
	if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
		host->sg_tablesize = 17;
		host->max_sectors = (host->sg_tablesize * 8) + 112;
	}
	aac_get_config_status(aac, 1);
	aac_get_containers(aac);
	/*
	 * This is where the assumption that the Adapter is quiesced
	 * is important.
	 */
	command_list = NULL;
	__shost_for_each_device(dev, host) {
		unsigned long flags;
		spin_lock_irqsave(&dev->list_lock, flags);
		list_for_each_entry(command, &dev->cmd_list, list)
			if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
				command->SCp.buffer = (struct scatterlist *)command_list;
				command_list = command;
			}
		spin_unlock_irqrestore(&dev->list_lock, flags);
	}
	while ((command = command_list)) {
		command_list = (struct scsi_cmnd *)command->SCp.buffer;
		command->SCp.buffer = NULL;
		command->result = DID_OK << 16
			| COMMAND_COMPLETE << 8
			| SAM_STAT_TASK_SET_FULL;
		command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
		command->scsi_done(command);
	}
	/*
	 * Any Device that was already marked offline needs to be cleaned up
	 */
	__shost_for_each_device(dev, host) {
		if (!scsi_device_online(dev)) {
			sdev_printk(KERN_INFO, dev, "Removing offline device\n");
			scsi_remove_device(dev);
			scsi_device_put(dev);
		}
	}
	retval = 0;

out:
	aac->in_reset = 0;
	scsi_unblock_requests(host);

	/*
	 * Issue bus rescan to catch any configuration that might have
	 * occurred
	 */
	if (!retval) {
		dev_info(&aac->pdev->dev, "Issuing bus rescan\n");
		scsi_scan_host(host);
	}
	if (jafo) {
		spin_lock_irq(host->host_lock);
	}
	return retval;
}
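
/**
 *	aac_reset_adapter	-	request an adapter reset
 *	@aac: Adapter to reset
 *	@forced: 0 = drain I/O and shut the controller down first;
 *		 higher values force the reset with less preparation
 *	@reset_type: reset method passed through to _aac_reset_adapter()
 *
 *	Serializes resets via in_reset, optionally waits up to 60 seconds
 *	for firmware-owned commands to drain, sends a controller shutdown
 *	and performs the reset under the host lock. If the reset itself
 *	reports -ENODEV, a CT_PAUSE_IO FIB is sent to unwind the shutdown.
 */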
int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
{
	unsigned long flagv = 0;
	int retval;
	struct Scsi_Host *host;
	int bled;

	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
		return -EBUSY;

	if (aac->in_reset) {
		spin_unlock_irqrestore(&aac->fib_lock, flagv);
		return -EBUSY;
	}
	aac->in_reset = 1;
	spin_unlock_irqrestore(&aac->fib_lock, flagv);

	/*
	 * Wait for all commands to complete to this specific
	 * target (block maximum 60 seconds). Although not necessary,
	 * it does make us a good storage citizen.
	 */
	host = aac->scsi_host_ptr;
	scsi_block_requests(host);
	if (forced < 2)
		for (retval = 60; retval; --retval) {
			struct scsi_device *dev;
			struct scsi_cmnd *command;
			int active = 0;

			__shost_for_each_device(dev, host) {
				spin_lock_irqsave(&dev->list_lock, flagv);
				list_for_each_entry(command, &dev->cmd_list, list) {
					if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
						active++;
						break;
					}
				}
				spin_unlock_irqrestore(&dev->list_lock, flagv);
				if (active)
					break;
			}
			/*
			 * We can exit if all the commands are complete
			 */
			if (active == 0)
				break;
			ssleep(1);
		}

	/* Quiesce build, flush cache, write through mode */
	if (forced < 2)
		aac_send_shutdown(aac);
	spin_lock_irqsave(host->host_lock, flagv);
	bled = forced ? forced :
			(aac_check_reset != 0 && aac_check_reset != 1);
	retval = _aac_reset_adapter(aac, bled, reset_type);
	spin_unlock_irqrestore(host->host_lock, flagv);

	if ((forced < 2) && (retval == -ENODEV)) {
		/* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */
		struct fib *fibctx = aac_fib_alloc(aac);
		if (fibctx) {
			struct aac_pause *cmd;
			int status;

			aac_fib_init(fibctx);

			cmd = (struct aac_pause *) fib_data(fibctx);

			cmd->command = cpu_to_le32(VM_ContainerConfig);
			cmd->type = cpu_to_le32(CT_PAUSE_IO);
			cmd->timeout = cpu_to_le32(1);
			cmd->min = cpu_to_le32(1);
			cmd->noRescan = cpu_to_le32(1);
			cmd->count = cpu_to_le32(0);

			status = aac_fib_send(ContainerCommand,
					      fibctx,
					      sizeof(struct aac_pause),
					      FsaNormal,
					      -2 /* Timeout silently */, 1,
					      NULL, NULL);

			if (status >= 0)
				aac_fib_complete(fibctx);
			/* FIB should be freed only after getting
			 * the response from the F/W */
			if (status != -ERESTARTSYS)
				aac_fib_free(fibctx);
		}
	}

	return retval;
}
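
/**
 *	aac_check_health	-	poll adapter health and recover
 *	@aac: Adapter to check
 *
 *	Returns 0 when the adapter is healthy or a reset is already in
 *	progress. On a firmware panic (BlinkLED), a fake
 *	AifExeFirmwarePanic AIF is queued to every open fib context so
 *	user-space listeners are notified, and the adapter is reset
 *	unless aac_check_reset forbids it.
 */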
int aac_check_health(struct aac_dev *aac)
{
	int BlinkLED;
	unsigned long time_now, flagv = 0;
	struct list_head *entry;
	struct Scsi_Host *host;
	int bled;

	/* Extending the scope of fib_lock slightly to protect aac->in_reset */
	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
		return 0;

	if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) {
		spin_unlock_irqrestore(&aac->fib_lock, flagv);
		return 0; /* OK */
	}

	aac->in_reset = 1;

	/* Fake up an AIF:
	 *	aac_aifcmd.command = AifCmdEventNotify = 1
	 *	aac_aifcmd.seqnum = 0xFFFFFFFF
	 *	aac_aifcmd.data[0] = AifEnExpEvent = 23
	 *	aac_aifcmd.data[1] = AifExeFirmwarePanic = 3
	 *	aac_aifcmd.data[2] = AifHighPriority = 3
	 *	aac_aifcmd.data[3] = BlinkLED
	 */
	time_now = jiffies/HZ;
	entry = aac->fib_list.next;

	/*
	 * For each Context that is on the
	 * fibctxList, make a copy of the
	 * fib, and then set the event to wake up the
	 * thread that is waiting for it.
	 */
	while (entry != &aac->fib_list) {
		/*
		 * Extract the fibctx
		 */
		struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next);
		struct hw_fib *hw_fib;
		struct fib *fib;
		/*
		 * Check if the queue is getting
		 * backlogged
		 */
		if (fibctx->count > 20) {
			/*
			 * It's *not* jiffies folks,
			 * but jiffies / HZ, so do not
			 * panic ...
			 */
			u32 time_last = fibctx->jiffies;
			/*
			 * Has it been > 2 minutes
			 * since the last read off
			 * the queue?
			 */
			if ((time_now - time_last) > aif_timeout) {
				entry = entry->next;
				aac_close_fib_context(aac, fibctx);
				continue;
			}
		}
		/*
		 * Warning: no sleep allowed while
		 * holding spinlock
		 */
		hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC);
		fib = kzalloc(sizeof(struct fib), GFP_ATOMIC);
		if (fib && hw_fib) {
			struct aac_aifcmd *aif;

			fib->hw_fib_va = hw_fib;
			fib->dev = aac;
			aac_fib_init(fib);
			fib->type = FSAFS_NTC_FIB_CONTEXT;
			fib->size = sizeof(struct fib);
			fib->data = hw_fib->data;
			aif = (struct aac_aifcmd *)hw_fib->data;
			aif->command = cpu_to_le32(AifCmdEventNotify);
			aif->seqnum = cpu_to_le32(0xFFFFFFFF);
			((__le32 *)aif->data)[0] = cpu_to_le32(AifEnExpEvent);
			((__le32 *)aif->data)[1] = cpu_to_le32(AifExeFirmwarePanic);
			((__le32 *)aif->data)[2] = cpu_to_le32(AifHighPriority);
			((__le32 *)aif->data)[3] = cpu_to_le32(BlinkLED);

			/*
			 * Put the FIB onto the
			 * fibctx's fibs
			 */
			list_add_tail(&fib->fiblink, &fibctx->fib_list);
			fibctx->count++;
			/*
			 * Set the event to wake up the
			 * thread that is waiting.
			 */
			up(&fibctx->wait_sem);
		} else {
			printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
			kfree(fib);
			kfree(hw_fib);
		}
		entry = entry->next;
	}

	spin_unlock_irqrestore(&aac->fib_lock, flagv);

	if (BlinkLED < 0) {
		printk(KERN_ERR "%s: Host adapter is dead (or got a PCI error) %d\n",
		       aac->name, BlinkLED);
		goto out;
	}

	printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);

	if (!aac_check_reset || ((aac_check_reset == 1) &&
		(aac->supplement_adapter_info.supported_options2 &
			AAC_OPTION_IGNORE_RESET)))
		goto out;
	host = aac->scsi_host_ptr;
	if (aac->thread->pid != current->pid)
		spin_lock_irqsave(host->host_lock, flagv);
	bled = aac_check_reset != 1 ? 1 : 0;
	_aac_reset_adapter(aac, bled, IOP_HWSOFT_RESET);
	if (aac->thread->pid != current->pid)
		spin_unlock_irqrestore(host->host_lock, flagv);
	return BlinkLED;

out:
	aac->in_reset = 0;
	return BlinkLED;
}
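
/**
 *	aac_resolve_luns	-	reconcile hba_map with the midlayer
 *	@dev: Adapter whose physical targets changed
 *
 *	Walks every bus/target slot, compares the cached devtype with the
 *	new_devtype reported by the latest rescan, and adds, removes or
 *	rescans the matching scsi_device accordingly.
 */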
static void aac_resolve_luns(struct aac_dev *dev)
{
	int bus, target, channel;
	struct scsi_device *sdev;
	u8 devtype;
	u8 new_devtype;

	for (bus = 0; bus < AAC_MAX_BUSES; bus++) {
		for (target = 0; target < AAC_MAX_TARGETS; target++) {

			if (bus == CONTAINER_CHANNEL)
				channel = CONTAINER_CHANNEL;
			else
				channel = aac_phys_to_logical(bus);

			devtype = dev->hba_map[bus][target].devtype;
			new_devtype = dev->hba_map[bus][target].new_devtype;

			sdev = scsi_device_lookup(dev->scsi_host_ptr, channel,
					target, 0);

			if (!sdev && new_devtype)
				scsi_add_device(dev->scsi_host_ptr, channel,
						target, 0);
			else if (sdev && new_devtype != devtype)
				scsi_remove_device(sdev);
			else if (sdev && new_devtype == devtype)
				scsi_rescan_device(&sdev->sdev_gendev);

			if (sdev)
				scsi_device_put(sdev);

			dev->hba_map[bus][target].devtype = new_devtype;
		}
	}
}

/**
 * aac_handle_sa_aif - Handle a message from the firmware
 * @dev: Which adapter this fib is from
 * @fibptr: Pointer to fibptr from adapter
 *
 * This routine handles a driver notify fib from the adapter and
 * dispatches it to the appropriate routine for handling.
 */
static void aac_handle_sa_aif(struct aac_dev *dev, struct fib *fibptr)
{
	int i, bus, target, container, rcode = 0;
	u32 events = 0;
	struct fib *fib;
	struct scsi_device *sdev;

	if (fibptr->hbacmd_size & SA_AIF_HOTPLUG)
		events = SA_AIF_HOTPLUG;
	else if (fibptr->hbacmd_size & SA_AIF_HARDWARE)
		events = SA_AIF_HARDWARE;
	else if (fibptr->hbacmd_size & SA_AIF_PDEV_CHANGE)
		events = SA_AIF_PDEV_CHANGE;
	else if (fibptr->hbacmd_size & SA_AIF_LDEV_CHANGE)
		events = SA_AIF_LDEV_CHANGE;
	else if (fibptr->hbacmd_size & SA_AIF_BPSTAT_CHANGE)
		events = SA_AIF_BPSTAT_CHANGE;
	else if (fibptr->hbacmd_size & SA_AIF_BPCFG_CHANGE)
		events = SA_AIF_BPCFG_CHANGE;

	switch (events) {
	case SA_AIF_HOTPLUG:
	case SA_AIF_HARDWARE:
	case SA_AIF_PDEV_CHANGE:
	case SA_AIF_LDEV_CHANGE:
	case SA_AIF_BPCFG_CHANGE:

		fib = aac_fib_alloc(dev);
		if (!fib) {
			pr_err("aac_handle_sa_aif: out of memory\n");
			return;
		}
		for (bus = 0; bus < AAC_MAX_BUSES; bus++)
			for (target = 0; target < AAC_MAX_TARGETS; target++)
				dev->hba_map[bus][target].new_devtype = 0;

		rcode = aac_report_phys_luns(dev, fib, AAC_RESCAN);

		if (rcode != -ERESTARTSYS)
			aac_fib_free(fib);

		aac_resolve_luns(dev);

		if (events == SA_AIF_LDEV_CHANGE ||
		    events == SA_AIF_BPCFG_CHANGE) {
			aac_get_containers(dev);
			for (container = 0; container <
			    dev->maximum_num_containers; ++container) {
				sdev = scsi_device_lookup(dev->scsi_host_ptr,
						CONTAINER_CHANNEL,
						container, 0);
				if (dev->fsa_dev[container].valid && !sdev) {
					scsi_add_device(dev->scsi_host_ptr,
						CONTAINER_CHANNEL,
						container, 0);
				} else if (!dev->fsa_dev[container].valid &&
					   sdev) {
					scsi_remove_device(sdev);
					scsi_device_put(sdev);
				} else if (sdev) {
					scsi_rescan_device(&sdev->sdev_gendev);
					scsi_device_put(sdev);
				}
			}
		}
		break;

	case SA_AIF_BPSTAT_CHANGE:
		/* currently do nothing */
		break;
	}

	for (i = 1; i <= 10; ++i) {
		events = src_readl(dev, MUnit.IDR);
		if (events & (1<<23)) {
			pr_warn("AIF not cleared by firmware - %d/%d\n",
				i, 10);
			ssleep(1);
		}
	}
}
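
/**
 *	get_fib_count	-	estimate how many FIB copies may be needed
 *	@dev: Adapter to inspect
 *
 *	Returns the adapter FIB pool size plus one entry per open fib
 *	context, counted under fib_lock. The result is only an
 *	upper-bound estimate used to pre-allocate copies outside the lock.
 */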
static int get_fib_count(struct aac_dev *dev)
{
	unsigned int num = 0;
	struct list_head *entry;
	unsigned long flagv;

	/*
	 * Warning: no sleep allowed while
	 * holding spinlock. We take the estimate
	 * and pre-allocate a set of fibs outside the
	 * lock.
	 */
	num = le32_to_cpu(dev->init->r7.adapter_fibs_size)
			/ sizeof(struct hw_fib); /* some extra */
	spin_lock_irqsave(&dev->fib_lock, flagv);
	entry = dev->fib_list.next;
	while (entry != &dev->fib_list) {
		entry = entry->next;
		++num;
	}
	spin_unlock_irqrestore(&dev->fib_lock, flagv);

	return num;
}
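
/**
 *	fillup_pools	-	pre-allocate FIB copies for broadcast
 *	@dev: Adapter the copies are for
 *	@hw_fib_pool: Array to fill with hw_fib allocations
 *	@fib_pool: Array to fill with fib allocations
 *	@num: Requested number of pairs
 *
 *	Allocates hw_fib/fib pairs with GFP_KERNEL until the request is
 *	met or an allocation fails, and returns the number of complete
 *	pairs actually obtained.
 */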
static int fillup_pools(struct aac_dev *dev, struct hw_fib **hw_fib_pool,
						struct fib **fib_pool,
						unsigned int num)
{
	struct hw_fib **hw_fib_p;
	struct fib **fib_p;

	hw_fib_p = hw_fib_pool;
	fib_p = fib_pool;
	while (hw_fib_p < &hw_fib_pool[num]) {
		*(hw_fib_p) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL);
		if (!(*(hw_fib_p++))) {
			--hw_fib_p;
			break;
		}

		*(fib_p) = kmalloc(sizeof(struct fib), GFP_KERNEL);
		if (!(*(fib_p++))) {
			kfree(*(--hw_fib_p));
			break;
		}
	}

	/*
	 * Get the actual number of allocated fibs
	 */
	num = hw_fib_p - hw_fib_pool;
	return num;
}
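
/**
 *	wakeup_fibctx_threads	-	fan an AIF out to all fib contexts
 *	@dev: Adapter owning the fib contexts
 *	@hw_fib_pool: Pre-allocated hw_fib copies (consumed)
 *	@fib_pool: Pre-allocated fib copies (consumed)
 *	@fib: Original fib to duplicate
 *	@hw_fib: Original hardware fib to duplicate
 *	@num: Number of pre-allocated pairs available
 *
 *	Under fib_lock, queues a private copy of the AIF onto every open
 *	fib context (closing contexts backlogged for more than
 *	aif_timeout), wakes each waiter, then acknowledges the original
 *	FIB back to the adapter with ST_OK.
 */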
static void wakeup_fibctx_threads(struct aac_dev *dev,
						struct hw_fib **hw_fib_pool,
						struct fib **fib_pool,
						struct fib *fib,
						struct hw_fib *hw_fib,
						unsigned int num)
{
	unsigned long flagv;
	struct list_head *entry;
	struct hw_fib **hw_fib_p;
	struct fib **fib_p;
	u32 time_now, time_last;
	struct hw_fib *hw_newfib;
	struct fib *newfib;
	struct aac_fib_context *fibctx;

	time_now = jiffies/HZ;
	spin_lock_irqsave(&dev->fib_lock, flagv);
	entry = dev->fib_list.next;
	/*
	 * For each Context that is on the
	 * fibctxList, make a copy of the
	 * fib, and then set the event to wake up the
	 * thread that is waiting for it.
	 */

	hw_fib_p = hw_fib_pool;
	fib_p = fib_pool;
	while (entry != &dev->fib_list) {
		/*
		 * Extract the fibctx
		 */
		fibctx = list_entry(entry, struct aac_fib_context,
				next);
		/*
		 * Check if the queue is getting
		 * backlogged
		 */
		if (fibctx->count > 20) {
			/*
			 * It's *not* jiffies folks,
			 * but jiffies / HZ so do not
			 * panic ...
			 */
			time_last = fibctx->jiffies;
			/*
			 * Has it been > 2 minutes
			 * since the last read off
			 * the queue?
			 */
			if ((time_now - time_last) > aif_timeout) {
				entry = entry->next;
				aac_close_fib_context(dev, fibctx);
				continue;
			}
		}
		/*
		 * Warning: no sleep allowed while
		 * holding spinlock
		 */
		if (hw_fib_p >= &hw_fib_pool[num]) {
			pr_warn("aifd: didn't allocate NewFib\n");
			entry = entry->next;
			continue;
		}

		hw_newfib = *hw_fib_p;
		*(hw_fib_p++) = NULL;
		newfib = *fib_p;
		*(fib_p++) = NULL;
		/*
		 * Make the copy of the FIB
		 */
		memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
		memcpy(newfib, fib, sizeof(struct fib));
		newfib->hw_fib_va = hw_newfib;
		/*
		 * Put the FIB onto the
		 * fibctx's fibs
		 */
		list_add_tail(&newfib->fiblink, &fibctx->fib_list);
		fibctx->count++;
		/*
		 * Set the event to wake up the
		 * thread that is waiting.
		 */
		up(&fibctx->wait_sem);

		entry = entry->next;
	}
	/*
	 * Set the status of this FIB
	 */
	*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
	aac_fib_adapter_complete(fib, sizeof(u32));
	spin_unlock_irqrestore(&dev->fib_lock, flagv);
}
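
/**
 *	aac_process_events	-	drain the host normal command queue
 *	@dev: Adapter to service
 *
 *	Pops AIF FIBs off HostNormCmdQueue one at a time. SA (Thor)
 *	firmware AIFs are handled directly; legacy AIFs are passed to
 *	aac_handle_aif() and then broadcast to user-space fib contexts
 *	via the pool-allocate/copy/wakeup helpers above.
 */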
static void aac_process_events(struct aac_dev *dev)
{
	struct hw_fib *hw_fib;
	struct fib *fib;
	unsigned long flags;
	spinlock_t *t_lock;

	t_lock = dev->queues->queue[HostNormCmdQueue].lock;
	spin_lock_irqsave(t_lock, flags);

	while (!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
		struct list_head *entry;
		struct aac_aifcmd *aifcmd;
		unsigned int num;
		struct hw_fib **hw_fib_pool, **hw_fib_p;
		struct fib **fib_pool, **fib_p;

		set_current_state(TASK_RUNNING);

		entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
		list_del(entry);

		t_lock = dev->queues->queue[HostNormCmdQueue].lock;
		spin_unlock_irqrestore(t_lock, flags);

		fib = list_entry(entry, struct fib, fiblink);
		hw_fib = fib->hw_fib_va;
		if (dev->sa_firmware) {
			/* Thor AIF */
			aac_handle_sa_aif(dev, fib);
			aac_fib_adapter_complete(fib, (u16)sizeof(u32));
			goto free_fib;
		}
		/*
		 * We will process the FIB here or pass it to a
		 * worker thread that is TBD. We really can't
		 * do anything at this point since we don't have
		 * anything defined for this thread to do.
		 */
		memset(fib, 0, sizeof(struct fib));
		fib->type = FSAFS_NTC_FIB_CONTEXT;
		fib->size = sizeof(struct fib);
		fib->hw_fib_va = hw_fib;
		fib->data = hw_fib->data;
		fib->dev = dev;
		/*
		 * We only handle AifRequest fibs from the adapter.
		 */

		aifcmd = (struct aac_aifcmd *) hw_fib->data;
		if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
			/* Handle Driver Notify Events */
			aac_handle_aif(dev, fib);
			*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
			aac_fib_adapter_complete(fib, (u16)sizeof(u32));
			goto free_fib;
		}
		/*
		 * The u32 here is important and intended. We are using
		 * 32bit wrapping time to fit the adapter field
		 */

		/* Sniff events */
		if (aifcmd->command == cpu_to_le32(AifCmdEventNotify)
		 || aifcmd->command == cpu_to_le32(AifCmdJobProgress)) {
			aac_handle_aif(dev, fib);
		}

		/*
		 * get number of fibs to process
		 */
		num = get_fib_count(dev);
		if (!num)
			goto free_fib;

		hw_fib_pool = kmalloc_array(num, sizeof(struct hw_fib *),
						GFP_KERNEL);
		if (!hw_fib_pool)
			goto free_fib;

		fib_pool = kmalloc_array(num, sizeof(struct fib *), GFP_KERNEL);
		if (!fib_pool)
			goto free_hw_fib_pool;

		/*
		 * Fill up fib pointer pools with actual fibs
		 * and hw_fibs
		 */
		num = fillup_pools(dev, hw_fib_pool, fib_pool, num);
		if (!num)
			goto free_mem;

		/*
		 * wakeup the thread that is waiting for
		 * the response from fw (ioctl)
		 */
		wakeup_fibctx_threads(dev, hw_fib_pool, fib_pool,
						      fib, hw_fib, num);

free_mem:
		/* Free up the remaining resources */
		hw_fib_p = hw_fib_pool;
		fib_p = fib_pool;
		while (hw_fib_p < &hw_fib_pool[num]) {
			kfree(*hw_fib_p);
			kfree(*fib_p);
			++fib_p;
			++hw_fib_p;
		}
		kfree(fib_pool);
free_hw_fib_pool:
		kfree(hw_fib_pool);
free_fib:
		kfree(fib);
		t_lock = dev->queues->queue[HostNormCmdQueue].lock;
		spin_lock_irqsave(t_lock, flags);
	}
	/*
	 * There are no more AIFs
	 */
	t_lock = dev->queues->queue[HostNormCmdQueue].lock;
	spin_unlock_irqrestore(t_lock, flags);
}
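
/**
 *	aac_send_wellness_command	-	pass a wellness string to firmware
 *	@dev: Adapter to address
 *	@wellness_str: Pre-formatted wellness buffer (e.g. host time)
 *	@datasize: Length of @wellness_str in bytes
 *
 *	Wraps the buffer in a BMIC WRITE_HOST_WELLNESS SRB aimed at the
 *	virtual device bus/target reported by the firmware, with a single
 *	64-bit scatter-gather element describing the DMA payload.
 */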
static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str,
							u32 datasize)
{
	struct aac_srb *srbcmd;
	struct sgmap64 *sg64;
	dma_addr_t addr;
	char *dma_buf;
	struct fib *fibptr;
	int ret = -ENOMEM;
	u32 vbus, vid;

	fibptr = aac_fib_alloc(dev);
	if (!fibptr)
		goto out;

	dma_buf = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr,
				     GFP_KERNEL);
	if (!dma_buf)
		goto fib_free_out;

	aac_fib_init(fibptr);

	vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_bus);
	vid = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_target);

	srbcmd = (struct aac_srb *)fib_data(fibptr);

	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
	srbcmd->channel = cpu_to_le32(vbus);
	srbcmd->id = cpu_to_le32(vid);
	srbcmd->lun = 0;
	srbcmd->flags = cpu_to_le32(SRB_DataOut);
	srbcmd->timeout = cpu_to_le32(10);
	srbcmd->retry_limit = 0;
	srbcmd->cdb_size = cpu_to_le32(12);
	srbcmd->count = cpu_to_le32(datasize);

	memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
	srbcmd->cdb[0] = BMIC_OUT;
	srbcmd->cdb[6] = WRITE_HOST_WELLNESS;
	memcpy(dma_buf, (char *)wellness_str, datasize);

	sg64 = (struct sgmap64 *)&srbcmd->sg;
	sg64->count = cpu_to_le32(1);
	sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16));
	sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
	sg64->sg[0].count = cpu_to_le32(datasize);

	ret = aac_fib_send(ScsiPortCommand64, fibptr, sizeof(struct aac_srb),
				FsaNormal, 1, 1, NULL, NULL);

	dma_free_coherent(&dev->pdev->dev, datasize, dma_buf, addr);

	/*
	 * Do not set XferState to zero unless
	 * receives a response from F/W
	 */
	if (ret >= 0)
		aac_fib_complete(fibptr);

	/*
	 * FIB should be freed only after
	 * getting the response from the F/W
	 */
	if (ret != -ERESTARTSYS)
		goto fib_free_out;

out:
	return ret;

fib_free_out:
	aac_fib_free(fibptr);
	goto out;
}
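
/**
 *	aac_send_safw_hostttime	-	send host time to SA firmware
 *	@dev: Adapter to update
 *	@now: Current host time
 *
 *	Converts local time to BCD fields inside the "<HW>TD...ZZ"
 *	wellness template and ships it via aac_send_wellness_command().
 *	Only valid for sa_firmware adapters.
 */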
int aac_send_safw_hostttime(struct aac_dev *dev, struct timeval *now)
{
	struct tm cur_tm;
	char wellness_str[] = "<HW>TD\010\0\0\0\0\0\0\0\0\0DW\0\0ZZ";
	u32 datasize = sizeof(wellness_str);
	unsigned long local_time;
	int ret = -ENODEV;

	if (!dev->sa_firmware)
		goto out;

	local_time = (u32)(now->tv_sec - (sys_tz.tz_minuteswest * 60));
	time_to_tm(local_time, 0, &cur_tm);
	cur_tm.tm_mon += 1;
	cur_tm.tm_year += 1900;
	wellness_str[8] = bin2bcd(cur_tm.tm_hour);
	wellness_str[9] = bin2bcd(cur_tm.tm_min);
	wellness_str[10] = bin2bcd(cur_tm.tm_sec);
	wellness_str[12] = bin2bcd(cur_tm.tm_mon);
	wellness_str[13] = bin2bcd(cur_tm.tm_mday);
	wellness_str[14] = bin2bcd(cur_tm.tm_year / 100);
	wellness_str[15] = bin2bcd(cur_tm.tm_year % 100);

	ret = aac_send_wellness_command(dev, wellness_str, datasize);

out:
	return ret;
}
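
/**
 *	aac_send_hosttime	-	send host time to legacy firmware
 *	@dev: Adapter to update
 *	@now: Current host time
 *
 *	Sends the raw seconds value in a SendHostTime FIB; the
 *	FIB-lifetime rules mirror aac_send_wellness_command() above.
 */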
int aac_send_hosttime(struct aac_dev *dev, struct timeval *now)
{
	int ret = -ENOMEM;
	struct fib *fibptr;
	__le32 *info;

	fibptr = aac_fib_alloc(dev);
	if (!fibptr)
		goto out;

	aac_fib_init(fibptr);
	info = (__le32 *)fib_data(fibptr);
	*info = cpu_to_le32(now->tv_sec);
	ret = aac_fib_send(SendHostTime, fibptr, sizeof(*info), FsaNormal,
					1, 1, NULL, NULL);

	/*
	 * Do not set XferState to zero unless
	 * receives a response from F/W
	 */
	if (ret >= 0)
		aac_fib_complete(fibptr);

	/*
	 * FIB should be freed only after
	 * getting the response from the F/W
	 */
	if (ret != -ERESTARTSYS)
		aac_fib_free(fibptr);

out:
	return ret;
}

/**
 *	aac_command_thread	-	command processing thread
 *	@data: Adapter to monitor
 *
 *	Waits on the commandready event in its queue. When the event gets set
 *	it will pull FIBs off its queue. It will continue to pull FIBs off
 *	until the queue is empty. When the queue is empty it will wait for
 *	more FIBs.
 */
int aac_command_thread(void *data)
{
	struct aac_dev *dev = data;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long next_jiffies = jiffies + HZ;
	unsigned long next_check_jiffies = next_jiffies;
	long difference = HZ;

	/*
	 * We can only have one thread per adapter for AIFs.
	 */
	if (dev->aif_thread)
		return -EINVAL;

	/*
	 * Let the DPC know it has a place to send the AIFs to.
	 */
	dev->aif_thread = 1;
	add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	dprintk ((KERN_INFO "aac_command_thread start\n"));
	while (1) {

		aac_process_events(dev);

		/*
		 * Background activity
		 */
		if ((time_before(next_check_jiffies,next_jiffies))
		 && ((difference = next_check_jiffies - jiffies) <= 0)) {
			next_check_jiffies = next_jiffies;
			if (aac_check_health(dev) == 0) {
				difference = ((long)(unsigned)check_interval)
					   * HZ;
				next_check_jiffies = jiffies + difference;
			} else if (!dev->queues)
				break;
		}
		if (!time_before(next_check_jiffies,next_jiffies)
		 && ((difference = next_jiffies - jiffies) <= 0)) {
			struct timeval now;
			int ret;

			/* Don't even try to talk to adapter if its sick */
			ret = aac_check_health(dev);
			if (ret || !dev->queues)
				break;
			next_check_jiffies = jiffies
					   + ((long)(unsigned)check_interval)
					   * HZ;
			do_gettimeofday(&now);

			/* Synchronize our watches */
			if (((1000000 - (1000000 / HZ)) > now.tv_usec)
			 && (now.tv_usec > (1000000 / HZ)))
				difference = (((1000000 - now.tv_usec) * HZ)
				  + 500000) / 1000000;
			else {
				if (now.tv_usec > 500000)
					++now.tv_sec;

				if (dev->sa_firmware)
					ret =
					aac_send_safw_hostttime(dev, &now);
				else
					ret = aac_send_hosttime(dev, &now);

				difference = (long)(unsigned)update_interval*HZ;
			}
			next_jiffies = jiffies + difference;
			if (time_before(next_check_jiffies,next_jiffies))
				difference = next_check_jiffies - jiffies;
		}
		if (difference <= 0)
			difference = 1;
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop())
			break;

		schedule_timeout(difference);

		if (kthread_should_stop())
			break;
	}
	if (dev->queues)
		remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	dev->aif_thread = 0;
	return 0;
}
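
/**
 *	aac_acquire_irq	-	register adapter interrupt handlers
 *	@dev: Adapter to wire up
 *
 *	Requests one IRQ per MSI-X vector when MSI-X is active, otherwise
 *	a single (possibly shared) legacy/MSI interrupt. On failure the
 *	already-registered vectors are released and -1 is returned.
 */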
int aac_acquire_irq(struct aac_dev *dev)
{
	int i;
	int j;
	int ret = 0;

	if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) {
		for (i = 0; i < dev->max_msix; i++) {
			dev->aac_msix[i].vector_no = i;
			dev->aac_msix[i].dev = dev;
			if (request_irq(pci_irq_vector(dev->pdev, i),
					dev->a_ops.adapter_intr,
					0, "aacraid", &(dev->aac_msix[i]))) {
				printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
					   dev->name, dev->id, i);
				for (j = 0; j < i; j++)
					free_irq(pci_irq_vector(dev->pdev, j),
						 &(dev->aac_msix[j]));
				pci_disable_msix(dev->pdev);
				ret = -1;
			}
		}
	} else {
		dev->aac_msix[0].vector_no = 0;
		dev->aac_msix[0].dev = dev;

		if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
				IRQF_SHARED, "aacraid",
				&(dev->aac_msix[0])) < 0) {
			if (dev->msi)
				pci_disable_msi(dev->pdev);
			printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
					dev->name, dev->id);
			ret = -1;
		}
	}
	return ret;
}
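
/**
 *	aac_free_irq	-	release adapter interrupt handlers
 *	@dev: Adapter to tear down
 *
 *	Frees every registered vector (per-vector for MSI-X on PMC
 *	series 6/7/8/9 controllers, the single IRQ otherwise) and then
 *	disables MSI or MSI-X as appropriate.
 */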
void aac_free_irq(struct aac_dev *dev)
{
	int i;

	if (dev->pdev->device == PMC_DEVICE_S6 ||
	    dev->pdev->device == PMC_DEVICE_S7 ||
	    dev->pdev->device == PMC_DEVICE_S8 ||
	    dev->pdev->device == PMC_DEVICE_S9) {
		if (dev->max_msix > 1) {
			for (i = 0; i < dev->max_msix; i++)
				free_irq(pci_irq_vector(dev->pdev, i),
					 &(dev->aac_msix[i]));
		} else {
			free_irq(dev->pdev->irq, &(dev->aac_msix[0]));
		}
	} else {
		free_irq(dev->pdev->irq, dev);
	}
	if (dev->msi)
		pci_disable_msi(dev->pdev);
	else if (dev->max_msix > 1)
		pci_disable_msix(dev->pdev);
}