qedf_main.c

  1. /*
  2. * QLogic FCoE Offload Driver
  3. * Copyright (c) 2016-2017 Cavium Inc.
  4. *
  5. * This software is available under the terms of the GNU General Public License
  6. * (GPL) Version 2, available from the file COPYING in the main directory of
  7. * this source tree.
  8. */
  9. #include <linux/init.h>
  10. #include <linux/kernel.h>
  11. #include <linux/module.h>
  12. #include <linux/pci.h>
  13. #include <linux/device.h>
  14. #include <linux/highmem.h>
  15. #include <linux/crc32.h>
  16. #include <linux/interrupt.h>
  17. #include <linux/list.h>
  18. #include <linux/kthread.h>
  19. #include <scsi/libfc.h>
  20. #include <scsi/scsi_host.h>
  21. #include <scsi/fc_frame.h>
  22. #include <linux/if_ether.h>
  23. #include <linux/if_vlan.h>
  24. #include <linux/cpu.h>
  25. #include "qedf.h"
  26. #include <uapi/linux/pci_regs.h>
  27. const struct qed_fcoe_ops *qed_ops;
  28. static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id);
  29. static void qedf_remove(struct pci_dev *pdev);
  30. extern struct qedf_debugfs_ops qedf_debugfs_ops;
  31. extern struct file_operations qedf_dbg_fops;
  32. /*
  33. * Driver module parameters.
  34. */
  35. static unsigned int qedf_dev_loss_tmo = 60;
  36. module_param_named(dev_loss_tmo, qedf_dev_loss_tmo, int, S_IRUGO);
  37. MODULE_PARM_DESC(dev_loss_tmo, " dev_loss_tmo setting for attached "
  38. "remote ports (default 60)");
  39. uint qedf_debug = QEDF_LOG_INFO;
  40. module_param_named(debug, qedf_debug, uint, S_IRUGO);
  41. MODULE_PARM_DESC(debug, " Debug mask. Pass '1' to enable default debugging"
  42. " mask");
  43. static uint qedf_fipvlan_retries = 30;
  44. module_param_named(fipvlan_retries, qedf_fipvlan_retries, int, S_IRUGO);
  45. MODULE_PARM_DESC(fipvlan_retries, " Number of FIP VLAN requests to attempt "
  46. "before giving up (default 30)");
  47. static uint qedf_fallback_vlan = QEDF_FALLBACK_VLAN;
  48. module_param_named(fallback_vlan, qedf_fallback_vlan, int, S_IRUGO);
  49. MODULE_PARM_DESC(fallback_vlan, " VLAN ID to try if fip vlan request fails "
  50. "(default 1002).");
  51. static uint qedf_default_prio = QEDF_DEFAULT_PRIO;
  52. module_param_named(default_prio, qedf_default_prio, int, S_IRUGO);
  53. MODULE_PARM_DESC(default_prio, " Default 802.1q priority for FIP and FCoE"
  54. " traffic (default 3).");
  55. uint qedf_dump_frames;
  56. module_param_named(dump_frames, qedf_dump_frames, int, S_IRUGO | S_IWUSR);
  57. MODULE_PARM_DESC(dump_frames, " Print the skb data of FIP and FCoE frames "
  58. "(default off)");
  59. static uint qedf_queue_depth;
  60. module_param_named(queue_depth, qedf_queue_depth, int, S_IRUGO);
  61. MODULE_PARM_DESC(queue_depth, " Sets the queue depth for all LUNs discovered "
  62. "by the qedf driver. Default is 0 (use OS default).");
  63. uint qedf_io_tracing;
  64. module_param_named(io_tracing, qedf_io_tracing, int, S_IRUGO | S_IWUSR);
  65. MODULE_PARM_DESC(io_tracing, " Enable logging of SCSI requests/completions "
  66. "into trace buffer. (default off).");
  67. static uint qedf_max_lun = MAX_FIBRE_LUNS;
  68. module_param_named(max_lun, qedf_max_lun, int, S_IRUGO);
  69. MODULE_PARM_DESC(max_lun, " Sets the maximum luns per target that the driver "
  70. "supports. (default 0xffffffff)");
  71. uint qedf_link_down_tmo;
  72. module_param_named(link_down_tmo, qedf_link_down_tmo, int, S_IRUGO);
  73. MODULE_PARM_DESC(link_down_tmo, " Delays informing the fcoe transport that the "
  74. "link is down by N seconds.");
  75. bool qedf_retry_delay;
  76. module_param_named(retry_delay, qedf_retry_delay, bool, S_IRUGO | S_IWUSR);
  77. MODULE_PARM_DESC(retry_delay, " Enable/disable handling of the FCP_RSP IU retry "
  78. "delay (default off).");
  79. static uint qedf_dp_module;
  80. module_param_named(dp_module, qedf_dp_module, uint, S_IRUGO);
  81. MODULE_PARM_DESC(dp_module, " Bit flags to control verbose printk output passed to the "
  82. "qed module during probe.");
  83. static uint qedf_dp_level = QED_LEVEL_NOTICE;
  84. module_param_named(dp_level, qedf_dp_level, uint, S_IRUGO);
  85. MODULE_PARM_DESC(dp_level, " printk verbosity control passed to qed module "
  86. "during probe (0-3: 0 more verbose).");
  87. struct workqueue_struct *qedf_io_wq;
  88. static struct fcoe_percpu_s qedf_global;
  89. static DEFINE_SPINLOCK(qedf_global_lock);
  90. static struct kmem_cache *qedf_io_work_cache;
  91. void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id)
  92. {
  93. qedf->vlan_id = vlan_id;
  94. qedf->vlan_id |= qedf_default_prio << VLAN_PRIO_SHIFT;
  95. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Setting vlan_id=%04x "
  96. "prio=%d.\n", vlan_id, qedf_default_prio);
  97. }
  98. /* Returns true if we have a valid vlan, false otherwise */
  99. static bool qedf_initiate_fipvlan_req(struct qedf_ctx *qedf)
  100. {
  101. int rc;
  102. if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
  103. QEDF_ERR(&(qedf->dbg_ctx), "Link not up.\n");
  104. return false;
  105. }
  106. while (qedf->fipvlan_retries--) {
  107. if (qedf->vlan_id > 0)
  108. return true;
  109. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  110. "Retry %d.\n", qedf->fipvlan_retries);
  111. init_completion(&qedf->fipvlan_compl);
  112. qedf_fcoe_send_vlan_req(qedf);
  113. rc = wait_for_completion_timeout(&qedf->fipvlan_compl,
  114. 1 * HZ);
  115. if (rc > 0) {
  116. fcoe_ctlr_link_up(&qedf->ctlr);
  117. return true;
  118. }
  119. }
  120. return false;
  121. }
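/*
 * Delayed-work handler for link state changes. On link up, attempt FIP VLAN
 * discovery and fall back to the configured fallback VLAN if no response is
 * received; on link down, notify libfcoe, wait for all offloaded sessions to
 * upload, and reset the FIP VLAN retry count.
 */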
  122. static void qedf_handle_link_update(struct work_struct *work)
  123. {
  124. struct qedf_ctx *qedf =
  125. container_of(work, struct qedf_ctx, link_update.work);
  126. int rc;
  127. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Entered.\n");
  128. if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
  129. rc = qedf_initiate_fipvlan_req(qedf);
  130. if (rc)
  131. return;
  132. /*
  133. * If we get here then we never received a response to our
  134. * fip vlan request so set the vlan_id to the default and
  135. * tell FCoE that the link is up
  136. */
  137. QEDF_WARN(&(qedf->dbg_ctx), "Did not receive FIP VLAN "
  138. "response, falling back to default VLAN %d.\n",
  139. qedf_fallback_vlan);
  140. qedf_set_vlan_id(qedf, qedf_fallback_vlan);
  141. /*
  142. * Zero out data_src_addr so we'll update it with the new
  143. * lport port_id
  144. */
  145. eth_zero_addr(qedf->data_src_addr);
  146. fcoe_ctlr_link_up(&qedf->ctlr);
  147. } else if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
  148. /*
  149. * If we hit here and link_down_tmo_valid is still 1 it means
  150. * that link_down_tmo timed out so set it to 0 to make sure any
  151. * other readers have accurate state.
  152. */
  153. atomic_set(&qedf->link_down_tmo_valid, 0);
  154. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  155. "Calling fcoe_ctlr_link_down().\n");
  156. fcoe_ctlr_link_down(&qedf->ctlr);
  157. qedf_wait_for_upload(qedf);
  158. /* Reset the number of FIP VLAN retries */
  159. qedf->fipvlan_retries = qedf_fipvlan_retries;
  160. }
  161. }
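/*
 * Methods used to derive the FCoE source MAC address for a session, in the
 * order qedf_set_data_src_addr() below attempts them.
 */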
  162. #define QEDF_FCOE_MAC_METHOD_GRANTED_MAC 1
  163. #define QEDF_FCOE_MAC_METHOD_FCF_MAP 2
  164. #define QEDF_FCOE_MAC_METHOD_FCOE_SET_MAC 3
  165. static void qedf_set_data_src_addr(struct qedf_ctx *qedf, struct fc_frame *fp)
  166. {
  167. u8 *granted_mac;
  168. struct fc_frame_header *fh = fc_frame_header_get(fp);
  169. u8 fc_map[3];
  170. int method = 0;
  171. /* Get granted MAC address from FIP FLOGI payload */
  172. granted_mac = fr_cb(fp)->granted_mac;
  173. /*
  174. * We set the source MAC for FCoE traffic based on the Granted MAC
  175. * address from the switch.
  176. *
  177. * If granted_mac is non-zero, we use that.
  178. * If granted_mac is zeroed out, create the FCoE MAC based on
  179. * the sel_fcf->fc_map and the d_id of the FLOGI frame.
  180. * If sel_fcf->fc_map is 0 then we use the default FCF-MAC plus the
  181. * d_id of the FLOGI frame.
  182. */
  183. if (!is_zero_ether_addr(granted_mac)) {
  184. ether_addr_copy(qedf->data_src_addr, granted_mac);
  185. method = QEDF_FCOE_MAC_METHOD_GRANTED_MAC;
  186. } else if (qedf->ctlr.sel_fcf->fc_map != 0) {
  187. hton24(fc_map, qedf->ctlr.sel_fcf->fc_map);
  188. qedf->data_src_addr[0] = fc_map[0];
  189. qedf->data_src_addr[1] = fc_map[1];
  190. qedf->data_src_addr[2] = fc_map[2];
  191. qedf->data_src_addr[3] = fh->fh_d_id[0];
  192. qedf->data_src_addr[4] = fh->fh_d_id[1];
  193. qedf->data_src_addr[5] = fh->fh_d_id[2];
  194. method = QEDF_FCOE_MAC_METHOD_FCF_MAP;
  195. } else {
  196. fc_fcoe_set_mac(qedf->data_src_addr, fh->fh_d_id);
  197. method = QEDF_FCOE_MAC_METHOD_FCOE_SET_MAC;
  198. }
  199. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  200. "QEDF data_src_mac=%pM method=%d.\n", qedf->data_src_addr, method);
  201. }
  202. static void qedf_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
  203. void *arg)
  204. {
  205. struct fc_exch *exch = fc_seq_exch(seq);
  206. struct fc_lport *lport = exch->lp;
  207. struct qedf_ctx *qedf = lport_priv(lport);
  208. if (!qedf) {
  209. QEDF_ERR(NULL, "qedf is NULL.\n");
  210. return;
  211. }
  212. /*
  213. * If ERR_PTR is set then don't try to stat anything as it will cause
  214. * a crash when we access fp.
  215. */
  216. if (IS_ERR(fp)) {
  217. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
  218. "fp has IS_ERR() set.\n");
  219. goto skip_stat;
  220. }
  221. /* Log stats for FLOGI reject */
  222. if (fc_frame_payload_op(fp) == ELS_LS_RJT)
  223. qedf->flogi_failed++;
  224. else if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
  225. /* Set the source MAC we will use for FCoE traffic */
  226. qedf_set_data_src_addr(qedf, fp);
  227. }
  228. /* Complete flogi_compl so we can proceed to sending ADISCs */
  229. complete(&qedf->flogi_compl);
  230. skip_stat:
  231. /* Report response to libfc */
  232. fc_lport_flogi_resp(seq, fp, lport);
  233. }
  234. static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did,
  235. struct fc_frame *fp, unsigned int op,
  236. void (*resp)(struct fc_seq *,
  237. struct fc_frame *,
  238. void *),
  239. void *arg, u32 timeout)
  240. {
  241. struct qedf_ctx *qedf = lport_priv(lport);
  242. /*
  243. * Intercept FLOGI for statistic purposes. Note we use the resp
  244. * callback to tell if this is really a flogi.
  245. */
  246. if (resp == fc_lport_flogi_resp) {
  247. qedf->flogi_cnt++;
  248. return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp,
  249. arg, timeout);
  250. }
  251. return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
  252. }
  253. int qedf_send_flogi(struct qedf_ctx *qedf)
  254. {
  255. struct fc_lport *lport;
  256. struct fc_frame *fp;
  257. lport = qedf->lport;
  258. if (!lport->tt.elsct_send)
  259. return -EINVAL;
  260. fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
  261. if (!fp) {
  262. QEDF_ERR(&(qedf->dbg_ctx), "fc_frame_alloc failed.\n");
  263. return -ENOMEM;
  264. }
  265. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
  266. "Sending FLOGI to reestablish session with switch.\n");
  267. lport->tt.elsct_send(lport, FC_FID_FLOGI, fp,
  268. ELS_FLOGI, qedf_flogi_resp, lport, lport->r_a_tov);
  269. init_completion(&qedf->flogi_compl);
  270. return 0;
  271. }
  272. struct qedf_tmp_rdata_item {
  273. struct fc_rport_priv *rdata;
  274. struct list_head list;
  275. };
  276. /*
  277. * This function is called if link_down_tmo is in use. If we get a link up and
  278. * link_down_tmo has not expired then use just FLOGI/ADISC to recover our
  279. * sessions with targets. Otherwise, just call fcoe_ctlr_link_up().
  280. */
  281. static void qedf_link_recovery(struct work_struct *work)
  282. {
  283. struct qedf_ctx *qedf =
  284. container_of(work, struct qedf_ctx, link_recovery.work);
  285. struct qedf_rport *fcport;
  286. struct fc_rport_priv *rdata;
  287. struct qedf_tmp_rdata_item *rdata_item, *tmp_rdata_item;
  288. bool rc;
  289. int retries = 30;
  290. int rval, i;
  291. struct list_head rdata_login_list;
  292. INIT_LIST_HEAD(&rdata_login_list);
  293. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  294. "Link down tmo did not expire.\n");
  295. /*
  296. * Essentially reset the fcoe_ctlr here without affecting the state
  297. * of the libfc structs.
  298. */
  299. qedf->ctlr.state = FIP_ST_LINK_WAIT;
  300. fcoe_ctlr_link_down(&qedf->ctlr);
  301. /*
  302. * Bring the link up before we send the fipvlan request so libfcoe
  303. * can select a new fcf in parallel
  304. */
  305. fcoe_ctlr_link_up(&qedf->ctlr);
  306. /* Since the link went down and came back up, re-verify which VLAN we're on */
  307. qedf->fipvlan_retries = qedf_fipvlan_retries;
  308. rc = qedf_initiate_fipvlan_req(qedf);
  309. /* If getting the VLAN fails, set the VLAN to the fallback one */
  310. if (!rc)
  311. qedf_set_vlan_id(qedf, qedf_fallback_vlan);
  312. /*
  313. * We need to wait for an FCF to be selected due to the
  314. * fcoe_ctlr_link_up, otherwise the FLOGI will be rejected.
  315. */
  316. while (retries > 0) {
  317. if (qedf->ctlr.sel_fcf) {
  318. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  319. "FCF reselected, proceeding with FLOGI.\n");
  320. break;
  321. }
  322. msleep(500);
  323. retries--;
  324. }
  325. if (retries < 1) {
  326. QEDF_ERR(&(qedf->dbg_ctx), "Exhausted retries waiting for "
  327. "FCF selection.\n");
  328. return;
  329. }
  330. rval = qedf_send_flogi(qedf);
  331. if (rval)
  332. return;
  333. /* Wait for FLOGI completion before proceeding with sending ADISCs */
  334. i = wait_for_completion_timeout(&qedf->flogi_compl,
  335. qedf->lport->r_a_tov);
  336. if (i == 0) {
  337. QEDF_ERR(&(qedf->dbg_ctx), "FLOGI timed out.\n");
  338. return;
  339. }
  340. /*
  341. * Call fc_rport_login() which will cause libfc to send an
  342. * ADISC since the rport is in state ready.
  343. */
  344. rcu_read_lock();
  345. list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
  346. rdata = fcport->rdata;
  347. if (rdata == NULL)
  348. continue;
  349. rdata_item = kzalloc(sizeof(struct qedf_tmp_rdata_item),
  350. GFP_ATOMIC);
  351. if (!rdata_item)
  352. continue;
  353. if (kref_get_unless_zero(&rdata->kref)) {
  354. rdata_item->rdata = rdata;
  355. list_add(&rdata_item->list, &rdata_login_list);
  356. } else
  357. kfree(rdata_item);
  358. }
  359. rcu_read_unlock();
  360. /*
  361. * Do the fc_rport_login outside of the rcu lock so we don't take a
  362. * mutex in an atomic context.
  363. */
  364. list_for_each_entry_safe(rdata_item, tmp_rdata_item, &rdata_login_list,
  365. list) {
  366. list_del(&rdata_item->list);
  367. fc_rport_login(rdata_item->rdata);
  368. kref_put(&rdata_item->rdata->kref, fc_rport_destroy);
  369. kfree(rdata_item);
  370. }
  371. }
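/*
 * Translate the qed link speed and supported capabilities into the FC
 * transport port-speed values exposed through fc_host.
 */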
  372. static void qedf_update_link_speed(struct qedf_ctx *qedf,
  373. struct qed_link_output *link)
  374. {
  375. struct fc_lport *lport = qedf->lport;
  376. lport->link_speed = FC_PORTSPEED_UNKNOWN;
  377. lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;
  378. /* Set fc_host link speed */
  379. switch (link->speed) {
  380. case 10000:
  381. lport->link_speed = FC_PORTSPEED_10GBIT;
  382. break;
  383. case 25000:
  384. lport->link_speed = FC_PORTSPEED_25GBIT;
  385. break;
  386. case 40000:
  387. lport->link_speed = FC_PORTSPEED_40GBIT;
  388. break;
  389. case 50000:
  390. lport->link_speed = FC_PORTSPEED_50GBIT;
  391. break;
  392. case 100000:
  393. lport->link_speed = FC_PORTSPEED_100GBIT;
  394. break;
  395. default:
  396. lport->link_speed = FC_PORTSPEED_UNKNOWN;
  397. break;
  398. }
  399. /*
  400. * Set supported link speed by querying the supported
  401. * capabilities of the link.
  402. */
  403. if (link->supported_caps & SUPPORTED_10000baseKR_Full)
  404. lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
  405. if (link->supported_caps & SUPPORTED_25000baseKR_Full)
  406. lport->link_supported_speeds |= FC_PORTSPEED_25GBIT;
  407. if (link->supported_caps & SUPPORTED_40000baseLR4_Full)
  408. lport->link_supported_speeds |= FC_PORTSPEED_40GBIT;
  409. if (link->supported_caps & SUPPORTED_50000baseKR2_Full)
  410. lport->link_supported_speeds |= FC_PORTSPEED_50GBIT;
  411. if (link->supported_caps & SUPPORTED_100000baseKR4_Full)
  412. lport->link_supported_speeds |= FC_PORTSPEED_100GBIT;
  413. fc_host_supported_speeds(lport->host) = lport->link_supported_speeds;
  414. }
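/*
 * qed callback invoked on physical link state changes. On link up this waits
 * for DCBX completion before scheduling the link_update/link_recovery work;
 * on link down it optionally arms link_down_tmo before informing the FCoE
 * layer.
 */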
  415. static void qedf_link_update(void *dev, struct qed_link_output *link)
  416. {
  417. struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
  418. if (link->link_up) {
  419. QEDF_ERR(&(qedf->dbg_ctx), "LINK UP (%d GB/s).\n",
  420. link->speed / 1000);
  421. /* Cancel any pending link down work */
  422. cancel_delayed_work(&qedf->link_update);
  423. atomic_set(&qedf->link_state, QEDF_LINK_UP);
  424. qedf_update_link_speed(qedf, link);
  425. if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE) {
  426. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  427. "DCBx done.\n");
  428. if (atomic_read(&qedf->link_down_tmo_valid) > 0)
  429. queue_delayed_work(qedf->link_update_wq,
  430. &qedf->link_recovery, 0);
  431. else
  432. queue_delayed_work(qedf->link_update_wq,
  433. &qedf->link_update, 0);
  434. atomic_set(&qedf->link_down_tmo_valid, 0);
  435. }
  436. } else {
  437. QEDF_ERR(&(qedf->dbg_ctx), "LINK DOWN.\n");
  438. atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
  439. atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
  440. /*
  441. * Flag that we're waiting for the link to come back up before
  442. * informing the fcoe layer of the event.
  443. */
  444. if (qedf_link_down_tmo > 0) {
  445. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  446. "Starting link down tmo.\n");
  447. atomic_set(&qedf->link_down_tmo_valid, 1);
  448. }
  449. qedf->vlan_id = 0;
  450. qedf_update_link_speed(qedf, link);
  451. queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
  452. qedf_link_down_tmo * HZ);
  453. }
  454. }
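/*
 * qed DCBX async-event callback. Once a valid, enabled operational DCBX
 * configuration is reported, mark DCBX negotiation as done and, if the link
 * is already up, schedule the link_update/link_recovery work.
 */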
  455. static void qedf_dcbx_handler(void *dev, struct qed_dcbx_get *get, u32 mib_type)
  456. {
  457. struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
  458. QEDF_ERR(&(qedf->dbg_ctx), "DCBx event valid=%d enabled=%d fcoe "
  459. "prio=%d.\n", get->operational.valid, get->operational.enabled,
  460. get->operational.app_prio.fcoe);
  461. if (get->operational.enabled && get->operational.valid) {
  462. /* If DCBX was already negotiated on link up then just exit */
  463. if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE) {
  464. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  465. "DCBX already set on link up.\n");
  466. return;
  467. }
  468. atomic_set(&qedf->dcbx, QEDF_DCBX_DONE);
  469. if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
  470. if (atomic_read(&qedf->link_down_tmo_valid) > 0)
  471. queue_delayed_work(qedf->link_update_wq,
  472. &qedf->link_recovery, 0);
  473. else
  474. queue_delayed_work(qedf->link_update_wq,
  475. &qedf->link_update, 0);
  476. atomic_set(&qedf->link_down_tmo_valid, 0);
  477. }
  478. }
  479. }
  480. static u32 qedf_get_login_failures(void *cookie)
  481. {
  482. struct qedf_ctx *qedf;
  483. qedf = (struct qedf_ctx *)cookie;
  484. return qedf->flogi_failed;
  485. }
  486. static struct qed_fcoe_cb_ops qedf_cb_ops = {
  487. {
  488. .link_update = qedf_link_update,
  489. .dcbx_aen = qedf_dcbx_handler,
  490. }
  491. };
  492. /*
  493. * Various transport templates.
  494. */
  495. static struct scsi_transport_template *qedf_fc_transport_template;
  496. static struct scsi_transport_template *qedf_fc_vport_transport_template;
  497. /*
  498. * SCSI EH handlers
  499. */
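/*
 * eh_abort handler: issue an ABTS for the outstanding command and wait for
 * its completion. SUCCESS is returned once all driver and firmware
 * references to the command have been released.
 */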
  500. static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
  501. {
  502. struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
  503. struct fc_rport_libfc_priv *rp = rport->dd_data;
  504. struct qedf_rport *fcport;
  505. struct fc_lport *lport;
  506. struct qedf_ctx *qedf;
  507. struct qedf_ioreq *io_req;
  508. int rc = FAILED;
  509. int rval;
  510. if (fc_remote_port_chkready(rport)) {
  511. QEDF_ERR(NULL, "rport not ready\n");
  512. goto out;
  513. }
  514. lport = shost_priv(sc_cmd->device->host);
  515. qedf = (struct qedf_ctx *)lport_priv(lport);
  516. if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
  517. QEDF_ERR(&(qedf->dbg_ctx), "link not ready.\n");
  518. goto out;
  519. }
  520. fcport = (struct qedf_rport *)&rp[1];
  521. io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
  522. if (!io_req) {
  523. QEDF_ERR(&(qedf->dbg_ctx), "io_req is NULL.\n");
  524. rc = SUCCESS;
  525. goto out;
  526. }
  527. if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
  528. test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
  529. test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
  530. QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
  531. "cleanup or abort processing or already "
  532. "completed.\n", io_req->xid);
  533. rc = SUCCESS;
  534. goto out;
  535. }
  536. QEDF_ERR(&(qedf->dbg_ctx), "Aborting io_req sc_cmd=%p xid=0x%x "
  537. "fp_idx=%d.\n", sc_cmd, io_req->xid, io_req->fp_idx);
  538. if (qedf->stop_io_on_error) {
  539. qedf_stop_all_io(qedf);
  540. rc = SUCCESS;
  541. goto out;
  542. }
  543. init_completion(&io_req->abts_done);
  544. rval = qedf_initiate_abts(io_req, true);
  545. if (rval) {
  546. QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
  547. goto out;
  548. }
  549. wait_for_completion(&io_req->abts_done);
  550. if (io_req->event == QEDF_IOREQ_EV_ABORT_SUCCESS ||
  551. io_req->event == QEDF_IOREQ_EV_ABORT_FAILED ||
  552. io_req->event == QEDF_IOREQ_EV_CLEANUP_SUCCESS) {
  553. /*
  554. * If we get a response to the abort, this is success from
  555. * the perspective that all references to the command have
  556. * been removed from the driver and firmware.
  557. */
  558. rc = SUCCESS;
  559. } else {
  560. /* If the abort and cleanup failed then return a failure */
  561. rc = FAILED;
  562. }
  563. if (rc == SUCCESS)
  564. QEDF_ERR(&(qedf->dbg_ctx), "ABTS succeeded, xid=0x%x.\n",
  565. io_req->xid);
  566. else
  567. QEDF_ERR(&(qedf->dbg_ctx), "ABTS failed, xid=0x%x.\n",
  568. io_req->xid);
  569. out:
  570. return rc;
  571. }
  572. static int qedf_eh_target_reset(struct scsi_cmnd *sc_cmd)
  573. {
  574. QEDF_ERR(NULL, "TARGET RESET Issued...");
  575. return qedf_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
  576. }
  577. static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd)
  578. {
  579. QEDF_ERR(NULL, "LUN RESET Issued...\n");
  580. return qedf_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
  581. }
  582. void qedf_wait_for_upload(struct qedf_ctx *qedf)
  583. {
  584. while (1) {
  585. if (atomic_read(&qedf->num_offloads))
  586. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  587. "Waiting for all uploads to complete.\n");
  588. else
  589. break;
  590. msleep(500);
  591. }
  592. }
  593. /* Performs soft reset of qedf_ctx by simulating a link down/up */
  594. static void qedf_ctx_soft_reset(struct fc_lport *lport)
  595. {
  596. struct qedf_ctx *qedf;
  597. if (lport->vport) {
  598. QEDF_ERR(NULL, "Cannot issue host reset on NPIV port.\n");
  599. return;
  600. }
  601. qedf = lport_priv(lport);
  602. /* For host reset, essentially do a soft link up/down */
  603. atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
  604. atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
  605. queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
  606. 0);
  607. qedf_wait_for_upload(qedf);
  608. atomic_set(&qedf->link_state, QEDF_LINK_UP);
  609. qedf->vlan_id = 0;
  610. queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
  611. 0);
  612. }
  613. /* Reset the host by gracefully logging out and then logging back in */
  614. static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd)
  615. {
  616. struct fc_lport *lport;
  617. struct qedf_ctx *qedf;
  618. lport = shost_priv(sc_cmd->device->host);
  619. qedf = lport_priv(lport);
  620. if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN ||
  621. test_bit(QEDF_UNLOADING, &qedf->flags))
  622. return FAILED;
  623. QEDF_ERR(&(qedf->dbg_ctx), "HOST RESET Issued...");
  624. qedf_ctx_soft_reset(lport);
  625. return SUCCESS;
  626. }
  627. static int qedf_slave_configure(struct scsi_device *sdev)
  628. {
  629. if (qedf_queue_depth) {
  630. scsi_change_queue_depth(sdev, qedf_queue_depth);
  631. }
  632. return 0;
  633. }
  634. static struct scsi_host_template qedf_host_template = {
  635. .module = THIS_MODULE,
  636. .name = QEDF_MODULE_NAME,
  637. .this_id = -1,
  638. .cmd_per_lun = 32,
  639. .use_clustering = ENABLE_CLUSTERING,
  640. .max_sectors = 0xffff,
  641. .queuecommand = qedf_queuecommand,
  642. .shost_attrs = qedf_host_attrs,
  643. .eh_abort_handler = qedf_eh_abort,
  644. .eh_device_reset_handler = qedf_eh_device_reset, /* lun reset */
  645. .eh_target_reset_handler = qedf_eh_target_reset, /* target reset */
  646. .eh_host_reset_handler = qedf_eh_host_reset,
  647. .slave_configure = qedf_slave_configure,
  648. .dma_boundary = QED_HW_DMA_BOUNDARY,
  649. .sg_tablesize = QEDF_MAX_BDS_PER_CMD,
  650. .can_queue = FCOE_PARAMS_NUM_TASKS,
  651. .change_queue_depth = scsi_change_queue_depth,
  652. };
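/*
 * Helper for non-linear skbs: reserve space for the FCoE CRC/EOF trailer via
 * the shared fcoe_percpu_s context, serialized by qedf_global_lock.
 */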
  653. static int qedf_get_paged_crc_eof(struct sk_buff *skb, int tlen)
  654. {
  655. int rc;
  656. spin_lock(&qedf_global_lock);
  657. rc = fcoe_get_paged_crc_eof(skb, tlen, &qedf_global);
  658. spin_unlock(&qedf_global_lock);
  659. return rc;
  660. }
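/* RCU-protected lookup of an offloaded fcport by its 24-bit FC port_id. */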
  661. static struct qedf_rport *qedf_fcport_lookup(struct qedf_ctx *qedf, u32 port_id)
  662. {
  663. struct qedf_rport *fcport;
  664. struct fc_rport_priv *rdata;
  665. rcu_read_lock();
  666. list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
  667. rdata = fcport->rdata;
  668. if (rdata == NULL)
  669. continue;
  670. if (rdata->ids.port_id == port_id) {
  671. rcu_read_unlock();
  672. return fcport;
  673. }
  674. }
  675. rcu_read_unlock();
  676. /* Return NULL to caller to let them know fcport was not found */
  677. return NULL;
  678. }
  679. /* Transmits an ELS frame over an offloaded session */
  680. static int qedf_xmit_l2_frame(struct qedf_rport *fcport, struct fc_frame *fp)
  681. {
  682. struct fc_frame_header *fh;
  683. int rc = 0;
  684. fh = fc_frame_header_get(fp);
  685. if ((fh->fh_type == FC_TYPE_ELS) &&
  686. (fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
  687. switch (fc_frame_payload_op(fp)) {
  688. case ELS_ADISC:
  689. qedf_send_adisc(fcport, fp);
  690. rc = 1;
  691. break;
  692. }
  693. }
  694. return rc;
  695. }
  696. /**
  697. * qedf_xmit - qedf FCoE frame transmit function
  698. * Encapsulates a libfc frame as FCoE and transmits it via the qed LL2 interface.
  699. */
  700. static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
  701. {
  702. struct fc_lport *base_lport;
  703. struct qedf_ctx *qedf;
  704. struct ethhdr *eh;
  705. struct fcoe_crc_eof *cp;
  706. struct sk_buff *skb;
  707. struct fc_frame_header *fh;
  708. struct fcoe_hdr *hp;
  709. u8 sof, eof;
  710. u32 crc;
  711. unsigned int hlen, tlen, elen;
  712. int wlen;
  713. struct fc_stats *stats;
  714. struct fc_lport *tmp_lport;
  715. struct fc_lport *vn_port = NULL;
  716. struct qedf_rport *fcport;
  717. int rc;
  718. u16 vlan_tci = 0;
  719. qedf = (struct qedf_ctx *)lport_priv(lport);
  720. fh = fc_frame_header_get(fp);
  721. skb = fp_skb(fp);
  722. /* Filter out traffic to other NPIV ports on the same host */
  723. if (lport->vport)
  724. base_lport = shost_priv(vport_to_shost(lport->vport));
  725. else
  726. base_lport = lport;
  727. /* Flag if the destination is the base port */
  728. if (base_lport->port_id == ntoh24(fh->fh_d_id)) {
  729. vn_port = base_lport;
  730. } else {
  731. /* Go through the list of vports attached to the base_lport
  732. * and see if we have a match with the destination address.
  733. */
  734. list_for_each_entry(tmp_lport, &base_lport->vports, list) {
  735. if (tmp_lport->port_id == ntoh24(fh->fh_d_id)) {
  736. vn_port = tmp_lport;
  737. break;
  738. }
  739. }
  740. }
  741. if (vn_port && ntoh24(fh->fh_d_id) != FC_FID_FLOGI) {
  742. struct fc_rport_priv *rdata = NULL;
  743. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
  744. "Dropping FCoE frame to %06x.\n", ntoh24(fh->fh_d_id));
  745. kfree_skb(skb);
  746. rdata = fc_rport_lookup(lport, ntoh24(fh->fh_d_id));
  747. if (rdata)
  748. rdata->retries = lport->max_rport_retry_count;
  749. return -EINVAL;
  750. }
  751. /* End NPIV filtering */
  752. if (!qedf->ctlr.sel_fcf) {
  753. kfree_skb(skb);
  754. return 0;
  755. }
  756. if (!test_bit(QEDF_LL2_STARTED, &qedf->flags)) {
  757. QEDF_WARN(&(qedf->dbg_ctx), "LL2 not started\n");
  758. kfree_skb(skb);
  759. return 0;
  760. }
  761. if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
  762. QEDF_WARN(&(qedf->dbg_ctx), "qedf link down\n");
  763. kfree_skb(skb);
  764. return 0;
  765. }
  766. if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
  767. if (fcoe_ctlr_els_send(&qedf->ctlr, lport, skb))
  768. return 0;
  769. }
  770. /* Check to see if this needs to be sent on an offloaded session */
  771. fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id));
  772. if (fcport && test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
  773. rc = qedf_xmit_l2_frame(fcport, fp);
  774. /*
  775. * If the frame was successfully sent over the middle path
  776. * then do not try to also send it over the LL2 path
  777. */
  778. if (rc)
  779. return 0;
  780. }
  781. sof = fr_sof(fp);
  782. eof = fr_eof(fp);
  783. elen = sizeof(struct ethhdr);
  784. hlen = sizeof(struct fcoe_hdr);
  785. tlen = sizeof(struct fcoe_crc_eof);
  786. wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
  787. skb->ip_summed = CHECKSUM_NONE;
  788. crc = fcoe_fc_crc(fp);
  789. /* copy port crc and eof to the skb buff */
  790. if (skb_is_nonlinear(skb)) {
  791. skb_frag_t *frag;
  792. if (qedf_get_paged_crc_eof(skb, tlen)) {
  793. kfree_skb(skb);
  794. return -ENOMEM;
  795. }
  796. frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
  797. cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
  798. } else {
  799. cp = skb_put(skb, tlen);
  800. }
  801. memset(cp, 0, sizeof(*cp));
  802. cp->fcoe_eof = eof;
  803. cp->fcoe_crc32 = cpu_to_le32(~crc);
  804. if (skb_is_nonlinear(skb)) {
  805. kunmap_atomic(cp);
  806. cp = NULL;
  807. }
  808. /* adjust skb network/transport offsets to match mac/fcoe/port */
  809. skb_push(skb, elen + hlen);
  810. skb_reset_mac_header(skb);
  811. skb_reset_network_header(skb);
  812. skb->mac_len = elen;
  813. skb->protocol = htons(ETH_P_FCOE);
  814. /*
  815. * Add VLAN tag to non-offload FCoE frame based on current stored VLAN
  816. * for FIP/FCoE traffic.
  817. */
  818. __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), qedf->vlan_id);
  819. /* fill up mac and fcoe headers */
  820. eh = eth_hdr(skb);
  821. eh->h_proto = htons(ETH_P_FCOE);
  822. if (qedf->ctlr.map_dest)
  823. fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
  824. else
  825. /* insert GW address */
  826. ether_addr_copy(eh->h_dest, qedf->ctlr.dest_addr);
  827. /* Set the source MAC address */
  828. ether_addr_copy(eh->h_source, qedf->data_src_addr);
  829. hp = (struct fcoe_hdr *)(eh + 1);
  830. memset(hp, 0, sizeof(*hp));
  831. if (FC_FCOE_VER)
  832. FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
  833. hp->fcoe_sof = sof;
  834. /* update tx stats */
  835. stats = per_cpu_ptr(lport->stats, get_cpu());
  836. stats->TxFrames++;
  837. stats->TxWords += wlen;
  838. put_cpu();
  839. /* Get VLAN ID from skb for printing purposes */
  840. __vlan_hwaccel_get_tag(skb, &vlan_tci);
  841. /* send down to lld */
  842. fr_dev(fp) = lport;
  843. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame send: "
  844. "src=%06x dest=%06x r_ctl=%x type=%x vlan=%04x.\n",
  845. ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl, fh->fh_type,
  846. vlan_tci);
  847. if (qedf_dump_frames)
  848. print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16,
  849. 1, skb->data, skb->len, false);
  850. qed_ops->ll2->start_xmit(qedf->cdev, skb);
  851. return 0;
  852. }
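/*
 * Allocate the per-rport send queue and its page base list (PBL). The PBL
 * holds one 64-bit DMA address per SQ page, written as low/high 32-bit words.
 */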
  853. static int qedf_alloc_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport)
  854. {
  855. int rval = 0;
  856. u32 *pbl;
  857. dma_addr_t page;
  858. int num_pages;
  859. /* Calculate appropriate queue and PBL sizes */
  860. fcport->sq_mem_size = SQ_NUM_ENTRIES * sizeof(struct fcoe_wqe);
  861. fcport->sq_mem_size = ALIGN(fcport->sq_mem_size, QEDF_PAGE_SIZE);
  862. fcport->sq_pbl_size = (fcport->sq_mem_size / QEDF_PAGE_SIZE) *
  863. sizeof(void *);
  864. fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE;
  865. fcport->sq = dma_zalloc_coherent(&qedf->pdev->dev,
  866. fcport->sq_mem_size, &fcport->sq_dma, GFP_KERNEL);
  867. if (!fcport->sq) {
  868. QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n");
  869. rval = 1;
  870. goto out;
  871. }
  872. fcport->sq_pbl = dma_zalloc_coherent(&qedf->pdev->dev,
  873. fcport->sq_pbl_size, &fcport->sq_pbl_dma, GFP_KERNEL);
  874. if (!fcport->sq_pbl) {
  875. QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n");
  876. rval = 1;
  877. goto out_free_sq;
  878. }
  879. /* Create PBL */
  880. num_pages = fcport->sq_mem_size / QEDF_PAGE_SIZE;
  881. page = fcport->sq_dma;
  882. pbl = (u32 *)fcport->sq_pbl;
  883. while (num_pages--) {
  884. *pbl = U64_LO(page);
  885. pbl++;
  886. *pbl = U64_HI(page);
  887. pbl++;
  888. page += QEDF_PAGE_SIZE;
  889. }
  890. return rval;
  891. out_free_sq:
  892. dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size, fcport->sq,
  893. fcport->sq_dma);
  894. out:
  895. return rval;
  896. }
  897. static void qedf_free_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport)
  898. {
  899. if (fcport->sq_pbl)
  900. dma_free_coherent(&qedf->pdev->dev, fcport->sq_pbl_size,
  901. fcport->sq_pbl, fcport->sq_pbl_dma);
  902. if (fcport->sq)
  903. dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
  904. fcport->sq, fcport->sq_dma);
  905. }
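/*
 * Describe the session to qed (SQ PBL, source/destination MACs, S_ID/D_ID,
 * VLAN tag and timer values) and create the offloaded FCoE connection.
 */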
  906. static int qedf_offload_connection(struct qedf_ctx *qedf,
  907. struct qedf_rport *fcport)
  908. {
  909. struct qed_fcoe_params_offload conn_info;
  910. u32 port_id;
  911. int rval;
  912. uint16_t total_sqe = (fcport->sq_mem_size / sizeof(struct fcoe_wqe));
  913. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offloading connection "
  914. "portid=%06x.\n", fcport->rdata->ids.port_id);
  915. rval = qed_ops->acquire_conn(qedf->cdev, &fcport->handle,
  916. &fcport->fw_cid, &fcport->p_doorbell);
  917. if (rval) {
  918. QEDF_WARN(&(qedf->dbg_ctx), "Could not acquire connection "
  919. "for portid=%06x.\n", fcport->rdata->ids.port_id);
  920. rval = 1; /* For some reason qed returns 0 on failure here */
  921. goto out;
  922. }
  923. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "portid=%06x "
  924. "fw_cid=%08x handle=%d.\n", fcport->rdata->ids.port_id,
  925. fcport->fw_cid, fcport->handle);
  926. memset(&conn_info, 0, sizeof(struct qed_fcoe_params_offload));
  927. /* Fill in the offload connection info */
  928. conn_info.sq_pbl_addr = fcport->sq_pbl_dma;
  929. conn_info.sq_curr_page_addr = (dma_addr_t)(*(u64 *)fcport->sq_pbl);
  930. conn_info.sq_next_page_addr =
  931. (dma_addr_t)(*(u64 *)(fcport->sq_pbl + 8));
  932. /* Need to use our FCoE MAC for the offload session */
  933. ether_addr_copy(conn_info.src_mac, qedf->data_src_addr);
  934. ether_addr_copy(conn_info.dst_mac, qedf->ctlr.dest_addr);
  935. conn_info.tx_max_fc_pay_len = fcport->rdata->maxframe_size;
  936. conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov / 20;
  937. conn_info.rec_tov_timer_val = 3; /* I think this is what E3 was */
  938. conn_info.rx_max_fc_pay_len = fcport->rdata->maxframe_size;
  939. /* Set VLAN data */
  940. conn_info.vlan_tag = qedf->vlan_id <<
  941. FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT;
  942. conn_info.vlan_tag |=
  943. qedf_default_prio << FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT;
  944. conn_info.flags |= (FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK <<
  945. FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT);
  946. /* Set host port source id */
  947. port_id = fc_host_port_id(qedf->lport->host);
  948. fcport->sid = port_id;
  949. conn_info.s_id.addr_hi = (port_id & 0x000000FF);
  950. conn_info.s_id.addr_mid = (port_id & 0x0000FF00) >> 8;
  951. conn_info.s_id.addr_lo = (port_id & 0x00FF0000) >> 16;
  952. conn_info.max_conc_seqs_c3 = fcport->rdata->max_seq;
  953. /* Set remote port destination id */
  954. port_id = fcport->rdata->rport->port_id;
  955. conn_info.d_id.addr_hi = (port_id & 0x000000FF);
  956. conn_info.d_id.addr_mid = (port_id & 0x0000FF00) >> 8;
  957. conn_info.d_id.addr_lo = (port_id & 0x00FF0000) >> 16;
  958. conn_info.def_q_idx = 0; /* Default index for send queue? */
  959. /* Set FC-TAPE specific flags if needed */
  960. if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
  961. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN,
  962. "Enable CONF, REC for portid=%06x.\n",
  963. fcport->rdata->ids.port_id);
  964. conn_info.flags |= 1 <<
  965. FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT;
  966. conn_info.flags |=
  967. ((fcport->rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
  968. FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT;
  969. }
  970. rval = qed_ops->offload_conn(qedf->cdev, fcport->handle, &conn_info);
  971. if (rval) {
  972. QEDF_WARN(&(qedf->dbg_ctx), "Could not offload connection "
  973. "for portid=%06x.\n", fcport->rdata->ids.port_id);
  974. goto out_free_conn;
  975. } else
  976. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offload "
  977. "succeeded portid=%06x total_sqe=%d.\n",
  978. fcport->rdata->ids.port_id, total_sqe);
  979. spin_lock_init(&fcport->rport_lock);
  980. atomic_set(&fcport->free_sqes, total_sqe);
  981. return 0;
  982. out_free_conn:
  983. qed_ops->release_conn(qedf->cdev, fcport->handle);
  984. out:
  985. return rval;
  986. }
  987. #define QEDF_TERM_BUFF_SIZE 10
  988. static void qedf_upload_connection(struct qedf_ctx *qedf,
  989. struct qedf_rport *fcport)
  990. {
  991. void *term_params;
  992. dma_addr_t term_params_dma;
  993. /* Term params need to be in a DMA-coherent buffer as qed shares the
  994. * physical DMA address with the firmware. The buffer may be used in
  995. * the receive path so we may eventually have to move this.
  996. */
  997. term_params = dma_alloc_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE,
  998. &term_params_dma, GFP_KERNEL);
  999. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Uploading connection "
  1000. "port_id=%06x.\n", fcport->rdata->ids.port_id);
  1001. qed_ops->destroy_conn(qedf->cdev, fcport->handle, term_params_dma);
  1002. qed_ops->release_conn(qedf->cdev, fcport->handle);
  1003. dma_free_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE, term_params,
  1004. term_params_dma);
  1005. }
  1006. static void qedf_cleanup_fcport(struct qedf_ctx *qedf,
  1007. struct qedf_rport *fcport)
  1008. {
  1009. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Cleaning up portid=%06x.\n",
  1010. fcport->rdata->ids.port_id);
  1011. /* Flush any remaining i/o's before we upload the connection */
  1012. qedf_flush_active_ios(fcport, -1);
  1013. if (test_and_clear_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))
  1014. qedf_upload_connection(qedf, fcport);
  1015. qedf_free_sq(qedf, fcport);
  1016. fcport->rdata = NULL;
  1017. fcport->qedf = NULL;
  1018. }
  1019. /**
  1020. * This event_callback is called after successful completion of libfc
  1021. * initiated target login. qedf can proceed with initiating the session
  1022. * establishment.
  1023. */
  1024. static void qedf_rport_event_handler(struct fc_lport *lport,
  1025. struct fc_rport_priv *rdata,
  1026. enum fc_rport_event event)
  1027. {
  1028. struct qedf_ctx *qedf = lport_priv(lport);
  1029. struct fc_rport *rport = rdata->rport;
  1030. struct fc_rport_libfc_priv *rp;
  1031. struct qedf_rport *fcport;
  1032. u32 port_id;
  1033. int rval;
  1034. unsigned long flags;
  1035. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "event = %d, "
  1036. "port_id = 0x%x\n", event, rdata->ids.port_id);
  1037. switch (event) {
  1038. case RPORT_EV_READY:
  1039. if (!rport) {
  1040. QEDF_WARN(&(qedf->dbg_ctx), "rport is NULL.\n");
  1041. break;
  1042. }
  1043. rp = rport->dd_data;
  1044. fcport = (struct qedf_rport *)&rp[1];
  1045. fcport->qedf = qedf;
  1046. if (atomic_read(&qedf->num_offloads) >= QEDF_MAX_SESSIONS) {
  1047. QEDF_ERR(&(qedf->dbg_ctx), "Not offloading "
  1048. "portid=0x%x as max number of offloaded sessions "
  1049. "reached.\n", rdata->ids.port_id);
  1050. return;
  1051. }
  1052. /*
  1053. * Don't try to offload the session again. Can happen when we
  1054. * get an ADISC
  1055. */
  1056. if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
  1057. QEDF_WARN(&(qedf->dbg_ctx), "Session already "
  1058. "offloaded, portid=0x%x.\n",
  1059. rdata->ids.port_id);
  1060. return;
  1061. }
  1062. if (rport->port_id == FC_FID_DIR_SERV) {
  1063. /*
  1064. * qedf_rport structure doesn't exist for
  1065. * directory server.
  1066. * We should not come here, as lport will
  1067. * take care of fabric login
  1068. */
  1069. QEDF_WARN(&(qedf->dbg_ctx), "rport struct does not "
  1070. "exist for dir server port_id=%x\n",
  1071. rdata->ids.port_id);
  1072. break;
  1073. }
  1074. if (rdata->spp_type != FC_TYPE_FCP) {
  1075. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  1076. "Not offloading since spp type isn't FCP\n");
  1077. break;
  1078. }
  1079. if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
  1080. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  1081. "Not FCP target so not offloading\n");
  1082. break;
  1083. }
  1084. fcport->rdata = rdata;
  1085. fcport->rport = rport;
  1086. rval = qedf_alloc_sq(qedf, fcport);
  1087. if (rval) {
  1088. qedf_cleanup_fcport(qedf, fcport);
  1089. break;
  1090. }
  1091. /* Set device type */
  1092. if (rdata->flags & FC_RP_FLAGS_RETRY &&
  1093. rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET &&
  1094. !(rdata->ids.roles & FC_RPORT_ROLE_FCP_INITIATOR)) {
  1095. fcport->dev_type = QEDF_RPORT_TYPE_TAPE;
  1096. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  1097. "portid=%06x is a TAPE device.\n",
  1098. rdata->ids.port_id);
  1099. } else {
  1100. fcport->dev_type = QEDF_RPORT_TYPE_DISK;
  1101. }
  1102. rval = qedf_offload_connection(qedf, fcport);
  1103. if (rval) {
  1104. qedf_cleanup_fcport(qedf, fcport);
  1105. break;
  1106. }
  1107. /* Add fcport to the qedf_ctx list of offloaded ports */
  1108. spin_lock_irqsave(&qedf->hba_lock, flags);
  1109. list_add_rcu(&fcport->peers, &qedf->fcports);
  1110. spin_unlock_irqrestore(&qedf->hba_lock, flags);
  1111. /*
  1112. * Set the session ready bit to let everyone know that this
  1113. * connection is ready for I/O
  1114. */
  1115. set_bit(QEDF_RPORT_SESSION_READY, &fcport->flags);
  1116. atomic_inc(&qedf->num_offloads);
  1117. break;
  1118. case RPORT_EV_LOGO:
  1119. case RPORT_EV_FAILED:
  1120. case RPORT_EV_STOP:
  1121. port_id = rdata->ids.port_id;
  1122. if (port_id == FC_FID_DIR_SERV)
  1123. break;
  1124. if (!rport) {
  1125. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  1126. "port_id=%x - rport notcreated Yet!!\n", port_id);
  1127. break;
  1128. }
  1129. rp = rport->dd_data;
  1130. /*
  1131. * Perform session upload. Note that rdata->peers is already
  1132. * removed from disc->rports list before we get this event.
  1133. */
  1134. fcport = (struct qedf_rport *)&rp[1];
  1135. /* Only free this fcport if it is offloaded already */
  1136. if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
  1137. set_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags);
  1138. qedf_cleanup_fcport(qedf, fcport);
  1139. /*
  1140. * Remove fcport from the qedf_ctx list of offloaded
  1141. * ports
  1142. */
  1143. spin_lock_irqsave(&qedf->hba_lock, flags);
  1144. list_del_rcu(&fcport->peers);
  1145. spin_unlock_irqrestore(&qedf->hba_lock, flags);
  1146. clear_bit(QEDF_RPORT_UPLOADING_CONNECTION,
  1147. &fcport->flags);
  1148. atomic_dec(&qedf->num_offloads);
  1149. }
  1150. break;
  1151. case RPORT_EV_NONE:
  1152. break;
  1153. }
  1154. }
  1155. static void qedf_abort_io(struct fc_lport *lport)
  1156. {
  1157. /* NO-OP but need to fill in the template */
  1158. }
  1159. static void qedf_fcp_cleanup(struct fc_lport *lport)
  1160. {
  1161. /*
  1162. * NO-OP but need to fill in template to prevent a NULL
  1163. * function pointer dereference during link down. I/Os
  1164. * will be flushed when port is uploaded.
  1165. */
  1166. }
  1167. static struct libfc_function_template qedf_lport_template = {
  1168. .frame_send = qedf_xmit,
  1169. .fcp_abort_io = qedf_abort_io,
  1170. .fcp_cleanup = qedf_fcp_cleanup,
  1171. .rport_event_callback = qedf_rport_event_handler,
  1172. .elsct_send = qedf_elsct_send,
  1173. };
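/*
* Set up the embedded libfcoe FIP controller: start it in FIP_ST_AUTO so
* libfcoe can negotiate FIP vs. non-FIP operation, hook up the driver's
* FIP transmit and source-MAC callbacks, and seed the control source
* address with the adapter MAC reported by qed.
*/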
  1174. static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf)
  1175. {
  1176. fcoe_ctlr_init(&qedf->ctlr, FIP_ST_AUTO);
  1177. qedf->ctlr.send = qedf_fip_send;
  1178. qedf->ctlr.get_src_addr = qedf_get_src_mac;
  1179. ether_addr_copy(qedf->ctlr.ctl_src_addr, qedf->mac);
  1180. }
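/*
* Enable FDMI registration in libfc and populate the fc_host attributes
* (serial number read from the PCIe Device Serial Number capability when
* present, manufacturer, model, hardware/driver/firmware versions) that
* libfc uses when building the FDMI registration payloads.
*/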
  1181. static void qedf_setup_fdmi(struct qedf_ctx *qedf)
  1182. {
  1183. struct fc_lport *lport = qedf->lport;
  1184. struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);
  1185. u8 buf[8];
  1186. int i, pos;
  1187. /*
  1188. * fdmi_enabled needs to be set for libfc to execute FDMI registration.
  1189. */
  1190. lport->fdmi_enabled = 1;
/*
* Set up the fc_host attributes that will be used to fill
* in the FDMI information.
*/
  1195. /* Get the PCI-e Device Serial Number Capability */
  1196. pos = pci_find_ext_capability(qedf->pdev, PCI_EXT_CAP_ID_DSN);
  1197. if (pos) {
  1198. pos += 4;
  1199. for (i = 0; i < 8; i++)
  1200. pci_read_config_byte(qedf->pdev, pos + i, &buf[i]);
  1201. snprintf(fc_host->serial_number,
  1202. sizeof(fc_host->serial_number),
  1203. "%02X%02X%02X%02X%02X%02X%02X%02X",
  1204. buf[7], buf[6], buf[5], buf[4],
  1205. buf[3], buf[2], buf[1], buf[0]);
  1206. } else
  1207. snprintf(fc_host->serial_number,
  1208. sizeof(fc_host->serial_number), "Unknown");
  1209. snprintf(fc_host->manufacturer,
  1210. sizeof(fc_host->manufacturer), "%s", "Cavium Inc.");
  1211. snprintf(fc_host->model, sizeof(fc_host->model), "%s", "QL41000");
  1212. snprintf(fc_host->model_description, sizeof(fc_host->model_description),
  1213. "%s", "QLogic FastLinQ QL41000 Series 10/25/40/50GGbE Controller"
  1214. "(FCoE)");
  1215. snprintf(fc_host->hardware_version, sizeof(fc_host->hardware_version),
  1216. "Rev %d", qedf->pdev->revision);
  1217. snprintf(fc_host->driver_version, sizeof(fc_host->driver_version),
  1218. "%s", QEDF_VERSION);
  1219. snprintf(fc_host->firmware_version, sizeof(fc_host->firmware_version),
  1220. "%d.%d.%d.%d", FW_MAJOR_VERSION, FW_MINOR_VERSION,
  1221. FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
  1222. }
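/*
* One-time configuration of the base N_Port lport: retry counts, timeouts,
* NPIV limits, WWNN/WWPN, the libfc function template, a class 3 exchange
* manager for the XIDs above the offloaded SCSI range, lport statistics,
* max frame size, dev_loss_tmo and the symbolic name, followed by FDMI
* setup. Returns -ENOMEM if the lport stats cannot be allocated.
*/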
  1223. static int qedf_lport_setup(struct qedf_ctx *qedf)
  1224. {
  1225. struct fc_lport *lport = qedf->lport;
  1226. lport->link_up = 0;
  1227. lport->max_retry_count = QEDF_FLOGI_RETRY_CNT;
  1228. lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT;
  1229. lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
  1230. FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
  1231. lport->boot_time = jiffies;
  1232. lport->e_d_tov = 2 * 1000;
  1233. lport->r_a_tov = 10 * 1000;
  1234. /* Set NPIV support */
  1235. lport->does_npiv = 1;
  1236. fc_host_max_npiv_vports(lport->host) = QEDF_MAX_NPIV;
  1237. fc_set_wwnn(lport, qedf->wwnn);
  1238. fc_set_wwpn(lport, qedf->wwpn);
  1239. fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0);
  1240. /* Allocate the exchange manager */
  1241. fc_exch_mgr_alloc(lport, FC_CLASS_3, qedf->max_scsi_xid + 1,
  1242. qedf->max_els_xid, NULL);
  1243. if (fc_lport_init_stats(lport))
  1244. return -ENOMEM;
  1245. /* Finish lport config */
  1246. fc_lport_config(lport);
  1247. /* Set max frame size */
  1248. fc_set_mfs(lport, QEDF_MFS);
  1249. fc_host_maxframe_size(lport->host) = lport->mfs;
  1250. /* Set default dev_loss_tmo based on module parameter */
  1251. fc_host_dev_loss_tmo(lport->host) = qedf_dev_loss_tmo;
  1252. /* Set symbolic node name */
  1253. snprintf(fc_host_symbolic_name(lport->host), 256,
  1254. "QLogic %s v%s", QEDF_MODULE_NAME, QEDF_VERSION);
  1255. qedf_setup_fdmi(qedf);
  1256. return 0;
  1257. }
  1258. /*
  1259. * NPIV functions
  1260. */
  1261. static int qedf_vport_libfc_config(struct fc_vport *vport,
  1262. struct fc_lport *lport)
  1263. {
  1264. lport->link_up = 0;
  1265. lport->qfull = 0;
  1266. lport->max_retry_count = QEDF_FLOGI_RETRY_CNT;
  1267. lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT;
  1268. lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
  1269. FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
  1270. lport->boot_time = jiffies;
  1271. lport->e_d_tov = 2 * 1000;
  1272. lport->r_a_tov = 10 * 1000;
  1273. lport->does_npiv = 1; /* Temporary until we add NPIV support */
  1274. /* Allocate stats for vport */
  1275. if (fc_lport_init_stats(lport))
  1276. return -ENOMEM;
  1277. /* Finish lport config */
  1278. fc_lport_config(lport);
  1279. /* offload related configuration */
  1280. lport->crc_offload = 0;
  1281. lport->seq_offload = 0;
  1282. lport->lro_enabled = 0;
  1283. lport->lro_xid = 0;
  1284. lport->lso_max = 0;
  1285. return 0;
  1286. }
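/*
* fc_transport vport_create callback. Validates the request, requires the
* physical link to be up, allocates a child lport that shares the base
* port's hba_lock, cmd_mgr and PCI device, registers its Scsi_Host, clones
* the N_Port exchange manager list and then either logs into the fabric or
* leaves the new vport disabled as requested.
*/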
  1287. static int qedf_vport_create(struct fc_vport *vport, bool disabled)
  1288. {
  1289. struct Scsi_Host *shost = vport_to_shost(vport);
  1290. struct fc_lport *n_port = shost_priv(shost);
  1291. struct fc_lport *vn_port;
  1292. struct qedf_ctx *base_qedf = lport_priv(n_port);
  1293. struct qedf_ctx *vport_qedf;
  1294. char buf[32];
  1295. int rc = 0;
  1296. rc = fcoe_validate_vport_create(vport);
  1297. if (rc) {
  1298. fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
  1299. QEDF_WARN(&(base_qedf->dbg_ctx), "Failed to create vport, "
  1300. "WWPN (0x%s) already exists.\n", buf);
  1301. goto err1;
  1302. }
  1303. if (atomic_read(&base_qedf->link_state) != QEDF_LINK_UP) {
  1304. QEDF_WARN(&(base_qedf->dbg_ctx), "Cannot create vport "
  1305. "because link is not up.\n");
  1306. rc = -EIO;
  1307. goto err1;
  1308. }
  1309. vn_port = libfc_vport_create(vport, sizeof(struct qedf_ctx));
  1310. if (!vn_port) {
  1311. QEDF_WARN(&(base_qedf->dbg_ctx), "Could not create lport "
  1312. "for vport.\n");
  1313. rc = -ENOMEM;
  1314. goto err1;
  1315. }
  1316. fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
  1317. QEDF_ERR(&(base_qedf->dbg_ctx), "Creating NPIV port, WWPN=%s.\n",
  1318. buf);
  1319. /* Copy some fields from base_qedf */
  1320. vport_qedf = lport_priv(vn_port);
  1321. memcpy(vport_qedf, base_qedf, sizeof(struct qedf_ctx));
  1322. /* Set qedf data specific to this vport */
  1323. vport_qedf->lport = vn_port;
  1324. /* Use same hba_lock as base_qedf */
  1325. vport_qedf->hba_lock = base_qedf->hba_lock;
  1326. vport_qedf->pdev = base_qedf->pdev;
  1327. vport_qedf->cmd_mgr = base_qedf->cmd_mgr;
  1328. init_completion(&vport_qedf->flogi_compl);
  1329. INIT_LIST_HEAD(&vport_qedf->fcports);
  1330. rc = qedf_vport_libfc_config(vport, vn_port);
  1331. if (rc) {
  1332. QEDF_ERR(&(base_qedf->dbg_ctx), "Could not allocate memory "
  1333. "for lport stats.\n");
  1334. goto err2;
  1335. }
  1336. fc_set_wwnn(vn_port, vport->node_name);
  1337. fc_set_wwpn(vn_port, vport->port_name);
  1338. vport_qedf->wwnn = vn_port->wwnn;
  1339. vport_qedf->wwpn = vn_port->wwpn;
  1340. vn_port->host->transportt = qedf_fc_vport_transport_template;
  1341. vn_port->host->can_queue = QEDF_MAX_ELS_XID;
  1342. vn_port->host->max_lun = qedf_max_lun;
  1343. vn_port->host->sg_tablesize = QEDF_MAX_BDS_PER_CMD;
  1344. vn_port->host->max_cmd_len = QEDF_MAX_CDB_LEN;
  1345. rc = scsi_add_host(vn_port->host, &vport->dev);
  1346. if (rc) {
  1347. QEDF_WARN(&(base_qedf->dbg_ctx), "Error adding Scsi_Host.\n");
  1348. goto err2;
  1349. }
  1350. /* Set default dev_loss_tmo based on module parameter */
  1351. fc_host_dev_loss_tmo(vn_port->host) = qedf_dev_loss_tmo;
/* Initialize libfc structures */
  1353. memcpy(&vn_port->tt, &qedf_lport_template,
  1354. sizeof(qedf_lport_template));
  1355. fc_exch_init(vn_port);
  1356. fc_elsct_init(vn_port);
  1357. fc_lport_init(vn_port);
  1358. fc_disc_init(vn_port);
  1359. fc_disc_config(vn_port, vn_port);
  1360. /* Allocate the exchange manager */
  1361. shost = vport_to_shost(vport);
  1362. n_port = shost_priv(shost);
  1363. fc_exch_mgr_list_clone(n_port, vn_port);
  1364. /* Set max frame size */
  1365. fc_set_mfs(vn_port, QEDF_MFS);
  1366. fc_host_port_type(vn_port->host) = FC_PORTTYPE_UNKNOWN;
  1367. if (disabled) {
  1368. fc_vport_set_state(vport, FC_VPORT_DISABLED);
  1369. } else {
  1370. vn_port->boot_time = jiffies;
  1371. fc_fabric_login(vn_port);
  1372. fc_vport_setlink(vn_port);
  1373. }
  1374. QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_NPIV, "vn_port=%p.\n",
  1375. vn_port);
  1376. /* Set up debug context for vport */
  1377. vport_qedf->dbg_ctx.host_no = vn_port->host->host_no;
  1378. vport_qedf->dbg_ctx.pdev = base_qedf->pdev;
  1379. err2:
  1380. scsi_host_put(vn_port->host);
  1381. err1:
  1382. return rc;
  1383. }
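/*
* fc_transport vport_delete callback. Unlinks the vport's lport from the
* N_Port list, logs off the fabric, detaches and removes the Scsi_Host,
* frees the exchange manager only if the lport reached the READY state,
* releases the lport stats and drops the final Scsi_Host reference.
*/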
  1384. static int qedf_vport_destroy(struct fc_vport *vport)
  1385. {
  1386. struct Scsi_Host *shost = vport_to_shost(vport);
  1387. struct fc_lport *n_port = shost_priv(shost);
  1388. struct fc_lport *vn_port = vport->dd_data;
  1389. mutex_lock(&n_port->lp_mutex);
  1390. list_del(&vn_port->list);
  1391. mutex_unlock(&n_port->lp_mutex);
  1392. fc_fabric_logoff(vn_port);
  1393. fc_lport_destroy(vn_port);
  1394. /* Detach from scsi-ml */
  1395. fc_remove_host(vn_port->host);
  1396. scsi_remove_host(vn_port->host);
  1397. /*
  1398. * Only try to release the exchange manager if the vn_port
  1399. * configuration is complete.
  1400. */
  1401. if (vn_port->state == LPORT_ST_READY)
  1402. fc_exch_mgr_free(vn_port);
  1403. /* Free memory used by statistical counters */
  1404. fc_lport_free_stats(vn_port);
  1405. /* Release Scsi_Host */
  1406. if (vn_port->host)
  1407. scsi_host_put(vn_port->host);
  1408. return 0;
  1409. }
  1410. static int qedf_vport_disable(struct fc_vport *vport, bool disable)
  1411. {
  1412. struct fc_lport *lport = vport->dd_data;
  1413. if (disable) {
  1414. fc_vport_set_state(vport, FC_VPORT_DISABLED);
  1415. fc_fabric_logoff(lport);
  1416. } else {
  1417. lport->boot_time = jiffies;
  1418. fc_fabric_login(lport);
  1419. fc_vport_setlink(lport);
  1420. }
  1421. return 0;
  1422. }
  1423. /*
  1424. * During removal we need to wait for all the vports associated with a port
  1425. * to be destroyed so we avoid a race condition where libfc is still trying
  1426. * to reap vports while the driver remove function has already reaped the
  1427. * driver contexts associated with the physical port.
  1428. */
  1429. static void qedf_wait_for_vport_destroy(struct qedf_ctx *qedf)
  1430. {
  1431. struct fc_host_attrs *fc_host = shost_to_fc_host(qedf->lport->host);
  1432. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV,
  1433. "Entered.\n");
  1434. while (fc_host->npiv_vports_inuse > 0) {
  1435. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV,
  1436. "Waiting for all vports to be reaped.\n");
  1437. msleep(1000);
  1438. }
  1439. }
  1440. /**
  1441. * qedf_fcoe_reset - Resets the fcoe
  1442. *
  1443. * @shost: shost the reset is from
  1444. *
  1445. * Returns: always 0
  1446. */
  1447. static int qedf_fcoe_reset(struct Scsi_Host *shost)
  1448. {
  1449. struct fc_lport *lport = shost_priv(shost);
  1450. qedf_ctx_soft_reset(lport);
  1451. return 0;
  1452. }
  1453. static struct fc_host_statistics *qedf_fc_get_host_stats(struct Scsi_Host
  1454. *shost)
  1455. {
  1456. struct fc_host_statistics *qedf_stats;
  1457. struct fc_lport *lport = shost_priv(shost);
  1458. struct qedf_ctx *qedf = lport_priv(lport);
  1459. struct qed_fcoe_stats *fw_fcoe_stats;
  1460. qedf_stats = fc_get_host_stats(shost);
  1461. /* We don't collect offload stats for specific NPIV ports */
  1462. if (lport->vport)
  1463. goto out;
  1464. fw_fcoe_stats = kmalloc(sizeof(struct qed_fcoe_stats), GFP_KERNEL);
  1465. if (!fw_fcoe_stats) {
  1466. QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate memory for "
  1467. "fw_fcoe_stats.\n");
  1468. goto out;
  1469. }
  1470. /* Query firmware for offload stats */
  1471. qed_ops->get_stats(qedf->cdev, fw_fcoe_stats);
/*
* The expectation is that we add our offload stats to the stats
* being maintained by libfc each time the fc_get_host_stats callback
* is invoked. The additions are not carried over for each call to
* the fc_get_host_stats callback.
*/
  1478. qedf_stats->tx_frames += fw_fcoe_stats->fcoe_tx_data_pkt_cnt +
  1479. fw_fcoe_stats->fcoe_tx_xfer_pkt_cnt +
  1480. fw_fcoe_stats->fcoe_tx_other_pkt_cnt;
  1481. qedf_stats->rx_frames += fw_fcoe_stats->fcoe_rx_data_pkt_cnt +
  1482. fw_fcoe_stats->fcoe_rx_xfer_pkt_cnt +
  1483. fw_fcoe_stats->fcoe_rx_other_pkt_cnt;
  1484. qedf_stats->fcp_input_megabytes +=
  1485. do_div(fw_fcoe_stats->fcoe_rx_byte_cnt, 1000000);
  1486. qedf_stats->fcp_output_megabytes +=
  1487. do_div(fw_fcoe_stats->fcoe_tx_byte_cnt, 1000000);
  1488. qedf_stats->rx_words += fw_fcoe_stats->fcoe_rx_byte_cnt / 4;
  1489. qedf_stats->tx_words += fw_fcoe_stats->fcoe_tx_byte_cnt / 4;
  1490. qedf_stats->invalid_crc_count +=
  1491. fw_fcoe_stats->fcoe_silent_drop_pkt_crc_error_cnt;
  1492. qedf_stats->dumped_frames =
  1493. fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt;
  1494. qedf_stats->error_frames +=
  1495. fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt;
  1496. qedf_stats->fcp_input_requests += qedf->input_requests;
  1497. qedf_stats->fcp_output_requests += qedf->output_requests;
  1498. qedf_stats->fcp_control_requests += qedf->control_requests;
  1499. qedf_stats->fcp_packet_aborts += qedf->packet_aborts;
  1500. qedf_stats->fcp_frame_alloc_failures += qedf->alloc_failures;
  1501. kfree(fw_fcoe_stats);
  1502. out:
  1503. return qedf_stats;
  1504. }
  1505. static struct fc_function_template qedf_fc_transport_fn = {
  1506. .show_host_node_name = 1,
  1507. .show_host_port_name = 1,
  1508. .show_host_supported_classes = 1,
  1509. .show_host_supported_fc4s = 1,
  1510. .show_host_active_fc4s = 1,
  1511. .show_host_maxframe_size = 1,
  1512. .show_host_port_id = 1,
  1513. .show_host_supported_speeds = 1,
  1514. .get_host_speed = fc_get_host_speed,
  1515. .show_host_speed = 1,
  1516. .show_host_port_type = 1,
  1517. .get_host_port_state = fc_get_host_port_state,
  1518. .show_host_port_state = 1,
  1519. .show_host_symbolic_name = 1,
/*
* Tell the FC transport to allocate enough space to store the backpointer
* for the associated qedf_rport struct.
*/
  1524. .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
  1525. sizeof(struct qedf_rport)),
  1526. .show_rport_maxframe_size = 1,
  1527. .show_rport_supported_classes = 1,
  1528. .show_host_fabric_name = 1,
  1529. .show_starget_node_name = 1,
  1530. .show_starget_port_name = 1,
  1531. .show_starget_port_id = 1,
  1532. .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
  1533. .show_rport_dev_loss_tmo = 1,
  1534. .get_fc_host_stats = qedf_fc_get_host_stats,
  1535. .issue_fc_host_lip = qedf_fcoe_reset,
  1536. .vport_create = qedf_vport_create,
  1537. .vport_delete = qedf_vport_destroy,
  1538. .vport_disable = qedf_vport_disable,
  1539. .bsg_request = fc_lport_bsg_request,
  1540. };
  1541. static struct fc_function_template qedf_fc_vport_transport_fn = {
  1542. .show_host_node_name = 1,
  1543. .show_host_port_name = 1,
  1544. .show_host_supported_classes = 1,
  1545. .show_host_supported_fc4s = 1,
  1546. .show_host_active_fc4s = 1,
  1547. .show_host_maxframe_size = 1,
  1548. .show_host_port_id = 1,
  1549. .show_host_supported_speeds = 1,
  1550. .get_host_speed = fc_get_host_speed,
  1551. .show_host_speed = 1,
  1552. .show_host_port_type = 1,
  1553. .get_host_port_state = fc_get_host_port_state,
  1554. .show_host_port_state = 1,
  1555. .show_host_symbolic_name = 1,
  1556. .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
  1557. sizeof(struct qedf_rport)),
  1558. .show_rport_maxframe_size = 1,
  1559. .show_rport_supported_classes = 1,
  1560. .show_host_fabric_name = 1,
  1561. .show_starget_node_name = 1,
  1562. .show_starget_port_name = 1,
  1563. .show_starget_port_id = 1,
  1564. .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
  1565. .show_rport_dev_loss_tmo = 1,
  1566. .get_fc_host_stats = fc_get_host_stats,
  1567. .issue_fc_host_lip = qedf_fcoe_reset,
  1568. .bsg_request = fc_lport_bsg_request,
  1569. };
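/*
* Returns true when the firmware producer index in the status block no
* longer matches the producer index recorded for this fastpath's global
* completion queue, i.e. when new CQEs are waiting to be processed.
*/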
  1570. static bool qedf_fp_has_work(struct qedf_fastpath *fp)
  1571. {
  1572. struct qedf_ctx *qedf = fp->qedf;
  1573. struct global_queue *que;
  1574. struct qed_sb_info *sb_info = fp->sb_info;
  1575. struct status_block *sb = sb_info->sb_virt;
  1576. u16 prod_idx;
  1577. /* Get the pointer to the global CQ this completion is on */
  1578. que = qedf->global_queues[fp->sb_id];
  1579. /* Be sure all responses have been written to PI */
  1580. rmb();
  1581. /* Get the current firmware producer index */
  1582. prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI];
  1583. return (que->cq_prod_idx != prod_idx);
  1584. }
  1585. /*
  1586. * Interrupt handler code.
  1587. */
/* Process the completion queue and copy CQE contents for deferred processing
*
* Return true if we should wake the I/O thread, false if not.
*/
  1592. static bool qedf_process_completions(struct qedf_fastpath *fp)
  1593. {
  1594. struct qedf_ctx *qedf = fp->qedf;
  1595. struct qed_sb_info *sb_info = fp->sb_info;
  1596. struct status_block *sb = sb_info->sb_virt;
  1597. struct global_queue *que;
  1598. u16 prod_idx;
  1599. struct fcoe_cqe *cqe;
  1600. struct qedf_io_work *io_work;
  1601. int num_handled = 0;
  1602. unsigned int cpu;
  1603. struct qedf_ioreq *io_req = NULL;
  1604. u16 xid;
  1605. u16 new_cqes;
  1606. u32 comp_type;
  1607. /* Get the current firmware producer index */
  1608. prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI];
  1609. /* Get the pointer to the global CQ this completion is on */
  1610. que = qedf->global_queues[fp->sb_id];
/* Calculate the number of new elements since the last processing */
  1612. new_cqes = (prod_idx >= que->cq_prod_idx) ?
  1613. (prod_idx - que->cq_prod_idx) :
  1614. 0x10000 - que->cq_prod_idx + prod_idx;
  1615. /* Save producer index */
  1616. que->cq_prod_idx = prod_idx;
  1617. while (new_cqes) {
  1618. fp->completions++;
  1619. num_handled++;
  1620. cqe = &que->cq[que->cq_cons_idx];
  1621. comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
  1622. FCOE_CQE_CQE_TYPE_MASK;
/*
* Process unsolicited CQEs directly in the interrupt handler
* since we need the fastpath ID
*/
  1627. if (comp_type == FCOE_UNSOLIC_CQE_TYPE) {
  1628. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
  1629. "Unsolicated CQE.\n");
  1630. qedf_process_unsol_compl(qedf, fp->sb_id, cqe);
/*
* Don't add a work list item. Increment the consumer
* index and move on.
*/
  1635. goto inc_idx;
  1636. }
  1637. xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK;
  1638. io_req = &qedf->cmd_mgr->cmds[xid];
  1639. /*
  1640. * Figure out which percpu thread we should queue this I/O
  1641. * on.
  1642. */
  1643. if (!io_req)
/* If there is no io_req associated with this CQE,
* just queue it on CPU 0
*/
  1647. cpu = 0;
  1648. else {
  1649. cpu = io_req->cpu;
  1650. io_req->int_cpu = smp_processor_id();
  1651. }
  1652. io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
  1653. if (!io_work) {
  1654. QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
  1655. "work for I/O completion.\n");
  1656. continue;
  1657. }
  1658. memset(io_work, 0, sizeof(struct qedf_io_work));
  1659. INIT_WORK(&io_work->work, qedf_fp_io_handler);
  1660. /* Copy contents of CQE for deferred processing */
  1661. memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));
  1662. io_work->qedf = fp->qedf;
  1663. io_work->fp = NULL; /* Only used for unsolicited frames */
  1664. queue_work_on(cpu, qedf_io_wq, &io_work->work);
  1665. inc_idx:
  1666. que->cq_cons_idx++;
  1667. if (que->cq_cons_idx == fp->cq_num_entries)
  1668. que->cq_cons_idx = 0;
  1669. new_cqes--;
  1670. }
  1671. return true;
  1672. }
  1673. /* MSI-X fastpath handler code */
  1674. static irqreturn_t qedf_msix_handler(int irq, void *dev_id)
  1675. {
  1676. struct qedf_fastpath *fp = dev_id;
  1677. if (!fp) {
  1678. QEDF_ERR(NULL, "fp is null.\n");
  1679. return IRQ_HANDLED;
  1680. }
  1681. if (!fp->sb_info) {
QEDF_ERR(NULL, "fp->sb_info is null.");
  1683. return IRQ_HANDLED;
  1684. }
  1685. /*
  1686. * Disable interrupts for this status block while we process new
  1687. * completions
  1688. */
  1689. qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
  1690. while (1) {
  1691. qedf_process_completions(fp);
  1692. if (qedf_fp_has_work(fp) == 0) {
  1693. /* Update the sb information */
  1694. qed_sb_update_sb_idx(fp->sb_info);
  1695. /* Check for more work */
  1696. rmb();
  1697. if (qedf_fp_has_work(fp) == 0) {
  1698. /* Re-enable interrupts */
  1699. qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
  1700. return IRQ_HANDLED;
  1701. }
  1702. }
  1703. }
  1704. /* Do we ever want to break out of above loop? */
  1705. return IRQ_HANDLED;
  1706. }
  1707. /* simd handler for MSI/INTa */
  1708. static void qedf_simd_int_handler(void *cookie)
  1709. {
  1710. /* Cookie is qedf_ctx struct */
  1711. struct qedf_ctx *qedf = (struct qedf_ctx *)cookie;
  1712. QEDF_WARN(&(qedf->dbg_ctx), "qedf=%p.\n", qedf);
  1713. }
  1714. #define QEDF_SIMD_HANDLER_NUM 0
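/*
* Tear down fastpath interrupts. For MSI-X each vector is synchronized,
* its affinity hint and notifier are cleared and the IRQ is freed;
* otherwise the simd handler registered with qed is cleaned up. Finally
* qed is told that no fastpath interrupts remain in use.
*/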
  1715. static void qedf_sync_free_irqs(struct qedf_ctx *qedf)
  1716. {
  1717. int i;
  1718. if (qedf->int_info.msix_cnt) {
  1719. for (i = 0; i < qedf->int_info.used_cnt; i++) {
  1720. synchronize_irq(qedf->int_info.msix[i].vector);
  1721. irq_set_affinity_hint(qedf->int_info.msix[i].vector,
  1722. NULL);
  1723. irq_set_affinity_notifier(qedf->int_info.msix[i].vector,
  1724. NULL);
  1725. free_irq(qedf->int_info.msix[i].vector,
  1726. &qedf->fp_array[i]);
  1727. }
  1728. } else
  1729. qed_ops->common->simd_handler_clean(qedf->cdev,
  1730. QEDF_SIMD_HANDLER_NUM);
  1731. qedf->int_info.used_cnt = 0;
  1732. qed_ops->common->set_fp_int(qedf->cdev, 0);
  1733. }
  1734. static int qedf_request_msix_irq(struct qedf_ctx *qedf)
  1735. {
  1736. int i, rc, cpu;
  1737. cpu = cpumask_first(cpu_online_mask);
  1738. for (i = 0; i < qedf->num_queues; i++) {
  1739. rc = request_irq(qedf->int_info.msix[i].vector,
  1740. qedf_msix_handler, 0, "qedf", &qedf->fp_array[i]);
  1741. if (rc) {
  1742. QEDF_WARN(&(qedf->dbg_ctx), "request_irq failed.\n");
  1743. qedf_sync_free_irqs(qedf);
  1744. return rc;
  1745. }
  1746. qedf->int_info.used_cnt++;
  1747. rc = irq_set_affinity_hint(qedf->int_info.msix[i].vector,
  1748. get_cpu_mask(cpu));
  1749. cpu = cpumask_next(cpu, cpu_online_mask);
  1750. }
  1751. return 0;
  1752. }
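/*
* Interrupt setup: ask qed for up to num_online_cpus() fastpath vectors,
* read back the resulting interrupt information and either request the
* MSI-X vectors (spreading affinity hints across the online CPUs) or fall
* back to the simd handler used for MSI/INTa operation.
*/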
  1753. static int qedf_setup_int(struct qedf_ctx *qedf)
  1754. {
  1755. int rc = 0;
  1756. /*
  1757. * Learn interrupt configuration
  1758. */
  1759. rc = qed_ops->common->set_fp_int(qedf->cdev, num_online_cpus());
  1760. if (rc <= 0)
  1761. return 0;
  1762. rc = qed_ops->common->get_fp_int(qedf->cdev, &qedf->int_info);
  1763. if (rc)
  1764. return 0;
  1765. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of msix_cnt = "
  1766. "0x%x num of cpus = 0x%x\n", qedf->int_info.msix_cnt,
  1767. num_online_cpus());
  1768. if (qedf->int_info.msix_cnt)
  1769. return qedf_request_msix_irq(qedf);
  1770. qed_ops->common->simd_handler_config(qedf->cdev, &qedf,
  1771. QEDF_SIMD_HANDLER_NUM, qedf_simd_int_handler);
  1772. qedf->int_info.used_cnt = 1;
  1773. return 0;
  1774. }
  1775. /* Main function for libfc frame reception */
  1776. static void qedf_recv_frame(struct qedf_ctx *qedf,
  1777. struct sk_buff *skb)
  1778. {
  1779. u32 fr_len;
  1780. struct fc_lport *lport;
  1781. struct fc_frame_header *fh;
  1782. struct fcoe_crc_eof crc_eof;
  1783. struct fc_frame *fp;
  1784. u8 *mac = NULL;
  1785. u8 *dest_mac = NULL;
  1786. struct fcoe_hdr *hp;
  1787. struct qedf_rport *fcport;
  1788. struct fc_lport *vn_port;
  1789. u32 f_ctl;
  1790. lport = qedf->lport;
  1791. if (lport == NULL || lport->state == LPORT_ST_DISABLED) {
  1792. QEDF_WARN(NULL, "Invalid lport struct or lport disabled.\n");
  1793. kfree_skb(skb);
  1794. return;
  1795. }
  1796. if (skb_is_nonlinear(skb))
  1797. skb_linearize(skb);
  1798. mac = eth_hdr(skb)->h_source;
  1799. dest_mac = eth_hdr(skb)->h_dest;
  1800. /* Pull the header */
  1801. hp = (struct fcoe_hdr *)skb->data;
  1802. fh = (struct fc_frame_header *) skb_transport_header(skb);
  1803. skb_pull(skb, sizeof(struct fcoe_hdr));
  1804. fr_len = skb->len - sizeof(struct fcoe_crc_eof);
  1805. fp = (struct fc_frame *)skb;
  1806. fc_frame_init(fp);
  1807. fr_dev(fp) = lport;
  1808. fr_sof(fp) = hp->fcoe_sof;
  1809. if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
  1810. kfree_skb(skb);
  1811. return;
  1812. }
  1813. fr_eof(fp) = crc_eof.fcoe_eof;
  1814. fr_crc(fp) = crc_eof.fcoe_crc32;
  1815. if (pskb_trim(skb, fr_len)) {
  1816. kfree_skb(skb);
  1817. return;
  1818. }
  1819. fh = fc_frame_header_get(fp);
  1820. /*
  1821. * Invalid frame filters.
  1822. */
  1823. if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
  1824. fh->fh_type == FC_TYPE_FCP) {
/* Drop FCP data. We don't handle this in the L2 path */
  1826. kfree_skb(skb);
  1827. return;
  1828. }
  1829. if (fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
  1830. fh->fh_type == FC_TYPE_ELS) {
  1831. switch (fc_frame_payload_op(fp)) {
  1832. case ELS_LOGO:
  1833. if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
  1834. /* drop non-FIP LOGO */
  1835. kfree_skb(skb);
  1836. return;
  1837. }
  1838. break;
  1839. }
  1840. }
  1841. if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) {
  1842. /* Drop incoming ABTS */
  1843. kfree_skb(skb);
  1844. return;
  1845. }
if (ntoh24(&dest_mac[3]) != ntoh24(fh->fh_d_id)) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
"FC frame d_id mismatch with MAC %pM.\n", dest_mac);
/* Free the skb here as well so the dropped frame is not leaked */
kfree_skb(skb);
return;
}
  1851. if (qedf->ctlr.state) {
  1852. if (!ether_addr_equal(mac, qedf->ctlr.dest_addr)) {
  1853. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
  1854. "Wrong source address: mac:%pM dest_addr:%pM.\n",
  1855. mac, qedf->ctlr.dest_addr);
  1856. kfree_skb(skb);
  1857. return;
  1858. }
  1859. }
  1860. vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
  1861. /*
  1862. * If the destination ID from the frame header does not match what we
  1863. * have on record for lport and the search for a NPIV port came up
  1864. * empty then this is not addressed to our port so simply drop it.
  1865. */
  1866. if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) {
  1867. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
  1868. "Dropping frame due to destination mismatch: lport->port_id=%x fh->d_id=%x.\n",
  1869. lport->port_id, ntoh24(fh->fh_d_id));
  1870. kfree_skb(skb);
  1871. return;
  1872. }
  1873. f_ctl = ntoh24(fh->fh_f_ctl);
  1874. if ((fh->fh_type == FC_TYPE_BLS) && (f_ctl & FC_FC_SEQ_CTX) &&
  1875. (f_ctl & FC_FC_EX_CTX)) {
  1876. /* Drop incoming ABTS response that has both SEQ/EX CTX set */
  1877. kfree_skb(skb);
  1878. return;
  1879. }
  1880. /*
  1881. * If a connection is uploading, drop incoming FCoE frames as there
  1882. * is a small window where we could try to return a frame while libfc
  1883. * is trying to clean things up.
  1884. */
  1885. /* Get fcport associated with d_id if it exists */
  1886. fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id));
  1887. if (fcport && test_bit(QEDF_RPORT_UPLOADING_CONNECTION,
  1888. &fcport->flags)) {
  1889. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
  1890. "Connection uploading, dropping fp=%p.\n", fp);
  1891. kfree_skb(skb);
  1892. return;
  1893. }
  1894. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame receive: "
  1895. "skb=%p fp=%p src=%06x dest=%06x r_ctl=%x fh_type=%x.\n", skb, fp,
  1896. ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl,
  1897. fh->fh_type);
  1898. if (qedf_dump_frames)
  1899. print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16,
  1900. 1, skb->data, skb->len, false);
  1901. fc_exch_recv(lport, fp);
  1902. }
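/*
* Deferred LL2 receive work. Strips any 802.1Q tag, then hands the frame
* to the FIP handler or to qedf_recv_frame() based on the EtherType;
* frames that are neither FIP nor FCoE are dropped.
*/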
  1903. static void qedf_ll2_process_skb(struct work_struct *work)
  1904. {
  1905. struct qedf_skb_work *skb_work =
  1906. container_of(work, struct qedf_skb_work, work);
  1907. struct qedf_ctx *qedf = skb_work->qedf;
  1908. struct sk_buff *skb = skb_work->skb;
  1909. struct ethhdr *eh;
  1910. if (!qedf) {
  1911. QEDF_ERR(NULL, "qedf is NULL\n");
  1912. goto err_out;
  1913. }
  1914. eh = (struct ethhdr *)skb->data;
  1915. /* Undo VLAN encapsulation */
  1916. if (eh->h_proto == htons(ETH_P_8021Q)) {
  1917. memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
  1918. eh = skb_pull(skb, VLAN_HLEN);
  1919. skb_reset_mac_header(skb);
  1920. }
/*
* Process either a FIP frame or FCoE frame based on the
* protocol value. If it's neither, just drop the
* frame.
*/
  1926. if (eh->h_proto == htons(ETH_P_FIP)) {
  1927. qedf_fip_recv(qedf, skb);
  1928. goto out;
  1929. } else if (eh->h_proto == htons(ETH_P_FCOE)) {
  1930. __skb_pull(skb, ETH_HLEN);
  1931. qedf_recv_frame(qedf, skb);
  1932. goto out;
  1933. } else
  1934. goto err_out;
  1935. err_out:
  1936. kfree_skb(skb);
  1937. out:
  1938. kfree(skb_work);
  1939. return;
  1940. }
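/*
* qed LL2 receive callback. The work item is allocated with GFP_ATOMIC and
* all parsing is deferred: the skb is wrapped in a qedf_skb_work item and
* queued to the ll2_recv_wq workqueue for qedf_ll2_process_skb().
*/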
  1941. static int qedf_ll2_rx(void *cookie, struct sk_buff *skb,
  1942. u32 arg1, u32 arg2)
  1943. {
  1944. struct qedf_ctx *qedf = (struct qedf_ctx *)cookie;
  1945. struct qedf_skb_work *skb_work;
  1946. skb_work = kzalloc(sizeof(struct qedf_skb_work), GFP_ATOMIC);
  1947. if (!skb_work) {
  1948. QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate skb_work so "
  1949. "dropping frame.\n");
  1950. kfree_skb(skb);
  1951. return 0;
  1952. }
  1953. INIT_WORK(&skb_work->work, qedf_ll2_process_skb);
  1954. skb_work->skb = skb;
  1955. skb_work->qedf = qedf;
  1956. queue_work(qedf->ll2_recv_wq, &skb_work->work);
  1957. return 0;
  1958. }
  1959. static struct qed_ll2_cb_ops qedf_ll2_cb_ops = {
  1960. .rx_cb = qedf_ll2_rx,
  1961. .tx_cb = NULL,
  1962. };
  1963. /* Main thread to process I/O completions */
  1964. void qedf_fp_io_handler(struct work_struct *work)
  1965. {
  1966. struct qedf_io_work *io_work =
  1967. container_of(work, struct qedf_io_work, work);
  1968. u32 comp_type;
  1969. /*
  1970. * Deferred part of unsolicited CQE sends
  1971. * frame to libfc.
  1972. */
  1973. comp_type = (io_work->cqe.cqe_data >>
  1974. FCOE_CQE_CQE_TYPE_SHIFT) &
  1975. FCOE_CQE_CQE_TYPE_MASK;
  1976. if (comp_type == FCOE_UNSOLIC_CQE_TYPE &&
  1977. io_work->fp)
  1978. fc_exch_recv(io_work->qedf->lport, io_work->fp);
  1979. else
  1980. qedf_process_cqe(io_work->qedf, &io_work->cqe);
  1981. kfree(io_work);
  1982. }
  1983. static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf,
  1984. struct qed_sb_info *sb_info, u16 sb_id)
  1985. {
  1986. struct status_block *sb_virt;
  1987. dma_addr_t sb_phys;
  1988. int ret;
  1989. sb_virt = dma_alloc_coherent(&qedf->pdev->dev,
  1990. sizeof(struct status_block), &sb_phys, GFP_KERNEL);
  1991. if (!sb_virt) {
  1992. QEDF_ERR(&(qedf->dbg_ctx), "Status block allocation failed "
  1993. "for id = %d.\n", sb_id);
  1994. return -ENOMEM;
  1995. }
  1996. ret = qed_ops->common->sb_init(qedf->cdev, sb_info, sb_virt, sb_phys,
  1997. sb_id, QED_SB_TYPE_STORAGE);
  1998. if (ret) {
  1999. QEDF_ERR(&(qedf->dbg_ctx), "Status block initialization "
  2000. "failed for id = %d.\n", sb_id);
  2001. return ret;
  2002. }
  2003. return 0;
  2004. }
  2005. static void qedf_free_sb(struct qedf_ctx *qedf, struct qed_sb_info *sb_info)
  2006. {
  2007. if (sb_info->sb_virt)
  2008. dma_free_coherent(&qedf->pdev->dev, sizeof(*sb_info->sb_virt),
  2009. (void *)sb_info->sb_virt, sb_info->sb_phys);
  2010. }
  2011. static void qedf_destroy_sb(struct qedf_ctx *qedf)
  2012. {
  2013. int id;
  2014. struct qedf_fastpath *fp = NULL;
  2015. for (id = 0; id < qedf->num_queues; id++) {
  2016. fp = &(qedf->fp_array[id]);
  2017. if (fp->sb_id == QEDF_SB_ID_NULL)
  2018. break;
  2019. qedf_free_sb(qedf, fp->sb_info);
  2020. kfree(fp->sb_info);
  2021. }
  2022. kfree(qedf->fp_array);
  2023. }
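/*
* Allocate the fastpath array and, for each queue, a status block that is
* initialized through qed. Each fastpath also records how many CQEs fit
* in its global completion queue so the consumer index can wrap correctly.
*/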
  2024. static int qedf_prepare_sb(struct qedf_ctx *qedf)
  2025. {
  2026. int id;
  2027. struct qedf_fastpath *fp;
  2028. int ret;
  2029. qedf->fp_array =
  2030. kcalloc(qedf->num_queues, sizeof(struct qedf_fastpath),
  2031. GFP_KERNEL);
  2032. if (!qedf->fp_array) {
  2033. QEDF_ERR(&(qedf->dbg_ctx), "fastpath array allocation "
  2034. "failed.\n");
  2035. return -ENOMEM;
  2036. }
  2037. for (id = 0; id < qedf->num_queues; id++) {
  2038. fp = &(qedf->fp_array[id]);
  2039. fp->sb_id = QEDF_SB_ID_NULL;
  2040. fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
  2041. if (!fp->sb_info) {
  2042. QEDF_ERR(&(qedf->dbg_ctx), "SB info struct "
  2043. "allocation failed.\n");
  2044. goto err;
  2045. }
  2046. ret = qedf_alloc_and_init_sb(qedf, fp->sb_info, id);
  2047. if (ret) {
  2048. QEDF_ERR(&(qedf->dbg_ctx), "SB allocation and "
  2049. "initialization failed.\n");
  2050. goto err;
  2051. }
  2052. fp->sb_id = id;
  2053. fp->qedf = qedf;
  2054. fp->cq_num_entries =
  2055. qedf->global_queues[id]->cq_mem_size /
  2056. sizeof(struct fcoe_cqe);
  2057. }
  2058. err:
  2059. return 0;
  2060. }
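/*
* Deferred CQE dispatcher. Looks up the io_req by the task ID carried in
* the CQE, checks that the session is still offloaded, credits a send
* queue entry back to the fcport for each recognized completion type and
* fans out to the SCSI, ELS, TMF, sequence cleanup, error detection,
* exchange cleanup or abort completion handlers.
*/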
  2061. void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe)
  2062. {
  2063. u16 xid;
  2064. struct qedf_ioreq *io_req;
  2065. struct qedf_rport *fcport;
  2066. u32 comp_type;
  2067. comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
  2068. FCOE_CQE_CQE_TYPE_MASK;
  2069. xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK;
  2070. io_req = &qedf->cmd_mgr->cmds[xid];
  2071. /* Completion not for a valid I/O anymore so just return */
  2072. if (!io_req)
  2073. return;
  2074. fcport = io_req->fcport;
  2075. if (fcport == NULL) {
  2076. QEDF_ERR(&(qedf->dbg_ctx), "fcport is NULL.\n");
  2077. return;
  2078. }
  2079. /*
  2080. * Check that fcport is offloaded. If it isn't then the spinlock
  2081. * isn't valid and shouldn't be taken. We should just return.
  2082. */
  2083. if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
  2084. QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
  2085. return;
  2086. }
  2087. switch (comp_type) {
  2088. case FCOE_GOOD_COMPLETION_CQE_TYPE:
  2089. atomic_inc(&fcport->free_sqes);
  2090. switch (io_req->cmd_type) {
  2091. case QEDF_SCSI_CMD:
  2092. qedf_scsi_completion(qedf, cqe, io_req);
  2093. break;
  2094. case QEDF_ELS:
  2095. qedf_process_els_compl(qedf, cqe, io_req);
  2096. break;
  2097. case QEDF_TASK_MGMT_CMD:
  2098. qedf_process_tmf_compl(qedf, cqe, io_req);
  2099. break;
  2100. case QEDF_SEQ_CLEANUP:
  2101. qedf_process_seq_cleanup_compl(qedf, cqe, io_req);
  2102. break;
  2103. }
  2104. break;
  2105. case FCOE_ERROR_DETECTION_CQE_TYPE:
  2106. atomic_inc(&fcport->free_sqes);
  2107. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
  2108. "Error detect CQE.\n");
  2109. qedf_process_error_detect(qedf, cqe, io_req);
  2110. break;
  2111. case FCOE_EXCH_CLEANUP_CQE_TYPE:
  2112. atomic_inc(&fcport->free_sqes);
  2113. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
  2114. "Cleanup CQE.\n");
  2115. qedf_process_cleanup_compl(qedf, cqe, io_req);
  2116. break;
  2117. case FCOE_ABTS_CQE_TYPE:
  2118. atomic_inc(&fcport->free_sqes);
  2119. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
  2120. "Abort CQE.\n");
  2121. qedf_process_abts_compl(qedf, cqe, io_req);
  2122. break;
  2123. case FCOE_DUMMY_CQE_TYPE:
  2124. atomic_inc(&fcport->free_sqes);
  2125. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
  2126. "Dummy CQE.\n");
  2127. break;
  2128. case FCOE_LOCAL_COMP_CQE_TYPE:
  2129. atomic_inc(&fcport->free_sqes);
  2130. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
  2131. "Local completion CQE.\n");
  2132. break;
  2133. case FCOE_WARNING_CQE_TYPE:
  2134. atomic_inc(&fcport->free_sqes);
  2135. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
  2136. "Warning CQE.\n");
  2137. qedf_process_warning_compl(qedf, cqe, io_req);
  2138. break;
  2139. case MAX_FCOE_CQE_TYPE:
  2140. atomic_inc(&fcport->free_sqes);
  2141. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
  2142. "Max FCoE CQE.\n");
  2143. break;
  2144. default:
  2145. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
  2146. "Default CQE.\n");
  2147. break;
  2148. }
  2149. }
  2150. static void qedf_free_bdq(struct qedf_ctx *qedf)
  2151. {
  2152. int i;
  2153. if (qedf->bdq_pbl_list)
  2154. dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
  2155. qedf->bdq_pbl_list, qedf->bdq_pbl_list_dma);
  2156. if (qedf->bdq_pbl)
  2157. dma_free_coherent(&qedf->pdev->dev, qedf->bdq_pbl_mem_size,
  2158. qedf->bdq_pbl, qedf->bdq_pbl_dma);
  2159. for (i = 0; i < QEDF_BDQ_SIZE; i++) {
  2160. if (qedf->bdq[i].buf_addr) {
  2161. dma_free_coherent(&qedf->pdev->dev, QEDF_BDQ_BUF_SIZE,
  2162. qedf->bdq[i].buf_addr, qedf->bdq[i].buf_dma);
  2163. }
  2164. }
  2165. }
  2166. static void qedf_free_global_queues(struct qedf_ctx *qedf)
  2167. {
  2168. int i;
  2169. struct global_queue **gl = qedf->global_queues;
  2170. for (i = 0; i < qedf->num_queues; i++) {
  2171. if (!gl[i])
  2172. continue;
  2173. if (gl[i]->cq)
  2174. dma_free_coherent(&qedf->pdev->dev,
  2175. gl[i]->cq_mem_size, gl[i]->cq, gl[i]->cq_dma);
  2176. if (gl[i]->cq_pbl)
  2177. dma_free_coherent(&qedf->pdev->dev, gl[i]->cq_pbl_size,
  2178. gl[i]->cq_pbl, gl[i]->cq_pbl_dma);
  2179. kfree(gl[i]);
  2180. }
  2181. qedf_free_bdq(qedf);
  2182. }
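/*
* Allocate the BDQ used for unsolicited receive buffers: the individual
* DMA buffers, a PBL describing each buffer (the opaque.lo field holds the
* buffer's index into the bdq array) and a single page listing the PBL
* pages, which is later passed to the firmware via the FCoE PF parameters.
*/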
  2183. static int qedf_alloc_bdq(struct qedf_ctx *qedf)
  2184. {
  2185. int i;
  2186. struct scsi_bd *pbl;
  2187. u64 *list;
  2188. dma_addr_t page;
  2189. /* Alloc dma memory for BDQ buffers */
  2190. for (i = 0; i < QEDF_BDQ_SIZE; i++) {
  2191. qedf->bdq[i].buf_addr = dma_alloc_coherent(&qedf->pdev->dev,
  2192. QEDF_BDQ_BUF_SIZE, &qedf->bdq[i].buf_dma, GFP_KERNEL);
  2193. if (!qedf->bdq[i].buf_addr) {
  2194. QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ "
  2195. "buffer %d.\n", i);
  2196. return -ENOMEM;
  2197. }
  2198. }
  2199. /* Alloc dma memory for BDQ page buffer list */
  2200. qedf->bdq_pbl_mem_size =
  2201. QEDF_BDQ_SIZE * sizeof(struct scsi_bd);
  2202. qedf->bdq_pbl_mem_size =
  2203. ALIGN(qedf->bdq_pbl_mem_size, QEDF_PAGE_SIZE);
  2204. qedf->bdq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
  2205. qedf->bdq_pbl_mem_size, &qedf->bdq_pbl_dma, GFP_KERNEL);
  2206. if (!qedf->bdq_pbl) {
  2207. QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ PBL.\n");
  2208. return -ENOMEM;
  2209. }
  2210. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  2211. "BDQ PBL addr=0x%p dma=%pad\n",
  2212. qedf->bdq_pbl, &qedf->bdq_pbl_dma);
  2213. /*
  2214. * Populate BDQ PBL with physical and virtual address of individual
  2215. * BDQ buffers
  2216. */
  2217. pbl = (struct scsi_bd *)qedf->bdq_pbl;
  2218. for (i = 0; i < QEDF_BDQ_SIZE; i++) {
  2219. pbl->address.hi = cpu_to_le32(U64_HI(qedf->bdq[i].buf_dma));
  2220. pbl->address.lo = cpu_to_le32(U64_LO(qedf->bdq[i].buf_dma));
  2221. pbl->opaque.hi = 0;
  2222. /* Opaque lo data is an index into the BDQ array */
  2223. pbl->opaque.lo = cpu_to_le32(i);
  2224. pbl++;
  2225. }
  2226. /* Allocate list of PBL pages */
  2227. qedf->bdq_pbl_list = dma_zalloc_coherent(&qedf->pdev->dev,
  2228. QEDF_PAGE_SIZE, &qedf->bdq_pbl_list_dma, GFP_KERNEL);
  2229. if (!qedf->bdq_pbl_list) {
  2230. QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n");
  2231. return -ENOMEM;
  2232. }
  2233. /*
  2234. * Now populate PBL list with pages that contain pointers to the
  2235. * individual buffers.
  2236. */
  2237. qedf->bdq_pbl_list_num_entries = qedf->bdq_pbl_mem_size /
  2238. QEDF_PAGE_SIZE;
  2239. list = (u64 *)qedf->bdq_pbl_list;
  2240. page = qedf->bdq_pbl_list_dma;
  2241. for (i = 0; i < qedf->bdq_pbl_list_num_entries; i++) {
  2242. *list = qedf->bdq_pbl_dma;
  2243. list++;
  2244. page += QEDF_PAGE_SIZE;
  2245. }
  2246. return 0;
  2247. }
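/*
* Allocate one global completion queue (and its PBL) per MSI-X vector,
* build the page lists and fill the p_cpuq array handed to qed with the
* CQ PBL addresses; the RQ pointer slots are simply written as zero since
* the driver posts its receive buffers through the BDQ.
*/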
  2248. static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
  2249. {
  2250. u32 *list;
  2251. int i;
  2252. int status = 0, rc;
  2253. u32 *pbl;
  2254. dma_addr_t page;
  2255. int num_pages;
  2256. /* Allocate and map CQs, RQs */
  2257. /*
  2258. * Number of global queues (CQ / RQ). This should
  2259. * be <= number of available MSIX vectors for the PF
  2260. */
  2261. if (!qedf->num_queues) {
  2262. QEDF_ERR(&(qedf->dbg_ctx), "No MSI-X vectors available!\n");
  2263. return 1;
  2264. }
  2265. /*
  2266. * Make sure we allocated the PBL that will contain the physical
  2267. * addresses of our queues
  2268. */
  2269. if (!qedf->p_cpuq) {
  2270. status = 1;
  2271. goto mem_alloc_failure;
  2272. }
  2273. qedf->global_queues = kzalloc((sizeof(struct global_queue *)
  2274. * qedf->num_queues), GFP_KERNEL);
  2275. if (!qedf->global_queues) {
  2276. QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate global "
  2277. "queues array ptr memory\n");
  2278. return -ENOMEM;
  2279. }
  2280. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  2281. "qedf->global_queues=%p.\n", qedf->global_queues);
  2282. /* Allocate DMA coherent buffers for BDQ */
  2283. rc = qedf_alloc_bdq(qedf);
  2284. if (rc)
  2285. goto mem_alloc_failure;
  2286. /* Allocate a CQ and an associated PBL for each MSI-X vector */
  2287. for (i = 0; i < qedf->num_queues; i++) {
  2288. qedf->global_queues[i] = kzalloc(sizeof(struct global_queue),
  2289. GFP_KERNEL);
  2290. if (!qedf->global_queues[i]) {
  2291. QEDF_WARN(&(qedf->dbg_ctx), "Unable to allocate "
  2292. "global queue %d.\n", i);
  2293. status = -ENOMEM;
  2294. goto mem_alloc_failure;
  2295. }
  2296. qedf->global_queues[i]->cq_mem_size =
  2297. FCOE_PARAMS_CQ_NUM_ENTRIES * sizeof(struct fcoe_cqe);
  2298. qedf->global_queues[i]->cq_mem_size =
  2299. ALIGN(qedf->global_queues[i]->cq_mem_size, QEDF_PAGE_SIZE);
  2300. qedf->global_queues[i]->cq_pbl_size =
  2301. (qedf->global_queues[i]->cq_mem_size /
  2302. PAGE_SIZE) * sizeof(void *);
  2303. qedf->global_queues[i]->cq_pbl_size =
  2304. ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE);
  2305. qedf->global_queues[i]->cq =
  2306. dma_zalloc_coherent(&qedf->pdev->dev,
  2307. qedf->global_queues[i]->cq_mem_size,
  2308. &qedf->global_queues[i]->cq_dma, GFP_KERNEL);
  2309. if (!qedf->global_queues[i]->cq) {
  2310. QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n");
  2311. status = -ENOMEM;
  2312. goto mem_alloc_failure;
  2313. }
  2314. qedf->global_queues[i]->cq_pbl =
  2315. dma_zalloc_coherent(&qedf->pdev->dev,
  2316. qedf->global_queues[i]->cq_pbl_size,
  2317. &qedf->global_queues[i]->cq_pbl_dma, GFP_KERNEL);
  2318. if (!qedf->global_queues[i]->cq_pbl) {
  2319. QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n");
  2320. status = -ENOMEM;
  2321. goto mem_alloc_failure;
  2322. }
  2323. /* Create PBL */
  2324. num_pages = qedf->global_queues[i]->cq_mem_size /
  2325. QEDF_PAGE_SIZE;
  2326. page = qedf->global_queues[i]->cq_dma;
  2327. pbl = (u32 *)qedf->global_queues[i]->cq_pbl;
  2328. while (num_pages--) {
  2329. *pbl = U64_LO(page);
  2330. pbl++;
  2331. *pbl = U64_HI(page);
  2332. pbl++;
  2333. page += QEDF_PAGE_SIZE;
  2334. }
  2335. /* Set the initial consumer index for cq */
  2336. qedf->global_queues[i]->cq_cons_idx = 0;
  2337. }
  2338. list = (u32 *)qedf->p_cpuq;
  2339. /*
  2340. * The list is built as follows: CQ#0 PBL pointer, RQ#0 PBL pointer,
  2341. * CQ#1 PBL pointer, RQ#1 PBL pointer, etc. Each PBL pointer points
  2342. * to the physical address which contains an array of pointers to
  2343. * the physical addresses of the specific queue pages.
  2344. */
  2345. for (i = 0; i < qedf->num_queues; i++) {
  2346. *list = U64_LO(qedf->global_queues[i]->cq_pbl_dma);
  2347. list++;
  2348. *list = U64_HI(qedf->global_queues[i]->cq_pbl_dma);
  2349. list++;
  2350. *list = U64_LO(0);
  2351. list++;
  2352. *list = U64_HI(0);
  2353. list++;
  2354. }
  2355. return 0;
  2356. mem_alloc_failure:
  2357. qedf_free_global_queues(qedf);
  2358. return status;
  2359. }
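/*
* Compute the queue sizing (SQ PBL pages, CQ entry count) and fill
* qedf->pf_params.fcoe_pf_params with the connection and task counts, the
* global queue parameter array address, BDQ PBL address and size, MTU,
* page size and global RQ/CMD producer indices that qed passes to the
* firmware before the slowpath is started.
*/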
  2360. static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf)
  2361. {
  2362. u8 sq_num_pbl_pages;
  2363. u32 sq_mem_size;
  2364. u32 cq_mem_size;
  2365. u32 cq_num_entries;
  2366. int rval;
/*
* The number of completion queues/fastpath interrupts/status blocks
* we allocate is the minimum of:
*
* Number of CPUs
* Number allocated by qed for our PCI function
*/
  2374. qedf->num_queues = MIN_NUM_CPUS_MSIX(qedf);
  2375. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n",
  2376. qedf->num_queues);
  2377. qedf->p_cpuq = pci_alloc_consistent(qedf->pdev,
  2378. qedf->num_queues * sizeof(struct qedf_glbl_q_params),
  2379. &qedf->hw_p_cpuq);
  2380. if (!qedf->p_cpuq) {
  2381. QEDF_ERR(&(qedf->dbg_ctx), "pci_alloc_consistent failed.\n");
  2382. return 1;
  2383. }
  2384. rval = qedf_alloc_global_queues(qedf);
  2385. if (rval) {
  2386. QEDF_ERR(&(qedf->dbg_ctx), "Global queue allocation "
  2387. "failed.\n");
  2388. return 1;
  2389. }
  2390. /* Calculate SQ PBL size in the same manner as in qedf_sq_alloc() */
  2391. sq_mem_size = SQ_NUM_ENTRIES * sizeof(struct fcoe_wqe);
  2392. sq_mem_size = ALIGN(sq_mem_size, QEDF_PAGE_SIZE);
  2393. sq_num_pbl_pages = (sq_mem_size / QEDF_PAGE_SIZE);
  2394. /* Calculate CQ num entries */
  2395. cq_mem_size = FCOE_PARAMS_CQ_NUM_ENTRIES * sizeof(struct fcoe_cqe);
  2396. cq_mem_size = ALIGN(cq_mem_size, QEDF_PAGE_SIZE);
  2397. cq_num_entries = cq_mem_size / sizeof(struct fcoe_cqe);
  2398. memset(&(qedf->pf_params), 0, sizeof(qedf->pf_params));
  2399. /* Setup the value for fcoe PF */
  2400. qedf->pf_params.fcoe_pf_params.num_cons = QEDF_MAX_SESSIONS;
  2401. qedf->pf_params.fcoe_pf_params.num_tasks = FCOE_PARAMS_NUM_TASKS;
  2402. qedf->pf_params.fcoe_pf_params.glbl_q_params_addr =
  2403. (u64)qedf->hw_p_cpuq;
  2404. qedf->pf_params.fcoe_pf_params.sq_num_pbl_pages = sq_num_pbl_pages;
  2405. qedf->pf_params.fcoe_pf_params.rq_buffer_log_size = 0;
  2406. qedf->pf_params.fcoe_pf_params.cq_num_entries = cq_num_entries;
  2407. qedf->pf_params.fcoe_pf_params.num_cqs = qedf->num_queues;
  2408. /* log_page_size: 12 for 4KB pages */
  2409. qedf->pf_params.fcoe_pf_params.log_page_size = ilog2(QEDF_PAGE_SIZE);
  2410. qedf->pf_params.fcoe_pf_params.mtu = 9000;
  2411. qedf->pf_params.fcoe_pf_params.gl_rq_pi = QEDF_FCOE_PARAMS_GL_RQ_PI;
  2412. qedf->pf_params.fcoe_pf_params.gl_cmd_pi = QEDF_FCOE_PARAMS_GL_CMD_PI;
  2413. /* BDQ address and size */
  2414. qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0] =
  2415. qedf->bdq_pbl_list_dma;
  2416. qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0] =
  2417. qedf->bdq_pbl_list_num_entries;
  2418. qedf->pf_params.fcoe_pf_params.rq_buffer_size = QEDF_BDQ_BUF_SIZE;
  2419. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  2420. "bdq_list=%p bdq_pbl_list_dma=%llx bdq_pbl_list_entries=%d.\n",
  2421. qedf->bdq_pbl_list,
  2422. qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0],
  2423. qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0]);
  2424. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  2425. "cq_num_entries=%d.\n",
  2426. qedf->pf_params.fcoe_pf_params.cq_num_entries);
  2427. return 0;
  2428. }
  2429. /* Free DMA coherent memory for array of queue pointers we pass to qed */
  2430. static void qedf_free_fcoe_pf_param(struct qedf_ctx *qedf)
  2431. {
  2432. size_t size = 0;
  2433. if (qedf->p_cpuq) {
  2434. size = qedf->num_queues * sizeof(struct qedf_glbl_q_params);
  2435. pci_free_consistent(qedf->pdev, size, qedf->p_cpuq,
  2436. qedf->hw_p_cpuq);
  2437. }
  2438. qedf_free_global_queues(qedf);
  2439. if (qedf->global_queues)
  2440. kfree(qedf->global_queues);
  2441. }
  2442. /*
  2443. * PCI driver functions
  2444. */
  2445. static const struct pci_device_id qedf_pci_tbl[] = {
  2446. { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165c) },
  2447. { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8080) },
  2448. {0}
  2449. };
  2450. MODULE_DEVICE_TABLE(pci, qedf_pci_tbl);
  2451. static struct pci_driver qedf_pci_driver = {
  2452. .name = QEDF_MODULE_NAME,
  2453. .id_table = qedf_pci_tbl,
  2454. .probe = qedf_probe,
  2455. .remove = qedf_remove,
  2456. };
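/*
* Common probe path shared by the initial PCI probe and error recovery
* (mode == QEDF_MODE_RECOVERY). On a fresh probe it allocates the lport
* and qedf context; in both modes it then runs the qed common probe, sets
* the FCoE PF parameters, starts the slowpath, sets up interrupts, starts
* the FCoE function, primes the BDQ doorbells, allocates the cmd manager
* and workqueues, starts LL2 and (on a fresh probe) configures the
* fcoe_ctlr and lport before requesting link up.
*/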
  2457. static int __qedf_probe(struct pci_dev *pdev, int mode)
  2458. {
  2459. int rc = -EINVAL;
  2460. struct fc_lport *lport;
  2461. struct qedf_ctx *qedf;
  2462. struct Scsi_Host *host;
  2463. bool is_vf = false;
  2464. struct qed_ll2_params params;
  2465. char host_buf[20];
  2466. struct qed_link_params link_params;
  2467. int status;
  2468. void *task_start, *task_end;
  2469. struct qed_slowpath_params slowpath_params;
  2470. struct qed_probe_params qed_params;
  2471. u16 tmp;
  2472. /*
  2473. * When doing error recovery we didn't reap the lport so don't try
  2474. * to reallocate it.
  2475. */
  2476. if (mode != QEDF_MODE_RECOVERY) {
  2477. lport = libfc_host_alloc(&qedf_host_template,
  2478. sizeof(struct qedf_ctx));
  2479. if (!lport) {
  2480. QEDF_ERR(NULL, "Could not allocate lport.\n");
  2481. rc = -ENOMEM;
  2482. goto err0;
  2483. }
  2484. /* Initialize qedf_ctx */
  2485. qedf = lport_priv(lport);
  2486. qedf->lport = lport;
  2487. qedf->ctlr.lp = lport;
  2488. qedf->pdev = pdev;
  2489. qedf->dbg_ctx.pdev = pdev;
  2490. qedf->dbg_ctx.host_no = lport->host->host_no;
  2491. spin_lock_init(&qedf->hba_lock);
  2492. INIT_LIST_HEAD(&qedf->fcports);
  2493. qedf->curr_conn_id = QEDF_MAX_SESSIONS - 1;
  2494. atomic_set(&qedf->num_offloads, 0);
  2495. qedf->stop_io_on_error = false;
  2496. pci_set_drvdata(pdev, qedf);
  2497. init_completion(&qedf->fipvlan_compl);
  2498. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
  2499. "QLogic FastLinQ FCoE Module qedf %s, "
  2500. "FW %d.%d.%d.%d\n", QEDF_VERSION,
  2501. FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
  2502. FW_ENGINEERING_VERSION);
  2503. } else {
  2504. /* Init pointers during recovery */
  2505. qedf = pci_get_drvdata(pdev);
  2506. lport = qedf->lport;
  2507. }
  2508. host = lport->host;
  2509. /* Allocate mempool for qedf_io_work structs */
  2510. qedf->io_mempool = mempool_create_slab_pool(QEDF_IO_WORK_MIN,
  2511. qedf_io_work_cache);
  2512. if (qedf->io_mempool == NULL) {
  2513. QEDF_ERR(&(qedf->dbg_ctx), "qedf->io_mempool is NULL.\n");
  2514. goto err1;
  2515. }
  2516. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, "qedf->io_mempool=%p.\n",
  2517. qedf->io_mempool);
  2518. sprintf(host_buf, "qedf_%u_link",
  2519. qedf->lport->host->host_no);
  2520. qedf->link_update_wq = create_workqueue(host_buf);
  2521. INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update);
  2522. INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery);
  2523. qedf->fipvlan_retries = qedf_fipvlan_retries;
  2524. /*
  2525. * Common probe. Takes care of basic hardware init and pci_*
  2526. * functions.
  2527. */
  2528. memset(&qed_params, 0, sizeof(qed_params));
  2529. qed_params.protocol = QED_PROTOCOL_FCOE;
  2530. qed_params.dp_module = qedf_dp_module;
  2531. qed_params.dp_level = qedf_dp_level;
  2532. qed_params.is_vf = is_vf;
  2533. qedf->cdev = qed_ops->common->probe(pdev, &qed_params);
  2534. if (!qedf->cdev) {
  2535. rc = -ENODEV;
  2536. goto err1;
  2537. }
  2538. /* Learn information crucial for qedf to progress */
  2539. rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
  2540. if (rc) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to fill dev info.\n");
  2542. goto err1;
  2543. }
/* Queue allocation code should come here. The order should be:
* slowpath_start
* status block allocation
* interrupt registration (to get min number of queues)
* set_fcoe_pf_param
* qed_sp_fcoe_func_start
*/
  2552. rc = qedf_set_fcoe_pf_param(qedf);
  2553. if (rc) {
  2554. QEDF_ERR(&(qedf->dbg_ctx), "Cannot set fcoe pf param.\n");
  2555. goto err2;
  2556. }
  2557. qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
  2558. /* Record BDQ producer doorbell addresses */
  2559. qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
  2560. qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr;
  2561. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  2562. "BDQ primary_prod=%p secondary_prod=%p.\n", qedf->bdq_primary_prod,
  2563. qedf->bdq_secondary_prod);
  2564. qed_ops->register_ops(qedf->cdev, &qedf_cb_ops, qedf);
  2565. rc = qedf_prepare_sb(qedf);
  2566. if (rc) {
QEDF_ERR(&(qedf->dbg_ctx), "Cannot prepare status blocks.\n");
  2568. goto err2;
  2569. }
  2570. /* Start the Slowpath-process */
  2571. slowpath_params.int_mode = QED_INT_MODE_MSIX;
  2572. slowpath_params.drv_major = QEDF_DRIVER_MAJOR_VER;
  2573. slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER;
  2574. slowpath_params.drv_rev = QEDF_DRIVER_REV_VER;
  2575. slowpath_params.drv_eng = QEDF_DRIVER_ENG_VER;
  2576. strncpy(slowpath_params.name, "qedf", QED_DRV_VER_STR_SIZE);
  2577. rc = qed_ops->common->slowpath_start(qedf->cdev, &slowpath_params);
  2578. if (rc) {
  2579. QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n");
  2580. goto err2;
  2581. }
  2582. /*
  2583. * update_pf_params needs to be called before and after slowpath
  2584. * start
  2585. */
  2586. qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
  2587. /* Setup interrupts */
  2588. rc = qedf_setup_int(qedf);
  2589. if (rc)
  2590. goto err3;
  2591. rc = qed_ops->start(qedf->cdev, &qedf->tasks);
  2592. if (rc) {
  2593. QEDF_ERR(&(qedf->dbg_ctx), "Cannot start FCoE function.\n");
  2594. goto err4;
  2595. }
  2596. task_start = qedf_get_task_mem(&qedf->tasks, 0);
  2597. task_end = qedf_get_task_mem(&qedf->tasks, MAX_TID_BLOCKS_FCOE - 1);
  2598. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Task context start=%p, "
  2599. "end=%p block_size=%u.\n", task_start, task_end,
  2600. qedf->tasks.size);
  2601. /*
  2602. * We need to write the number of BDs in the BDQ we've preallocated so
  2603. * the f/w will do a prefetch and we'll get an unsolicited CQE when a
  2604. * packet arrives.
  2605. */
  2606. qedf->bdq_prod_idx = QEDF_BDQ_SIZE;
  2607. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  2608. "Writing %d to primary and secondary BDQ doorbell registers.\n",
  2609. qedf->bdq_prod_idx);
  2610. writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
  2611. tmp = readw(qedf->bdq_primary_prod);
  2612. writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
  2613. tmp = readw(qedf->bdq_secondary_prod);
  2614. qed_ops->common->set_power_state(qedf->cdev, PCI_D0);
/* Now that the dev_info struct has been filled in, set the MAC
* address
*/
  2618. ether_addr_copy(qedf->mac, qedf->dev_info.common.hw_mac);
  2619. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "MAC address is %pM.\n",
  2620. qedf->mac);
  2621. /*
  2622. * Set the WWNN and WWPN in the following way:
  2623. *
  2624. * If the info we get from qed is non-zero then use that to set the
  2625. * WWPN and WWNN. Otherwise fall back to use fcoe_wwn_from_mac() based
  2626. * on the MAC address.
  2627. */
  2628. if (qedf->dev_info.wwnn != 0 && qedf->dev_info.wwpn != 0) {
  2629. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  2630. "Setting WWPN and WWNN from qed dev_info.\n");
  2631. qedf->wwnn = qedf->dev_info.wwnn;
  2632. qedf->wwpn = qedf->dev_info.wwpn;
  2633. } else {
  2634. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
  2635. "Setting WWPN and WWNN using fcoe_wwn_from_mac().\n");
  2636. qedf->wwnn = fcoe_wwn_from_mac(qedf->mac, 1, 0);
  2637. qedf->wwpn = fcoe_wwn_from_mac(qedf->mac, 2, 0);
  2638. }
  2639. QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "WWNN=%016llx "
  2640. "WWPN=%016llx.\n", qedf->wwnn, qedf->wwpn);
  2641. sprintf(host_buf, "host_%d", host->host_no);
  2642. qed_ops->common->set_name(qedf->cdev, host_buf);

	/* Set xid max values */
	qedf->max_scsi_xid = QEDF_MAX_SCSI_XID;
	qedf->max_els_xid = QEDF_MAX_ELS_XID;

	/* Allocate cmd mgr */
	qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf);
	if (!qedf->cmd_mgr) {
		QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate cmd mgr.\n");
		rc = -ENOMEM;
		goto err5;
	}
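
	/* The Scsi_Host is only registered on an initial (non-recovery) load */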
	if (mode != QEDF_MODE_RECOVERY) {
		host->transportt = qedf_fc_transport_template;
		host->can_queue = QEDF_MAX_ELS_XID;
		host->max_lun = qedf_max_lun;
		host->max_cmd_len = QEDF_MAX_CDB_LEN;
		rc = scsi_add_host(host, &pdev->dev);
		if (rc)
			goto err6;
	}
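
	/* Light L2 parameters: jumbo MTU and the port MAC address */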
	memset(&params, 0, sizeof(params));
	params.mtu = 9000;
	ether_addr_copy(params.ll2_mac_address, qedf->mac);

	/* Start LL2 processing thread */
	snprintf(host_buf, 20, "qedf_%d_ll2", host->host_no);
	qedf->ll2_recv_wq = create_workqueue(host_buf);
	if (!qedf->ll2_recv_wq) {
		QEDF_ERR(&(qedf->dbg_ctx), "Failed to create LL2 workqueue.\n");
		rc = -ENOMEM;
		goto err7;
	}

#ifdef CONFIG_DEBUG_FS
	qedf_dbg_host_init(&(qedf->dbg_ctx), &qedf_debugfs_ops,
			   &qedf_dbg_fops);
#endif

	/* Start LL2 */
	qed_ops->ll2->register_cb_ops(qedf->cdev, &qedf_ll2_cb_ops, qedf);
	rc = qed_ops->ll2->start(qedf->cdev, &params);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "Could not start Light L2.\n");
		goto err7;
	}
	set_bit(QEDF_LL2_STARTED, &qedf->flags);

	/* Set initial FIP/FCoE VLAN to 0 */
	qedf->vlan_id = 0;

	/*
	 * No need to setup fcoe_ctlr or fc_lport objects during recovery since
	 * they were not reaped during the unload process.
	 */
	if (mode != QEDF_MODE_RECOVERY) {
		/* Setup embedded fcoe controller */
		qedf_fcoe_ctlr_setup(qedf);

		/* Setup lport */
		rc = qedf_lport_setup(qedf);
		if (rc) {
			QEDF_ERR(&(qedf->dbg_ctx),
				 "qedf_lport_setup failed.\n");
			goto err7;
		}
	}

	sprintf(host_buf, "qedf_%u_timer", qedf->lport->host->host_no);
	qedf->timer_work_queue = create_workqueue(host_buf);
	if (!qedf->timer_work_queue) {
		QEDF_ERR(&(qedf->dbg_ctx), "Failed to create timer workqueue.\n");
		rc = -ENOMEM;
		goto err7;
	}

	/* DPC workqueue is not reaped during recovery unload */
	if (mode != QEDF_MODE_RECOVERY) {
		sprintf(host_buf, "qedf_%u_dpc",
			qedf->lport->host->host_no);
		qedf->dpc_wq = create_workqueue(host_buf);
	}

	/*
	 * GRC dump and sysfs parameters are not reaped during the recovery
	 * unload process.
	 */
	if (mode != QEDF_MODE_RECOVERY) {
		qedf->grcdump_size = qed_ops->common->dbg_grc_size(qedf->cdev);
		if (qedf->grcdump_size) {
			rc = qedf_alloc_grc_dump_buf(&qedf->grcdump,
						     qedf->grcdump_size);
			if (rc) {
				QEDF_ERR(&(qedf->dbg_ctx),
					 "GRC Dump buffer alloc failed.\n");
				qedf->grcdump = NULL;
			}

			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
				  "grcdump: addr=%p, size=%u.\n",
				  qedf->grcdump, qedf->grcdump_size);
		}
		qedf_create_sysfs_ctx_attr(qedf);

		/* Initialize I/O tracing for this adapter */
		spin_lock_init(&qedf->io_trace_lock);
		qedf->io_trace_idx = 0;
	}
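
	/* Completion used to wait for the initial FLOGI to complete */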
	init_completion(&qedf->flogi_compl);

	memset(&link_params, 0, sizeof(struct qed_link_params));
	link_params.link_up = true;
	status = qed_ops->common->set_link(qedf->cdev, &link_params);
	if (status)
		QEDF_WARN(&(qedf->dbg_ctx), "set_link failed.\n");

	/* Start/restart discovery */
	if (mode == QEDF_MODE_RECOVERY)
		fcoe_ctlr_link_up(&qedf->ctlr);
	else
		fc_fabric_login(lport);

	/* All good */
	return 0;

err7:
	if (qedf->ll2_recv_wq)
		destroy_workqueue(qedf->ll2_recv_wq);
	fc_remove_host(qedf->lport->host);
	scsi_remove_host(qedf->lport->host);
#ifdef CONFIG_DEBUG_FS
	qedf_dbg_host_exit(&(qedf->dbg_ctx));
#endif
err6:
	qedf_cmd_mgr_free(qedf->cmd_mgr);
err5:
	qed_ops->stop(qedf->cdev);
err4:
	qedf_free_fcoe_pf_param(qedf);
	qedf_sync_free_irqs(qedf);
err3:
	qed_ops->common->slowpath_stop(qedf->cdev);
err2:
	qed_ops->common->remove(qedf->cdev);
err1:
	scsi_host_put(lport->host);
err0:
	return rc;
}
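
/* PCI probe entry point; performs a normal (non-recovery) load */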
static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return __qedf_probe(pdev, QEDF_MODE_NORMAL);
}
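
/*
 * Common teardown path shared by normal PCI removal and internal recovery;
 * "mode" selects which OS-visible objects are preserved.
 */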
static void __qedf_remove(struct pci_dev *pdev, int mode)
{
	struct qedf_ctx *qedf;

	if (!pdev) {
		QEDF_ERR(NULL, "pdev is NULL.\n");
		return;
	}
	qedf = pci_get_drvdata(pdev);

	/*
	 * Prevent race where we're in board disable work and then try to
	 * rmmod the module.
	 */
	if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
		QEDF_ERR(&qedf->dbg_ctx, "Already removing PCI function.\n");
		return;
	}

	if (mode != QEDF_MODE_RECOVERY)
		set_bit(QEDF_UNLOADING, &qedf->flags);

	/* Logoff the fabric to upload all connections */
	if (mode == QEDF_MODE_RECOVERY)
		fcoe_ctlr_link_down(&qedf->ctlr);
	else
		fc_fabric_logoff(qedf->lport);
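
	/* Wait until all remote sessions have been uploaded */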
	qedf_wait_for_upload(qedf);

#ifdef CONFIG_DEBUG_FS
	qedf_dbg_host_exit(&(qedf->dbg_ctx));
#endif

	/* Stop any link update handling */
	cancel_delayed_work_sync(&qedf->link_update);
	destroy_workqueue(qedf->link_update_wq);
	qedf->link_update_wq = NULL;

	if (qedf->timer_work_queue)
		destroy_workqueue(qedf->timer_work_queue);

	/* Stop Light L2 */
	clear_bit(QEDF_LL2_STARTED, &qedf->flags);
	qed_ops->ll2->stop(qedf->cdev);
	if (qedf->ll2_recv_wq)
		destroy_workqueue(qedf->ll2_recv_wq);

	/* Stop fastpath */
	qedf_sync_free_irqs(qedf);
	qedf_destroy_sb(qedf);

	/*
	 * During recovery don't destroy OS constructs that represent the
	 * physical port.
	 */
	if (mode != QEDF_MODE_RECOVERY) {
		qedf_free_grc_dump_buf(&qedf->grcdump);
		qedf_remove_sysfs_ctx_attr(qedf);

		/* Remove all SCSI/libfc/libfcoe structures */
		fcoe_ctlr_destroy(&qedf->ctlr);
		fc_lport_destroy(qedf->lport);
		fc_remove_host(qedf->lport->host);
		scsi_remove_host(qedf->lport->host);
	}

	qedf_cmd_mgr_free(qedf->cmd_mgr);

	if (mode != QEDF_MODE_RECOVERY) {
		fc_exch_mgr_free(qedf->lport);
		fc_lport_free_stats(qedf->lport);

		/* Wait for all vports to be reaped */
		qedf_wait_for_vport_destroy(qedf);
	}

	/*
	 * Now that all connections have been uploaded we can stop the
	 * rest of the qed operations
	 */
	qed_ops->stop(qedf->cdev);

	if (mode != QEDF_MODE_RECOVERY) {
		if (qedf->dpc_wq) {
			/* Stop general DPC handling */
			destroy_workqueue(qedf->dpc_wq);
			qedf->dpc_wq = NULL;
		}
	}

	/* Final shutdown for the board */
	qedf_free_fcoe_pf_param(qedf);
	if (mode != QEDF_MODE_RECOVERY) {
		qed_ops->common->set_power_state(qedf->cdev, PCI_D0);
		pci_set_drvdata(pdev, NULL);
	}
	qed_ops->common->slowpath_stop(qedf->cdev);
	qed_ops->common->remove(qedf->cdev);

	mempool_destroy(qedf->io_mempool);

	/* Only reap the Scsi_host on a real removal */
	if (mode != QEDF_MODE_RECOVERY)
		scsi_host_put(qedf->lport->host);
}

static void qedf_remove(struct pci_dev *pdev)
{
	/* Check to make sure this function wasn't already disabled */
	if (!atomic_read(&pdev->enable_cnt))
		return;

	__qedf_remove(pdev, QEDF_MODE_NORMAL);
}

/*
 * Module Init/Remove
 */
static int __init qedf_init(void)
{
	int ret;

	/* If debug=1 passed, set the default log mask */
	if (qedf_debug == QEDF_LOG_DEFAULT)
		qedf_debug = QEDF_DEFAULT_LOG_MASK;

	/* Print driver banner */
	QEDF_INFO(NULL, QEDF_LOG_INFO, "%s v%s.\n", QEDF_DESCR,
		  QEDF_VERSION);

	/* Create kmem_cache for qedf_io_work structs */
	qedf_io_work_cache = kmem_cache_create("qedf_io_work_cache",
	    sizeof(struct qedf_io_work), 0, SLAB_HWCACHE_ALIGN, NULL);
	if (qedf_io_work_cache == NULL) {
		QEDF_ERR(NULL, "qedf_io_work_cache is NULL.\n");
		goto err1;
	}
	QEDF_INFO(NULL, QEDF_LOG_DISC, "qedf_io_work_cache=%p.\n",
		  qedf_io_work_cache);

	qed_ops = qed_get_fcoe_ops();
	if (!qed_ops) {
		QEDF_ERR(NULL, "Failed to get qed fcoe operations\n");
		goto err1;
	}

#ifdef CONFIG_DEBUG_FS
	qedf_dbg_init("qedf");
#endif

	qedf_fc_transport_template =
	    fc_attach_transport(&qedf_fc_transport_fn);
	if (!qedf_fc_transport_template) {
		QEDF_ERR(NULL, "Could not register with FC transport\n");
		goto err2;
	}

	qedf_fc_vport_transport_template =
	    fc_attach_transport(&qedf_fc_vport_transport_fn);
	if (!qedf_fc_vport_transport_template) {
		QEDF_ERR(NULL, "Could not register vport template with FC "
			 "transport\n");
		goto err3;
	}

	qedf_io_wq = create_workqueue("qedf_io_wq");
	if (!qedf_io_wq) {
		QEDF_ERR(NULL, "Could not create qedf_io_wq.\n");
		goto err4;
	}
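
	/* Callback allowing qed to read the driver's login failure count */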
	qedf_cb_ops.get_login_failures = qedf_get_login_failures;

	ret = pci_register_driver(&qedf_pci_driver);
	if (ret) {
		QEDF_ERR(NULL, "Failed to register driver\n");
		goto err5;
	}

	return 0;

err5:
	destroy_workqueue(qedf_io_wq);
err4:
	fc_release_transport(qedf_fc_vport_transport_template);
err3:
	fc_release_transport(qedf_fc_transport_template);
err2:
#ifdef CONFIG_DEBUG_FS
	qedf_dbg_exit();
#endif
	qed_put_fcoe_ops();
err1:
	return -EINVAL;
}

static void __exit qedf_cleanup(void)
{
	pci_unregister_driver(&qedf_pci_driver);

	destroy_workqueue(qedf_io_wq);

	fc_release_transport(qedf_fc_vport_transport_template);
	fc_release_transport(qedf_fc_transport_template);
#ifdef CONFIG_DEBUG_FS
	qedf_dbg_exit();
#endif
	qed_put_fcoe_ops();

	kmem_cache_destroy(qedf_io_work_cache);
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("QLogic QEDF 25/40/50/100Gb FCoE Driver");
MODULE_AUTHOR("QLogic Corporation");
MODULE_VERSION(QEDF_VERSION);
module_init(qedf_init);
module_exit(qedf_cleanup);