/*
 * QLogic QLA3xxx NIC HBA Driver
 * Copyright (c) 2003-2006 QLogic Corporation
 *
 * See LICENSE.qla3xxx for copyright and licensing details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/prefetch.h>

#include "qla3xxx.h"

#define DRV_NAME	"qla3xxx"
#define DRV_STRING	"QLogic ISP3XXX Network Driver"
#define DRV_VERSION	"v2.03.00-k5"

static const char ql3xxx_driver_name[] = DRV_NAME;
static const char ql3xxx_driver_version[] = DRV_VERSION;

#define TIMED_OUT_MSG \
"Timed out waiting for management port to get free before issuing command\n"

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg
	= NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
	| NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;		/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");

static const struct pci_device_id ql3xxx_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);

/*
 * These are the known PHYs which are used
 */
enum PHY_DEVICE_TYPE {
	PHY_TYPE_UNKNOWN = 0,
	PHY_VITESSE_VSC8211,
	PHY_AGERE_ET1011C,
	MAX_PHY_DEV_TYPES
};

struct PHY_DEVICE_INFO {
	const enum PHY_DEVICE_TYPE phyDevice;
	const u32 phyIdOUI;
	const u16 phyIdModel;
	const char *name;
};

static const struct PHY_DEVICE_INFO PHY_DEVICES[] = {
	{PHY_TYPE_UNKNOWN,    0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
	{PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
	{PHY_AGERE_ET1011C,   0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
};
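
/*
 * Hardware semaphore helpers. The semaphore register packs a
 * write-enable mask in its upper 16 bits and the ownership bits in
 * its lower 16: writing (sem_mask | sem_bits) attempts to take the
 * semaphore, and reading the register back shows who actually owns
 * it. This upper-16-bit write-enable convention recurs throughout
 * the ISP3xxx register file (interrupt mask, MAC config, etc.).
 */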
/*
 * Caller must take hw_lock.
 */
static int ql_sem_spinlock(struct ql3_adapter *qdev,
			   u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;
	unsigned int seconds = 3;

	do {
		writel((sem_mask | sem_bits),
		       &port_regs->CommonRegs.semaphoreReg);
		value = readl(&port_regs->CommonRegs.semaphoreReg);
		if ((value & (sem_mask >> 16)) == sem_bits)
			return 0;
		ssleep(1);
	} while (--seconds);
	return -1;
}

static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
	readl(&port_regs->CommonRegs.semaphoreReg);
}

static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
	value = readl(&port_regs->CommonRegs.semaphoreReg);
	return ((value & (sem_mask >> 16)) == sem_bits);
}

/*
 * Caller holds hw_lock.
 */
static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
{
	int i = 0;

	do {
		if (ql_sem_lock(qdev,
				QL_DRVR_SEM_MASK,
				(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
				 * 2) << 1)) {
			netdev_printk(KERN_DEBUG, qdev->ndev,
				      "driver lock acquired\n");
			return 1;
		}
		ssleep(1);
	} while (++i < 10);

	netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
	return 0;
}
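
/*
 * Register access helpers. The controller multiplexes several
 * register pages behind one BAR; the page select lives in
 * ispControlStatus (again with the write-enable mask in the upper
 * 16 bits), and qdev->current_page caches the last page written so
 * the page0/1/2 accessors only switch pages when they have to. The
 * "_l" variants take hw_lock themselves; the bare variants expect
 * the caller to hold it.
 */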
static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	writel(((ISP_CONTROL_NP_MASK << 16) | page),
	       &port_regs->CommonRegs.ispControlStatus);
	readl(&port_regs->CommonRegs.ispControlStatus);
	qdev->current_page = page;
}

static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	value = readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	return value;
}

static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	return readl(reg);
}

static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	value = readl(reg);

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return value;
}

static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	return readl(reg);
}

static void ql_write_common_reg_l(struct ql3_adapter *qdev,
				  u32 __iomem *reg, u32 value)
{
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	writel(value, reg);
	readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
}

static void ql_write_common_reg(struct ql3_adapter *qdev,
				u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
}

static void ql_write_nvram_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
	udelay(1);
}

static void ql_write_page0_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page1_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 1)
		ql_set_register_page(qdev, 1);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page2_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 2)
		ql_set_register_page(qdev, 2);
	writel(value, reg);
	readl(reg);
}

static void ql_disable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      (ISP_IMR_ENABLE_INT << 16));
}

static void ql_enable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      ((0xff << 16) | ISP_IMR_ENABLE_INT));
}
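
/*
 * Large receive buffer bookkeeping. Free buffers sit on a singly
 * linked list (lrg_buf_free_head/tail). If a buffer's skb could not
 * be allocated or DMA-mapped, the buffer is still queued and
 * lrg_buf_skb_check is bumped to record that an skb is still owed
 * to it, so the allocation can be retried later.
 */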
static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
					    struct ql_rcv_buf_cb *lrg_buf_cb)
{
	dma_addr_t map;
	int err;

	lrg_buf_cb->next = NULL;

	if (qdev->lrg_buf_free_tail == NULL) {	/* The list is empty */
		qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
	} else {
		qdev->lrg_buf_free_tail->next = lrg_buf_cb;
		qdev->lrg_buf_free_tail = lrg_buf_cb;
	}

	if (!lrg_buf_cb->skb) {
		lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
						   qdev->lrg_buffer_len);
		if (unlikely(!lrg_buf_cb->skb)) {
			qdev->lrg_buf_skb_check++;
		} else {
			/*
			 * We save some space to copy the ethhdr from first
			 * buffer
			 */
			skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
			map = pci_map_single(qdev->pdev,
					     lrg_buf_cb->skb->data,
					     qdev->lrg_buffer_len -
					     QL_HEADER_SPACE,
					     PCI_DMA_FROMDEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netdev_err(qdev->ndev,
					   "PCI mapping failed with error: %d\n",
					   err);
				dev_kfree_skb(lrg_buf_cb->skb);
				lrg_buf_cb->skb = NULL;
				qdev->lrg_buf_skb_check++;
				return;
			}
			lrg_buf_cb->buf_phy_addr_low =
				cpu_to_le32(LS_64BITS(map));
			lrg_buf_cb->buf_phy_addr_high =
				cpu_to_le32(MS_64BITS(map));
			dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
			dma_unmap_len_set(lrg_buf_cb, maplen,
					  qdev->lrg_buffer_len -
					  QL_HEADER_SPACE);
		}
	}

	qdev->lrg_buf_free_count++;
}

static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
							   *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;

	if (lrg_buf_cb != NULL) {
		qdev->lrg_buf_free_head = lrg_buf_cb->next;
		if (qdev->lrg_buf_free_head == NULL)
			qdev->lrg_buf_free_tail = NULL;
		qdev->lrg_buf_free_count--;
	}

	return lrg_buf_cb;
}

static u32 addrBits = EEPROM_NO_ADDR_BITS;
static u32 dataBits = EEPROM_NO_DATA_BITS;

static void fm93c56a_deselect(struct ql3_adapter *qdev);
static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
			    unsigned short *value);
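
/*
 * NVRAM access. The part is a Microwire-style FM93C56A serial
 * EEPROM; the fm93c56a_* routines bit-bang it through the serial
 * port interface register: raise chip select, clock out a start bit
 * plus the command and address MSB-first on DO, then clock the data
 * bits back in on DI.
 */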
/*
 * Caller holds hw_lock.
 */
static void fm93c56a_select(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
	ql_write_nvram_reg(qdev, spir,
			   ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
{
	int i;
	u32 mask;
	u32 dataBit;
	u32 previousBit;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	/* Clock in a zero, then do the start bit */
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1));
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE));
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL));

	mask = 1 << (FM93C56A_CMD_BITS - 1);

	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < FM93C56A_CMD_BITS; i++) {
		dataBit = (cmd & mask)
			? AUBURN_EEPROM_DO_1
			: AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/* If the bit changed, change the DO state to match */
			ql_write_nvram_reg(qdev, spir,
					   (ISP_NVRAM_MASK |
					    qdev->eeprom_cmd_data | dataBit));
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_RISE));
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_FALL));
		cmd = cmd << 1;
	}

	mask = 1 << (addrBits - 1);

	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < addrBits; i++) {
		dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1
			: AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match
			 */
			ql_write_nvram_reg(qdev, spir,
					   (ISP_NVRAM_MASK |
					    qdev->eeprom_cmd_data | dataBit));
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_RISE));
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_FALL));
		eepromAddr = eepromAddr << 1;
	}
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_deselect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
{
	int i;
	u32 data = 0;
	u32 dataBit;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	/* Read the data bits */
	/* The first bit is a dummy. Clock right over it. */
	for (i = 0; i < dataBits; i++) {
		ql_write_nvram_reg(qdev, spir,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev, spir,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_FALL);
		dataBit = (ql_read_common_reg(qdev, spir) &
			   AUBURN_EEPROM_DI_1) ? 1 : 0;
		data = (data << 1) | dataBit;
	}
	*value = (u16)data;
}

/*
 * Caller holds hw_lock.
 */
static void eeprom_readword(struct ql3_adapter *qdev,
			    u32 eepromAddr, unsigned short *value)
{
	fm93c56a_select(qdev);
	fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
	fm93c56a_datain(qdev, value);
	fm93c56a_deselect(qdev);
}

static void ql_set_mac_addr(struct net_device *ndev, u16 *addr)
{
	__le16 *p = (__le16 *)ndev->dev_addr;

	p[0] = cpu_to_le16(addr[0]);
	p[1] = cpu_to_le16(addr[1]);
	p[2] = cpu_to_le16(addr[2]);
}
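
/*
 * The NVRAM image is validated by summing every 16-bit word; a good
 * image sums to zero. On success the (zero) checksum is returned,
 * -1 on any failure.
 */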
static int ql_get_nvram_params(struct ql3_adapter *qdev)
{
	u16 *pEEPROMData;
	u16 checksum = 0;
	u32 index;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	pEEPROMData = (u16 *)&qdev->nvram_data;
	qdev->eeprom_cmd_data = 0;

	if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 10)) {
		pr_err("%s: Failed ql_sem_spinlock()\n", __func__);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	for (index = 0; index < EEPROM_SIZE; index++) {
		eeprom_readword(qdev, index, pEEPROMData);
		checksum += *pEEPROMData;
		pEEPROMData++;
	}

	ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);

	if (checksum != 0) {
		netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n",
			   checksum);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return checksum;
}

static const u32 PHYAddr[2] = {
	PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
};

static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 temp;
	int count = 1000;

	while (count) {
		temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
		if (!(temp & MAC_MII_STATUS_BSY))
			return 0;
		udelay(10);
		count--;
	}
	return -1;
}
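
/*
 * MII scan mode has the MAC poll the PHY's scan register on its
 * own, so link changes show up without software MDIO traffic.
 * Manual MDIO reads and writes must disable scanning first (see
 * ql_mii_disable_scan_mode()) and re-enable it when done.
 */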
static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 scanControl;

	if (qdev->numPorts > 1) {
		/* Auto scan will cycle through multiple ports */
		scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
	} else {
		scanControl = MAC_MII_CONTROL_SC;
	}

	/*
	 * Scan register 1 of PHY/PETBI,
	 * Set up to scan both devices
	 * The autoscan starts from the first register, completes
	 * the last one before rolling over to the first
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (scanControl) |
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
}

static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
{
	u8 ret;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	/* See if scan mode is enabled before we turn it off */
	if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
	    (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
		/* Scan is enabled */
		ret = 1;
	} else {
		/* Scan is disabled */
		ret = 0;
	}

	/*
	 * When disabling scan mode you must first change the MII register
	 * address
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
			     MAC_MII_CONTROL_RC) << 16));

	return ret;
}

static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
			       u16 regAddr, u16 value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u8 scanWasEnabled;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete 9/10/04 SJP */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
			      u16 *value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u8 scanWasEnabled;
	u32 temp;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16)temp;

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete. */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
{
	u32 temp;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16)temp;

	ql_mii_enable_scan_mode(qdev);

	return 0;
}
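
/*
 * Fiber ports use the PETBI (ten-bit interface) registers below
 * rather than a copper PHY; ql_is_fiber() decides which path the
 * link code takes. The *_ex variants address an explicitly given
 * PHY instead of qdev->PHYAddr.
 */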
static void ql_petbi_reset(struct ql3_adapter *qdev)
{
	ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
}

static void ql_petbi_start_neg(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);

	ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
			 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);

	ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
			 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
}

static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
			    PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
			    PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			    PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
			    PHYAddr[qdev->mac_index]);
}

static void ql_petbi_init(struct ql3_adapter *qdev)
{
	ql_petbi_reset(qdev);
	ql_petbi_start_neg(qdev);
}

static void ql_petbi_init_ex(struct ql3_adapter *qdev)
{
	ql_petbi_reset_ex(qdev);
	ql_petbi_start_neg_ex(qdev);
}

static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
}

static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
{
	netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
	/* power down device bit 11 = 1 */
	ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
	/* enable diagnostic mode bit 2 = 1 */
	ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
	/* point to hidden reg 0x2806 */
	ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
	/* Write new PHYAD w/bit 5 set */
	ql_mii_write_reg_ex(qdev, 0x11,
			    0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
	/*
	 * Disable diagnostic mode bit 2 = 0
	 * Power up device bit 11 = 0
	 * Link up (on) and activity (blink)
	 */
	ql_mii_write_reg(qdev, 0x12, 0x840a);
	ql_mii_write_reg(qdev, 0x00, 0x1140);
	ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
}

static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev,
				       u16 phyIdReg0, u16 phyIdReg1)
{
	enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN;
	u32 oui;
	u16 model;
	int i;

	if (phyIdReg0 == 0xffff)
		return result;

	if (phyIdReg1 == 0xffff)
		return result;

	/* oui is split between two registers */
	oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);

	model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;

	/* Scan table for this PHY */
	for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
		if ((oui == PHY_DEVICES[i].phyIdOUI) &&
		    (model == PHY_DEVICES[i].phyIdModel)) {
			netdev_info(qdev->ndev, "Phy: %s\n",
				    PHY_DEVICES[i].name);
			result = PHY_DEVICES[i].phyDevice;
			break;
		}
	}

	return result;
}

static int ql_phy_get_speed(struct ql3_adapter *qdev)
{
	u16 reg;

	switch (qdev->phyType) {
	case PHY_AGERE_ET1011C: {
		if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
			return 0;

		reg = (reg >> 8) & 3;
		break;
	}
	default:
		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
			return 0;

		reg = (((reg & 0x18) >> 3) & 3);
	}

	switch (reg) {
	case 2:
		return SPEED_1000;
	case 1:
		return SPEED_100;
	case 0:
		return SPEED_10;
	default:
		return -1;
	}
}

static int ql_is_full_dup(struct ql3_adapter *qdev)
{
	u16 reg;

	switch (qdev->phyType) {
	case PHY_AGERE_ET1011C: {
		if (ql_mii_read_reg(qdev, 0x1A, &reg))
			return 0;

		return ((reg & 0x0080) && (reg & 0x1000)) != 0;
	}
	case PHY_VITESSE_VSC8211:
	default: {
		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
			return 0;
		return (reg & PHY_AUX_DUPLEX_STAT) != 0;
	}
	}
}

static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PHY_NEG_PAUSE) != 0;
}

static int PHY_Setup(struct ql3_adapter *qdev)
{
	u16 reg1;
	u16 reg2;
	bool agereAddrChangeNeeded = false;
	u32 miiAddr = 0;
	int err;

	/* Determine the PHY we are using by reading the IDs */
	err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
	if (err != 0) {
		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n");
		return err;
	}

	err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
	if (err != 0) {
		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n");
		return err;
	}

	/* Check if we have an Agere PHY */
	if ((reg1 == 0xffff) || (reg2 == 0xffff)) {

		/* Determine which MII address we should be using,
		   based on the index of the card */
		if (qdev->mac_index == 0)
			miiAddr = MII_AGERE_ADDR_1;
		else
			miiAddr = MII_AGERE_ADDR_2;

		err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
		if (err != 0) {
			netdev_err(qdev->ndev,
				   "Could not read from reg PHY_ID_0_REG after Agere detected\n");
			return err;
		}

		err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
		if (err != 0) {
			netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n");
			return err;
		}

		/* We need to remember to initialize the Agere PHY */
		agereAddrChangeNeeded = true;
	}

	/* Determine the particular PHY we have on board to apply
	   PHY-specific initializations */
	qdev->phyType = getPhyType(qdev, reg1, reg2);

	if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
		/* need this here so address gets changed */
		phyAgereSpecificInit(qdev, miiAddr);
	} else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
		netdev_err(qdev->ndev, "PHY is unknown\n");
		return -EIO;
	}

	return 0;
}
/*
 * Caller holds hw_lock.
 */
static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
	else
		value = (MAC_CONFIG_REG_PE << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
	else
		value = (MAC_CONFIG_REG_SR << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
	else
		value = (MAC_CONFIG_REG_GM << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
	else
		value = (MAC_CONFIG_REG_FD << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value =
			((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
			 ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
	else
		value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_fiber(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_SM0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_SM1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static int ql_is_auto_cfg(struct ql3_adapter *qdev)
{
	u16 reg;

	ql_mii_read_reg(qdev, 0x00, &reg);
	return (reg & 0x1000) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AC0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AC1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n");
		return 1;
	}

	netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n");
	return 0;
}

/*
 * ql_is_neg_pause() returns 1 if pause was negotiated to be on
 */
static int ql_is_neg_pause(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return ql_is_petbi_neg_pause(qdev);
	else
		return ql_is_phy_neg_pause(qdev);
}

static int ql_auto_neg_error(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AE0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AE1;
		break;
	}
	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static u32 ql_get_link_speed(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return SPEED_1000;
	else
		return ql_phy_get_speed(qdev);
}

static int ql_is_link_full_dup(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return 1;
	else
		return ql_is_full_dup(qdev);
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = ISP_CONTROL_LINK_DN_0;
		break;
	case 1:
		bitToCheck = ISP_CONTROL_LINK_DN_1;
		break;
	}

	temp =
		ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	return (temp & bitToCheck) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	switch (qdev->mac_index) {
	case 0:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_0) |
				    (ISP_CONTROL_LINK_DN_0 << 16));
		break;
	case 1:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_1) |
				    (ISP_CONTROL_LINK_DN_1 << 16));
		break;
	default:
		return 1;
	}

	return 0;
}
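
/*
 * Dual-function adapters share the physical port.
 * ql_this_adapter_controls_port() consults the port status register
 * to decide whether this function is the "link master" that owns
 * PHY/MAC configuration; the result is tracked via the
 * QL_LINK_MASTER flag in ql_get_phy_owner() below.
 */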
/*
 * Caller holds hw_lock.
 */
static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_F1_ENABLED;
		break;
	case 1:
		bitToCheck = PORT_STATUS_F3_ENABLED;
		break;
	default:
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
			     "not link master\n");
		return 0;
	}

	netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
	return 1;
}

static void ql_phy_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;
	u16 portConfiguration;

	if (qdev->phyType == PHY_AGERE_ET1011C)
		ql_mii_write_reg(qdev, 0x13, 0x0000);
					/* turn off external loopback */

	if (qdev->mac_index == 0)
		portConfiguration =
			qdev->nvram_data.macCfg_port0.portConfiguration;
	else
		portConfiguration =
			qdev->nvram_data.macCfg_port1.portConfiguration;

	/* Some HBAs in the field are set to 0 and they need to
	   be reinterpreted with a default value */
	if (portConfiguration == 0)
		portConfiguration = PORT_CONFIG_DEFAULT;

	/* Set the 1000 advertisements */
	ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_GIG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
		if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
			reg |= PHY_GIG_ADV_1000F;
		else
			reg |= PHY_GIG_ADV_1000H;
	}

	ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
			    PHYAddr[qdev->mac_index]);

	/* Set the 10/100 & pause negotiation advertisements */
	ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_NEG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
		reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;

	if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100F;

		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10F;
	}

	if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100H;

		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10H;
	}

	if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
		reg |= 1;

	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, CONTROL_REG,
			    reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
			    PHYAddr[qdev->mac_index]);
}
static void ql_phy_init_ex(struct ql3_adapter *qdev)
{
	ql_phy_reset_ex(qdev);
	PHY_Setup(qdev);
	ql_phy_start_neg_ex(qdev);
}

/*
 * Caller holds hw_lock.
 */
static u32 ql_get_link_state(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp, linkState;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_UP0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_UP1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck)
		linkState = LS_UP;
	else
		linkState = LS_DOWN;

	return linkState;
}

static int ql_port_start(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7)) {
		netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
		return -1;
	}

	if (ql_is_fiber(qdev)) {
		ql_petbi_init(qdev);
	} else {
		/* Copper port */
		ql_phy_init_ex(qdev);
	}

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

static int ql_finish_auto_neg(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (!ql_auto_neg_error(qdev)) {
		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			/* configure the MAC */
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Configuring link\n");
			ql_mac_cfg_soft_reset(qdev, 1);
			ql_mac_cfg_gig(qdev,
				       (ql_get_link_speed(qdev) ==
					SPEED_1000));
			ql_mac_cfg_full_dup(qdev, ql_is_link_full_dup(qdev));
			ql_mac_cfg_pause(qdev, ql_is_neg_pause(qdev));
			ql_mac_cfg_soft_reset(qdev, 0);

			/* enable the MAC */
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Enabling mac\n");
			ql_mac_enable(qdev, 1);
		}

		qdev->port_link_state = LS_UP;
		netif_start_queue(qdev->ndev);
		netif_carrier_on(qdev->ndev);
		netif_info(qdev, link, qdev->ndev,
			   "Link is up at %d Mbps, %s duplex\n",
			   ql_get_link_speed(qdev),
			   ql_is_link_full_dup(qdev) ? "full" : "half");

	} else {	/* Remote error detected */

		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Remote error detected. Calling ql_port_start()\n");
			/*
			 * ql_port_start() is shared code and needs
			 * to lock the PHY on its own.
			 */
			ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
			if (ql_port_start(qdev))	/* Restart port */
				return -1;
			return 0;
		}
	}

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}
static void ql_link_state_machine_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, link_state_work.work);
	u32 curr_link_state;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	curr_link_state = ql_get_link_state(qdev);

	if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
		netif_info(qdev, link, qdev->ndev,
			   "Reset in progress, skip processing link state\n");

		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		/* Restart timer on 1 second interval. */
		mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);

		return;
	}

	switch (qdev->port_link_state) {
	default:
		if (test_bit(QL_LINK_MASTER, &qdev->flags))
			ql_port_start(qdev);
		qdev->port_link_state = LS_DOWN;
		/* Fall Through */

	case LS_DOWN:
		if (curr_link_state == LS_UP) {
			netif_info(qdev, link, qdev->ndev, "Link is up\n");
			if (ql_is_auto_neg_complete(qdev))
				ql_finish_auto_neg(qdev);

			if (qdev->port_link_state == LS_UP)
				ql_link_down_detect_clear(qdev);

			qdev->port_link_state = LS_UP;
		}
		break;

	case LS_UP:
		/*
		 * See if the link is currently down or went down and came
		 * back up
		 */
		if (curr_link_state == LS_DOWN) {
			netif_info(qdev, link, qdev->ndev, "Link is down\n");
			qdev->port_link_state = LS_DOWN;
		}
		if (ql_link_down_detect(qdev))
			qdev->port_link_state = LS_DOWN;
		break;
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	/* Restart timer on 1 second interval. */
	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
}
/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_get_phy_owner(struct ql3_adapter *qdev)
{
	if (ql_this_adapter_controls_port(qdev))
		set_bit(QL_LINK_MASTER, &qdev->flags);
	else
		clear_bit(QL_LINK_MASTER, &qdev->flags);
}
/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_init_scan_mode(struct ql3_adapter *qdev)
{
	ql_mii_enable_scan_mode(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		if (ql_this_adapter_controls_port(qdev))
			ql_petbi_init_ex(qdev);
	} else {
		if (ql_this_adapter_controls_port(qdev))
			ql_phy_init_ex(qdev);
	}
}
/*
 * MII_Setup needs to be called before taking the PHY out of reset
 * so that the management interface clock speed can be set properly.
 * It would be better if we had a way to disable MDC until after the
 * PHY is out of reset, but we don't have that capability.
 */
static int ql_mii_setup(struct ql3_adapter *qdev)
{
	u32 reg;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (qdev->device_id == QL3032_DEVICE_ID)
		ql_write_page0_reg(qdev,
				   &port_regs->macMIIMgmtControlReg, 0x0f00000);

	/* Divide 125MHz clock by 28 to meet PHY timing requirements */
	reg = MAC_MII_CONTROL_CLK_SEL_DIV28;

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}
#define SUPPORTED_OPTICAL_MODES	(SUPPORTED_1000baseT_Full |	\
				 SUPPORTED_FIBRE |		\
				 SUPPORTED_Autoneg)
#define SUPPORTED_TP_MODES	(SUPPORTED_10baseT_Half |	\
				 SUPPORTED_10baseT_Full |	\
				 SUPPORTED_100baseT_Half |	\
				 SUPPORTED_100baseT_Full |	\
				 SUPPORTED_1000baseT_Half |	\
				 SUPPORTED_1000baseT_Full |	\
				 SUPPORTED_Autoneg |		\
				 SUPPORTED_TP)

static u32 ql_supported_modes(struct ql3_adapter *qdev)
{
	if (test_bit(QL_LINK_OPTICAL, &qdev->flags))
		return SUPPORTED_OPTICAL_MODES;

	return SUPPORTED_TP_MODES;
}
static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_auto_cfg(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static u32 ql_get_speed(struct ql3_adapter *qdev)
{
	u32 status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_get_link_speed(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static int ql_get_full_dup(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_link_full_dup(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}
static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = ql_supported_modes(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		ecmd->port = PORT_FIBRE;
	} else {
		ecmd->port = PORT_TP;
		ecmd->phy_address = qdev->PHYAddr;
	}
	ecmd->advertising = ql_supported_modes(qdev);
	ecmd->autoneg = ql_get_auto_cfg_status(qdev);
	ethtool_cmd_speed_set(ecmd, ql_get_speed(qdev));
	ecmd->duplex = ql_get_full_dup(qdev);
	return 0;
}

static void ql_get_drvinfo(struct net_device *ndev,
			   struct ethtool_drvinfo *drvinfo)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ql3xxx_driver_version,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
		sizeof(drvinfo->bus_info));
}

static u32 ql_get_msglevel(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	return qdev->msg_enable;
}

static void ql_set_msglevel(struct net_device *ndev, u32 value)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	qdev->msg_enable = value;
}

static void ql_get_pauseparam(struct net_device *ndev,
			      struct ethtool_pauseparam *pause)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 reg;

	if (qdev->mac_index == 0)
		reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
	else
		reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);

	pause->autoneg = ql_get_auto_cfg_status(qdev);
	pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2;
	pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1;
}

static const struct ethtool_ops ql3xxx_ethtool_ops = {
	.get_settings = ql_get_settings,
	.get_drvinfo = ql_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_msglevel = ql_get_msglevel,
	.set_msglevel = ql_set_msglevel,
	.get_pauseparam = ql_get_pauseparam,
};
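/*
 * Retry skb allocation for any large-buffer entries whose earlier
 * allocation failed (tracked by lrg_buf_skb_check).  Returns 1 once
 * every deferred entry has an skb mapped again, 0 if some are still
 * missing.
 */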
static int ql_populate_free_queue(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
	dma_addr_t map;
	int err;

	while (lrg_buf_cb) {
		if (!lrg_buf_cb->skb) {
			lrg_buf_cb->skb =
				netdev_alloc_skb(qdev->ndev,
						 qdev->lrg_buffer_len);
			if (unlikely(!lrg_buf_cb->skb)) {
				netdev_printk(KERN_DEBUG, qdev->ndev,
					      "Failed netdev_alloc_skb()\n");
				break;
			} else {
				/*
				 * We save some space to copy the ethhdr from
				 * first buffer
				 */
				skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
				map = pci_map_single(qdev->pdev,
						     lrg_buf_cb->skb->data,
						     qdev->lrg_buffer_len -
						     QL_HEADER_SPACE,
						     PCI_DMA_FROMDEVICE);

				err = pci_dma_mapping_error(qdev->pdev, map);
				if (err) {
					netdev_err(qdev->ndev,
						   "PCI mapping failed with error: %d\n",
						   err);
					dev_kfree_skb(lrg_buf_cb->skb);
					lrg_buf_cb->skb = NULL;
					break;
				}

				lrg_buf_cb->buf_phy_addr_low =
					cpu_to_le32(LS_64BITS(map));
				lrg_buf_cb->buf_phy_addr_high =
					cpu_to_le32(MS_64BITS(map));
				dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
				dma_unmap_len_set(lrg_buf_cb, maplen,
						  qdev->lrg_buffer_len -
						  QL_HEADER_SPACE);
				--qdev->lrg_buf_skb_check;
				if (!qdev->lrg_buf_skb_check)
					return 1;
			}
		}
		lrg_buf_cb = lrg_buf_cb->next;
	}
	return 0;
}
/*
 * Caller holds hw_lock.
 */
static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if (qdev->small_buf_release_cnt >= 16) {
		while (qdev->small_buf_release_cnt >= 16) {
			qdev->small_buf_q_producer_index++;

			if (qdev->small_buf_q_producer_index ==
			    NUM_SBUFQ_ENTRIES)
				qdev->small_buf_q_producer_index = 0;
			qdev->small_buf_release_cnt -= 8;
		}
		wmb();
		writel(qdev->small_buf_q_producer_index,
		       &port_regs->CommonRegs.rxSmallQProducerIndex);
	}
}
/*
 * Caller holds hw_lock.
 */
static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct bufq_addr_element *lrg_buf_q_ele;
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if ((qdev->lrg_buf_free_count >= 8) &&
	    (qdev->lrg_buf_release_cnt >= 16)) {

		if (qdev->lrg_buf_skb_check)
			if (!ql_populate_free_queue(qdev))
				return;

		lrg_buf_q_ele = qdev->lrg_buf_next_free;

		while ((qdev->lrg_buf_release_cnt >= 16) &&
		       (qdev->lrg_buf_free_count >= 8)) {

			for (i = 0; i < 8; i++) {
				lrg_buf_cb =
					ql_get_from_lrg_buf_free_list(qdev);
				lrg_buf_q_ele->addr_high =
					lrg_buf_cb->buf_phy_addr_high;
				lrg_buf_q_ele->addr_low =
					lrg_buf_cb->buf_phy_addr_low;
				lrg_buf_q_ele++;

				qdev->lrg_buf_release_cnt--;
			}

			qdev->lrg_buf_q_producer_index++;

			if (qdev->lrg_buf_q_producer_index ==
			    qdev->num_lbufq_entries)
				qdev->lrg_buf_q_producer_index = 0;

			if (qdev->lrg_buf_q_producer_index ==
			    (qdev->num_lbufq_entries - 1)) {
				lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
			}
		}
		wmb();
		qdev->lrg_buf_next_free = lrg_buf_q_ele;
		writel(qdev->lrg_buf_q_producer_index,
		       &port_regs->CommonRegs.rxLargeQProducerIndex);
	}
}
static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;

	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		netdev_warn(qdev->ndev,
			    "Frame too short but it was padded and sent\n");
	}

	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];

	/* Check the transmit response flags for any errors */
	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		netdev_err(qdev->ndev,
			   "Frame too short to be legal, frame not sent\n");
		qdev->ndev->stats.tx_errors++;
		goto frame_not_sent;
	}

	if (tx_cb->seg_count == 0) {
		netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
			   mac_rsp->transaction_id);
		qdev->ndev->stats.tx_errors++;
		goto invalid_seg_count;
	}

	pci_unmap_single(qdev->pdev,
			 dma_unmap_addr(&tx_cb->map[0], mapaddr),
			 dma_unmap_len(&tx_cb->map[0], maplen),
			 PCI_DMA_TODEVICE);
	tx_cb->seg_count--;
	if (tx_cb->seg_count) {
		for (i = 1; i < tx_cb->seg_count; i++) {
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_cb->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_cb->map[i], maplen),
				       PCI_DMA_TODEVICE);
		}
	}
	qdev->ndev->stats.tx_packets++;
	qdev->ndev->stats.tx_bytes += tx_cb->skb->len;

frame_not_sent:
	dev_kfree_skb_irq(tx_cb->skb);
	tx_cb->skb = NULL;

invalid_seg_count:
	atomic_inc(&qdev->tx_count);
}
static void ql_get_sbuf(struct ql3_adapter *qdev)
{
	if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
		qdev->small_buf_index = 0;
	qdev->small_buf_release_cnt++;
}

static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = NULL;

	lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
	qdev->lrg_buf_release_cnt++;
	if (++qdev->lrg_buf_index == qdev->num_large_buffers)
		qdev->lrg_buf_index = 0;
	return lrg_buf_cb;
}
/*
 * The difference between 3022 and 3032 for inbound completions:
 * 3022 uses two buffers per completion.  The first buffer contains
 * (some) header info, the second the remainder of the headers plus
 * the data.  For this chip we reserve some space at the top of the
 * receive buffer so that the header info in buffer one can be
 * prepended to buffer two.  Buffer two is then sent up while
 * buffer one is returned to the hardware to be reused.
 * 3032 receives all of its data and headers in one buffer for a
 * simpler process.  3032 also supports checksum verification as
 * can be seen in ql_process_macip_rx_intr().
 */
static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
				   struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
{
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	struct sk_buff *skb;
	u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);

	/*
	 * Get the inbound address list (small buffer).
	 */
	ql_get_sbuf(qdev);

	if (qdev->device_id == QL3022_DEVICE_ID)
		lrg_buf_cb1 = ql_get_lbuf(qdev);

	/* start of second buffer */
	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb = lrg_buf_cb2->skb;

	qdev->ndev->stats.rx_packets++;
	qdev->ndev->stats.rx_bytes += length;

	skb_put(skb, length);
	pci_unmap_single(qdev->pdev,
			 dma_unmap_addr(lrg_buf_cb2, mapaddr),
			 dma_unmap_len(lrg_buf_cb2, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb->data);
	skb_checksum_none_assert(skb);
	skb->protocol = eth_type_trans(skb, qdev->ndev);

	netif_receive_skb(skb);
	lrg_buf_cb2->skb = NULL;

	if (qdev->device_id == QL3022_DEVICE_ID)
		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}
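/*
 * For 3022 IP completions the Ethernet header arrives in buffer one
 * and must be prepended to the payload in buffer two.  The copy size
 * below is ETH_HLEN, or VLAN_ETH_HLEN when the first 16 bits of buffer
 * one are not 0xFFFF, i.e. the frame carries a VLAN tag.
 */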
static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
				     struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
{
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	struct sk_buff *skb1 = NULL, *skb2;
	struct net_device *ndev = qdev->ndev;
	u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
	u16 size = 0;

	/*
	 * Get the inbound address list (small buffer).
	 */
	ql_get_sbuf(qdev);

	if (qdev->device_id == QL3022_DEVICE_ID) {
		/* start of first buffer on 3022 */
		lrg_buf_cb1 = ql_get_lbuf(qdev);
		skb1 = lrg_buf_cb1->skb;
		size = ETH_HLEN;
		if (*((u16 *) skb1->data) != 0xFFFF)
			size += VLAN_ETH_HLEN - ETH_HLEN;
	}

	/* start of second buffer */
	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb2 = lrg_buf_cb2->skb;

	skb_put(skb2, length);	/* Just the second buffer length here. */
	pci_unmap_single(qdev->pdev,
			 dma_unmap_addr(lrg_buf_cb2, mapaddr),
			 dma_unmap_len(lrg_buf_cb2, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb2->data);

	skb_checksum_none_assert(skb2);
	if (qdev->device_id == QL3022_DEVICE_ID) {
		/*
		 * Copy the ethhdr from first buffer to second. This
		 * is necessary for 3022 IP completions.
		 */
		skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,
						 skb_push(skb2, size), size);
	} else {
		u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
		if (checksum &
		    (IB_IP_IOCB_RSP_3032_ICE |
		     IB_IP_IOCB_RSP_3032_CE)) {
			netdev_err(ndev,
				   "%s: Bad checksum for this %s packet, checksum = %x\n",
				   __func__,
				   ((checksum & IB_IP_IOCB_RSP_3032_TCP) ?
				    "TCP" : "UDP"), checksum);
		} else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
			   (checksum & IB_IP_IOCB_RSP_3032_UDP &&
			    !(checksum & IB_IP_IOCB_RSP_3032_NUC))) {
			skb2->ip_summed = CHECKSUM_UNNECESSARY;
		}
	}
	skb2->protocol = eth_type_trans(skb2, qdev->ndev);

	netif_receive_skb(skb2);
	ndev->stats.rx_packets++;
	ndev->stats.rx_bytes += length;
	lrg_buf_cb2->skb = NULL;

	if (qdev->device_id == QL3022_DEVICE_ID)
		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}
static int ql_tx_rx_clean(struct ql3_adapter *qdev,
			  int *tx_cleaned, int *rx_cleaned, int work_to_do)
{
	struct net_rsp_iocb *net_rsp;
	struct net_device *ndev = qdev->ndev;
	int work_done = 0;

	/* While there are entries in the completion queue. */
	while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
		qdev->rsp_consumer_index) && (work_done < work_to_do)) {

		net_rsp = qdev->rsp_current;
		rmb();
		/*
		 * Fix 4032 chip's undocumented "feature" where bit-8 is set
		 * if the inbound completion is for a VLAN.
		 */
		if (qdev->device_id == QL3032_DEVICE_ID)
			net_rsp->opcode &= 0x7f;
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_IOCB_FN0:
		case OPCODE_OB_MAC_IOCB_FN2:
			ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
					       net_rsp);
			(*tx_cleaned)++;
			break;

		case OPCODE_IB_MAC_IOCB:
		case OPCODE_IB_3032_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
					       net_rsp);
			(*rx_cleaned)++;
			break;

		case OPCODE_IB_IP_IOCB:
		case OPCODE_IB_3032_IP_IOCB:
			ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
						 net_rsp);
			(*rx_cleaned)++;
			break;
		default: {
			u32 *tmp = (u32 *)net_rsp;
			netdev_err(ndev,
				   "Hit default case, not handled!\n"
				   "	dropping the packet, opcode = %x\n"
				   "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
				   net_rsp->opcode,
				   (unsigned long int)tmp[0],
				   (unsigned long int)tmp[1],
				   (unsigned long int)tmp[2],
				   (unsigned long int)tmp[3]);
		}
		}

		qdev->rsp_consumer_index++;

		if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
			qdev->rsp_consumer_index = 0;
			qdev->rsp_current = qdev->rsp_q_virt_addr;
		} else {
			qdev->rsp_current++;
		}

		work_done = *tx_cleaned + *rx_cleaned;
	}

	return work_done;
}
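/*
 * Standard NAPI poll: clean completions up to the budget.  When less
 * than the full budget is consumed, complete NAPI, push the small and
 * large buffer queue producer indices and the response queue consumer
 * index out to the chip, and re-enable interrupts.
 */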
static int ql_poll(struct napi_struct *napi, int budget)
{
	struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
	int rx_cleaned = 0, tx_cleaned = 0;
	unsigned long hw_flags;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget);

	if (tx_cleaned + rx_cleaned != budget) {
		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		__napi_complete(napi);
		ql_update_small_bufq_prod_index(qdev);
		ql_update_lrg_bufq_prod_index(qdev);
		writel(qdev->rsp_consumer_index,
		       &port_regs->CommonRegs.rspQConsumerIndex);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		ql_enable_interrupts(qdev);
	}
	return tx_cleaned + rx_cleaned;
}
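/*
 * Interrupt dispatch: a fatal error (ISP_CONTROL_FE) or a reset
 * initiated by the other function (ISP_CONTROL_RI) quiesces the
 * interface and queues reset_work; a completion interrupt disables
 * interrupts and schedules NAPI; anything else is not ours (IRQ_NONE).
 */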
static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;
	int handled = 1;
	u32 var;

	value = ql_read_common_reg_l(qdev,
				     &port_regs->CommonRegs.ispControlStatus);

	if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
		spin_lock(&qdev->adapter_lock);
		netif_stop_queue(qdev->ndev);
		netif_carrier_off(qdev->ndev);
		ql_disable_interrupts(qdev);
		qdev->port_link_state = LS_DOWN;
		set_bit(QL_RESET_ACTIVE, &qdev->flags);

		if (value & ISP_CONTROL_FE) {
			/*
			 * Chip Fatal Error.
			 */
			var =
				ql_read_page0_reg_l(qdev,
						    &port_regs->PortFatalErrStatus);
			netdev_warn(ndev,
				    "Resetting chip. PortFatalErrStatus register = 0x%x\n",
				    var);
			set_bit(QL_RESET_START, &qdev->flags);
		} else {
			/*
			 * Soft Reset Requested.
			 */
			set_bit(QL_RESET_PER_SCSI, &qdev->flags);
			netdev_err(ndev,
				   "Another function issued a reset to the chip. ISR value = %x\n",
				   value);
		}
		queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
		spin_unlock(&qdev->adapter_lock);
	} else if (value & ISP_IMR_DISABLE_CMPL_INT) {
		ql_disable_interrupts(qdev);
		if (likely(napi_schedule_prep(&qdev->napi)))
			__napi_schedule(&qdev->napi);
	} else
		return IRQ_NONE;

	return IRQ_RETVAL(handled);
}
/*
 * Get the total number of segments needed for the given number of fragments.
 * This is necessary because outbound address lists (OAL) will be used when
 * more than two frags are given.  Each address list has 5 addr/len pairs.
 * The 5th pair in each OAL is used to point to the next OAL if more frags
 * are coming.  That is why the frags:segment count ratio is not linear.
 */
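/*
 * Worked example: an skb with 18 frags needs 18 + 5 = 23 segments:
 * one for the linear data, 18 for the frags, and four continuation
 * entries (consumed at segment slots 2, 7, 12 and 17 in ql_send_map()).
 */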
static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags)
{
	if (qdev->device_id == QL3022_DEVICE_ID)
		return 1;

	if (frags <= 2)
		return frags + 1;
	else if (frags <= 6)
		return frags + 2;
	else if (frags <= 10)
		return frags + 3;
	else if (frags <= 14)
		return frags + 4;
	else if (frags <= 18)
		return frags + 5;
	return -1;
}
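/*
 * Set up transmit checksum offload for the 3032: flags1 gets
 * OB_3032MAC_IOCB_REQ_TC for TCP or OB_3032MAC_IOCB_REQ_UC for UDP,
 * plus OB_3032MAC_IOCB_REQ_IC for the IP header checksum in either
 * case.
 */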
static void ql_hw_csum_setup(const struct sk_buff *skb,
			     struct ob_mac_iocb_req *mac_iocb_ptr)
{
	const struct iphdr *ip = ip_hdr(skb);

	mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb);
	mac_iocb_ptr->ip_hdr_len = ip->ihl;

	if (ip->protocol == IPPROTO_TCP) {
		mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
			OB_3032MAC_IOCB_REQ_IC;
	} else {
		mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
			OB_3032MAC_IOCB_REQ_IC;
	}
}
/*
 * Map the buffers for this transmit.
 * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_send_map(struct ql3_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct ql_tx_buf_cb *tx_cb,
		       struct sk_buff *skb)
{
	struct oal *oal;
	struct oal_entry *oal_entry;
	int len = skb_headlen(skb);
	dma_addr_t map;
	int err;
	int completed_segs, i;
	int seg_cnt, seg = 0;
	int frag_cnt = (int)skb_shinfo(skb)->nr_frags;

	seg_cnt = tx_cb->seg_count;
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n",
			   err);
		return NETDEV_TX_BUSY;
	}

	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
	oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
	oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
	oal_entry->len = cpu_to_le32(len);
	dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
	dma_unmap_len_set(&tx_cb->map[seg], maplen, len);
	seg++;

	if (seg_cnt == 1) {
		/* Terminate the last segment. */
		oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
		return NETDEV_TX_OK;
	}
	oal = tx_cb->oal;
	for (completed_segs = 0;
	     completed_segs < frag_cnt;
	     completed_segs++, seg++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
		oal_entry++;

		/*
		 * Check for continuation requirements.
		 * It's strange but necessary.
		 * Continuation entry points to outbound address list.
		 */
		if ((seg == 2 && seg_cnt > 3) ||
		    (seg == 7 && seg_cnt > 8) ||
		    (seg == 12 && seg_cnt > 13) ||
		    (seg == 17 && seg_cnt > 18)) {
			map = pci_map_single(qdev->pdev, oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);

			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netdev_err(qdev->ndev,
					   "PCI mapping outbound address list with error: %d\n",
					   err);
				goto map_error;
			}

			oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
			oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
			oal_entry->len = cpu_to_le32(sizeof(struct oal) |
						     OAL_CONT_ENTRY);
			dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
			dma_unmap_len_set(&tx_cb->map[seg], maplen,
					  sizeof(struct oal));
			oal_entry = (struct oal_entry *)oal;
			oal++;
			seg++;
		}

		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0,
				       skb_frag_size(frag), DMA_TO_DEVICE);

		err = dma_mapping_error(&qdev->pdev->dev, map);
		if (err) {
			netdev_err(qdev->ndev,
				   "PCI mapping frags failed with error: %d\n",
				   err);
			goto map_error;
		}

		oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
		oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
		oal_entry->len = cpu_to_le32(skb_frag_size(frag));
		dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
		dma_unmap_len_set(&tx_cb->map[seg], maplen,
				  skb_frag_size(frag));
	}
	/* Terminate the last segment. */
	oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
	return NETDEV_TX_OK;

map_error:
	/*
	 * A PCI mapping failed, so we need to back out.  Traverse the
	 * OALs and associated pages that have been mapped so far and
	 * unmap them to clean up properly.
	 */
	seg = 1;
	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
	oal = tx_cb->oal;
	for (i = 0; i < completed_segs; i++, seg++) {
		oal_entry++;

		/*
		 * Check for continuation requirements.
		 * It's strange but necessary.
		 */
		if ((seg == 2 && seg_cnt > 3) ||
		    (seg == 7 && seg_cnt > 8) ||
		    (seg == 12 && seg_cnt > 13) ||
		    (seg == 17 && seg_cnt > 18)) {
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(&tx_cb->map[seg],
							mapaddr),
					 dma_unmap_len(&tx_cb->map[seg],
						       maplen),
					 PCI_DMA_TODEVICE);
			oal++;
			seg++;
		}

		pci_unmap_page(qdev->pdev,
			       dma_unmap_addr(&tx_cb->map[seg], mapaddr),
			       dma_unmap_len(&tx_cb->map[seg], maplen),
			       PCI_DMA_TODEVICE);
	}

	/* Was dma_unmap_addr(..., maplen); use dma_unmap_len for the length. */
	pci_unmap_single(qdev->pdev,
			 dma_unmap_addr(&tx_cb->map[0], mapaddr),
			 dma_unmap_len(&tx_cb->map[0], maplen),
			 PCI_DMA_TODEVICE);

	return NETDEV_TX_BUSY;
}
/*
 * The difference between 3022 and 3032 sends:
 * 3022 only supports a simple single segment transmission.
 * 3032 supports checksumming and scatter/gather lists (fragments).
 * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
 * in the IOCB plus a chain of outbound address lists (OAL) that
 * each contain 5 ALPs.  The last ALP of the IOCB (3rd) or OAL (5th)
 * will be used to point to an OAL when more ALP entries are required.
 * The IOCB is always the top of the chain followed by one or more
 * OALs (when necessary).
 */
static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
			       struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	struct ql_tx_buf_cb *tx_cb;
	u32 tot_len = skb->len;
	struct ob_mac_iocb_req *mac_iocb_ptr;

	if (unlikely(atomic_read(&qdev->tx_count) < 2))
		return NETDEV_TX_BUSY;

	tx_cb = &qdev->tx_buf[qdev->req_producer_index];
	tx_cb->seg_count = ql_get_seg_count(qdev,
					    skb_shinfo(skb)->nr_frags);
	if (tx_cb->seg_count == -1) {
		netdev_err(ndev, "%s: invalid segment count!\n", __func__);
		return NETDEV_TX_OK;
	}

	mac_iocb_ptr = tx_cb->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
	mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
	mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X;
	mac_iocb_ptr->flags |= qdev->mb_bit_mask;
	mac_iocb_ptr->transaction_id = qdev->req_producer_index;
	mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
	tx_cb->skb = skb;
	if (qdev->device_id == QL3032_DEVICE_ID &&
	    skb->ip_summed == CHECKSUM_PARTIAL)
		ql_hw_csum_setup(skb, mac_iocb_ptr);

	if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) {
		netdev_err(ndev, "%s: Could not map the segments!\n", __func__);
		return NETDEV_TX_BUSY;
	}

	wmb();
	qdev->req_producer_index++;
	if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
		qdev->req_producer_index = 0;
	wmb();
	ql_write_common_reg_l(qdev,
			      &port_regs->CommonRegs.reqQProducerIndex,
			      qdev->req_producer_index);

	netif_printk(qdev, tx_queued, KERN_DEBUG, ndev,
		     "tx queued, slot %d, len %d\n",
		     qdev->req_producer_index, skb->len);

	atomic_dec(&qdev->tx_count);
	return NETDEV_TX_OK;
}
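/*
 * The LS_64BITS(phy_addr) & (size - 1) tests below reject any DMA
 * allocation whose base address is not naturally aligned to the queue
 * size, presumably a hardware alignment requirement for the request
 * and response queues.
 */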
static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
{
	qdev->req_q_size =
		(u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));

	qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);

	/* The barrier is required to ensure the request and response queue
	 * addr writes reach the registers.
	 */
	wmb();

	qdev->req_q_virt_addr =
		pci_alloc_consistent(qdev->pdev,
				     (size_t) qdev->req_q_size,
				     &qdev->req_q_phy_addr);

	if ((qdev->req_q_virt_addr == NULL) ||
	    LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
		netdev_err(qdev->ndev, "reqQ failed\n");
		return -ENOMEM;
	}

	qdev->rsp_q_virt_addr =
		pci_alloc_consistent(qdev->pdev,
				     (size_t) qdev->rsp_q_size,
				     &qdev->rsp_q_phy_addr);

	if ((qdev->rsp_q_virt_addr == NULL) ||
	    LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
		netdev_err(qdev->ndev, "rspQ allocation failed\n");
		pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
				    qdev->req_q_virt_addr,
				    qdev->req_q_phy_addr);
		return -ENOMEM;
	}

	set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);

	return 0;
}
static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
{
	if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) {
		netdev_info(qdev->ndev, "Already done\n");
		return;
	}

	pci_free_consistent(qdev->pdev,
			    qdev->req_q_size,
			    qdev->req_q_virt_addr, qdev->req_q_phy_addr);

	qdev->req_q_virt_addr = NULL;

	pci_free_consistent(qdev->pdev,
			    qdev->rsp_q_size,
			    qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);

	qdev->rsp_q_virt_addr = NULL;

	clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
}
static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
{
	/* Create Large Buffer Queue */
	qdev->lrg_buf_q_size =
		qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
	if (qdev->lrg_buf_q_size < PAGE_SIZE)
		qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
	else
		qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;

	qdev->lrg_buf = kmalloc_array(qdev->num_large_buffers,
				      sizeof(struct ql_rcv_buf_cb),
				      GFP_KERNEL);
	if (qdev->lrg_buf == NULL)
		return -ENOMEM;

	qdev->lrg_buf_q_alloc_virt_addr =
		pci_alloc_consistent(qdev->pdev,
				     qdev->lrg_buf_q_alloc_size,
				     &qdev->lrg_buf_q_alloc_phy_addr);

	if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
		netdev_err(qdev->ndev, "lBufQ failed\n");
		/* don't leak the control block array */
		kfree(qdev->lrg_buf);
		qdev->lrg_buf = NULL;
		return -ENOMEM;
	}
	qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
	qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;

	/* Create Small Buffer Queue */
	qdev->small_buf_q_size =
		NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
	if (qdev->small_buf_q_size < PAGE_SIZE)
		qdev->small_buf_q_alloc_size = PAGE_SIZE;
	else
		qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;

	qdev->small_buf_q_alloc_virt_addr =
		pci_alloc_consistent(qdev->pdev,
				     qdev->small_buf_q_alloc_size,
				     &qdev->small_buf_q_alloc_phy_addr);

	if (qdev->small_buf_q_alloc_virt_addr == NULL) {
		netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n");
		pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
				    qdev->lrg_buf_q_alloc_virt_addr,
				    qdev->lrg_buf_q_alloc_phy_addr);
		kfree(qdev->lrg_buf);
		qdev->lrg_buf = NULL;
		return -ENOMEM;
	}

	qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
	qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
	set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
	return 0;
}
static void ql_free_buffer_queues(struct ql3_adapter *qdev)
{
	if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) {
		netdev_info(qdev->ndev, "Already done\n");
		return;
	}
	kfree(qdev->lrg_buf);
	pci_free_consistent(qdev->pdev,
			    qdev->lrg_buf_q_alloc_size,
			    qdev->lrg_buf_q_alloc_virt_addr,
			    qdev->lrg_buf_q_alloc_phy_addr);

	qdev->lrg_buf_q_virt_addr = NULL;

	pci_free_consistent(qdev->pdev,
			    qdev->small_buf_q_alloc_size,
			    qdev->small_buf_q_alloc_virt_addr,
			    qdev->small_buf_q_alloc_phy_addr);

	qdev->small_buf_q_virt_addr = NULL;

	clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
}
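/*
 * The small receive buffers live in one coherent slab of
 * QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES * QL_SMALL_BUFFER_SIZE
 * bytes; each small buffer queue element simply points at offset
 * i * QL_SMALL_BUFFER_SIZE within that slab.
 */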
static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
{
	int i;
	struct bufq_addr_element *small_buf_q_entry;

	/* Currently we allocate one chunk of memory and use it for all of
	 * the small buffers. */
	qdev->small_buf_total_size =
		(QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
		 QL_SMALL_BUFFER_SIZE);

	qdev->small_buf_virt_addr =
		pci_alloc_consistent(qdev->pdev,
				     qdev->small_buf_total_size,
				     &qdev->small_buf_phy_addr);

	if (qdev->small_buf_virt_addr == NULL) {
		netdev_err(qdev->ndev, "Failed to get small buffer memory\n");
		return -ENOMEM;
	}

	qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
	qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);

	small_buf_q_entry = qdev->small_buf_q_virt_addr;

	/* Initialize the small buffer queue. */
	for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
		small_buf_q_entry->addr_high =
			cpu_to_le32(qdev->small_buf_phy_addr_high);
		small_buf_q_entry->addr_low =
			cpu_to_le32(qdev->small_buf_phy_addr_low +
				    (i * QL_SMALL_BUFFER_SIZE));
		small_buf_q_entry++;
	}
	qdev->small_buf_index = 0;
	set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags);
	return 0;
}
static void ql_free_small_buffers(struct ql3_adapter *qdev)
{
	if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) {
		netdev_info(qdev->ndev, "Already done\n");
		return;
	}
	if (qdev->small_buf_virt_addr != NULL) {
		pci_free_consistent(qdev->pdev,
				    qdev->small_buf_total_size,
				    qdev->small_buf_virt_addr,
				    qdev->small_buf_phy_addr);

		qdev->small_buf_virt_addr = NULL;
	}
}
static void ql_free_large_buffers(struct ql3_adapter *qdev)
{
	int i = 0;
	struct ql_rcv_buf_cb *lrg_buf_cb;

	for (i = 0; i < qdev->num_large_buffers; i++) {
		lrg_buf_cb = &qdev->lrg_buf[i];
		if (lrg_buf_cb->skb) {
			dev_kfree_skb(lrg_buf_cb->skb);
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(lrg_buf_cb, mapaddr),
					 dma_unmap_len(lrg_buf_cb, maplen),
					 PCI_DMA_FROMDEVICE);
			memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
		} else {
			break;
		}
	}
}
static void ql_init_large_buffers(struct ql3_adapter *qdev)
{
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;

	for (i = 0; i < qdev->num_large_buffers; i++) {
		lrg_buf_cb = &qdev->lrg_buf[i];
		buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
		buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
		buf_addr_ele++;
	}
	qdev->lrg_buf_index = 0;
	qdev->lrg_buf_skb_check = 0;
}
static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
{
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct sk_buff *skb;
	dma_addr_t map;
	int err;

	for (i = 0; i < qdev->num_large_buffers; i++) {
		skb = netdev_alloc_skb(qdev->ndev,
				       qdev->lrg_buffer_len);
		if (unlikely(!skb)) {
			/* Better luck next round */
			netdev_err(qdev->ndev,
				   "large buff alloc failed for %d bytes at index %d\n",
				   qdev->lrg_buffer_len * 2, i);
			ql_free_large_buffers(qdev);
			return -ENOMEM;
		} else {
			lrg_buf_cb = &qdev->lrg_buf[i];
			memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
			lrg_buf_cb->index = i;
			lrg_buf_cb->skb = skb;
			/*
			 * We save some space to copy the ethhdr from first
			 * buffer
			 */
			skb_reserve(skb, QL_HEADER_SPACE);
			map = pci_map_single(qdev->pdev,
					     skb->data,
					     qdev->lrg_buffer_len -
					     QL_HEADER_SPACE,
					     PCI_DMA_FROMDEVICE);

			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netdev_err(qdev->ndev,
					   "PCI mapping failed with error: %d\n",
					   err);
				ql_free_large_buffers(qdev);
				return -ENOMEM;
			}

			dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
			dma_unmap_len_set(lrg_buf_cb, maplen,
					  qdev->lrg_buffer_len -
					  QL_HEADER_SPACE);
			lrg_buf_cb->buf_phy_addr_low =
				cpu_to_le32(LS_64BITS(map));
			lrg_buf_cb->buf_phy_addr_high =
				cpu_to_le32(MS_64BITS(map));
		}
	}
	return 0;
}
static void ql_free_send_free_list(struct ql3_adapter *qdev)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;

	tx_cb = &qdev->tx_buf[0];
	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
		kfree(tx_cb->oal);
		tx_cb->oal = NULL;
		tx_cb++;
	}
}

static int ql_create_send_free_list(struct ql3_adapter *qdev)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;
	struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr;

	/* Create free list of transmit buffers */
	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
		tx_cb = &qdev->tx_buf[i];
		tx_cb->skb = NULL;
		tx_cb->queue_entry = req_q_curr;
		req_q_curr++;
		tx_cb->oal = kmalloc(512, GFP_KERNEL);
		if (tx_cb->oal == NULL)
			return -ENOMEM;
	}
	return 0;
}
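/*
 * Shadow register layout (one shared page): bytes 0-7 hold the request
 * queue consumer index shadow, bytes 8-15 the response queue producer
 * index shadow, as set up below via the +8 offsets.
 */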
static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
{
	if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
		qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
		qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
	} else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
		/*
		 * Bigger buffers, so less of them.
		 */
		qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
		qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
	} else {
		netdev_err(qdev->ndev, "Invalid mtu size: %d.  Only %d and %d are accepted.\n",
			   qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE);
		return -ENOMEM;
	}
	qdev->num_large_buffers =
		qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
	qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
	qdev->max_frame_size =
		(qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;

	/*
	 * First allocate a page of shared memory and use it for shadow
	 * locations of Network Request Queue Consumer Address Register and
	 * Network Completion Queue Producer Index Register
	 */
	qdev->shadow_reg_virt_addr =
		pci_alloc_consistent(qdev->pdev,
				     PAGE_SIZE, &qdev->shadow_reg_phy_addr);

	if (qdev->shadow_reg_virt_addr != NULL) {
		qdev->preq_consumer_index = qdev->shadow_reg_virt_addr;
		qdev->req_consumer_index_phy_addr_high =
			MS_64BITS(qdev->shadow_reg_phy_addr);
		qdev->req_consumer_index_phy_addr_low =
			LS_64BITS(qdev->shadow_reg_phy_addr);

		qdev->prsp_producer_index =
			(__le32 *) (((u8 *) qdev->preq_consumer_index) + 8);
		qdev->rsp_producer_index_phy_addr_high =
			qdev->req_consumer_index_phy_addr_high;
		qdev->rsp_producer_index_phy_addr_low =
			qdev->req_consumer_index_phy_addr_low + 8;
	} else {
		netdev_err(qdev->ndev, "shadowReg Alloc failed\n");
		return -ENOMEM;
	}

	if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
		netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n");
		goto err_req_rsp;
	}

	if (ql_alloc_buffer_queues(qdev) != 0) {
		netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n");
		goto err_buffer_queues;
	}

	if (ql_alloc_small_buffers(qdev) != 0) {
		netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n");
		goto err_small_buffers;
	}

	if (ql_alloc_large_buffers(qdev) != 0) {
		netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n");
		goto err_small_buffers;
	}

	/* Initialize the large buffer queue. */
	ql_init_large_buffers(qdev);
	if (ql_create_send_free_list(qdev))
		goto err_free_list;

	qdev->rsp_current = qdev->rsp_q_virt_addr;

	return 0;
err_free_list:
	ql_free_send_free_list(qdev);
err_small_buffers:
	ql_free_buffer_queues(qdev);
err_buffer_queues:
	ql_free_net_req_rsp_queues(qdev);
err_req_rsp:
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->shadow_reg_virt_addr,
			    qdev->shadow_reg_phy_addr);

	return -ENOMEM;
}
static void ql_free_mem_resources(struct ql3_adapter *qdev)
{
	ql_free_send_free_list(qdev);
	ql_free_large_buffers(qdev);
	ql_free_small_buffers(qdev);
	ql_free_buffer_queues(qdev);
	ql_free_net_req_rsp_queues(qdev);
	if (qdev->shadow_reg_virt_addr != NULL) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->shadow_reg_virt_addr,
				    qdev->shadow_reg_phy_addr);
		qdev->shadow_reg_virt_addr = NULL;
	}
}
static int ql_init_misc_registers(struct ql3_adapter *qdev)
{
	struct ql3xxx_local_ram_registers __iomem *local_ram =
		(void __iomem *)qdev->mem_map_registers;

	if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 4))
		return -1;

	ql_write_page2_reg(qdev,
			   &local_ram->bufletSize, qdev->nvram_data.bufletSize);

	ql_write_page2_reg(qdev,
			   &local_ram->maxBufletCount,
			   qdev->nvram_data.bufletCount);

	ql_write_page2_reg(qdev,
			   &local_ram->freeBufletThresholdLow,
			   (qdev->nvram_data.tcpWindowThreshold25 << 16) |
			   (qdev->nvram_data.tcpWindowThreshold0));

	ql_write_page2_reg(qdev,
			   &local_ram->freeBufletThresholdHigh,
			   qdev->nvram_data.tcpWindowThreshold50);

	ql_write_page2_reg(qdev,
			   &local_ram->ipHashTableBase,
			   (qdev->nvram_data.ipHashTableBaseHi << 16) |
			   qdev->nvram_data.ipHashTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->ipHashTableCount,
			   qdev->nvram_data.ipHashTableSize);
	ql_write_page2_reg(qdev,
			   &local_ram->tcpHashTableBase,
			   (qdev->nvram_data.tcpHashTableBaseHi << 16) |
			   qdev->nvram_data.tcpHashTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->tcpHashTableCount,
			   qdev->nvram_data.tcpHashTableSize);
	ql_write_page2_reg(qdev,
			   &local_ram->ncbBase,
			   (qdev->nvram_data.ncbTableBaseHi << 16) |
			   qdev->nvram_data.ncbTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->maxNcbCount,
			   qdev->nvram_data.ncbTableSize);
	ql_write_page2_reg(qdev,
			   &local_ram->drbBase,
			   (qdev->nvram_data.drbTableBaseHi << 16) |
			   qdev->nvram_data.drbTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->maxDrbCount,
			   qdev->nvram_data.drbTableSize);
	ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
	return 0;
}
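/*
 * Bring the adapter up: set up MII, take the PHY out of reset, program
 * the request/response and buffer queue registers, configure the MAC
 * address, and wait for the chip to report initialization complete
 * (PORT_STATUS_IC) before enabling the Ethernet function.
 */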
  2550. static int ql_adapter_initialize(struct ql3_adapter *qdev)
  2551. {
  2552. u32 value;
  2553. struct ql3xxx_port_registers __iomem *port_regs =
  2554. qdev->mem_map_registers;
  2555. __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
  2556. struct ql3xxx_host_memory_registers __iomem *hmem_regs =
  2557. (void __iomem *)port_regs;
  2558. u32 delay = 10;
  2559. int status = 0;
  2560. if (ql_mii_setup(qdev))
  2561. return -1;
  2562. /* Bring out PHY out of reset */
  2563. ql_write_common_reg(qdev, spir,
  2564. (ISP_SERIAL_PORT_IF_WE |
  2565. (ISP_SERIAL_PORT_IF_WE << 16)));
  2566. /* Give the PHY time to come out of reset. */
  2567. mdelay(100);
  2568. qdev->port_link_state = LS_DOWN;
  2569. netif_carrier_off(qdev->ndev);
  2570. /* V2 chip fix for ARS-39168. */
  2571. ql_write_common_reg(qdev, spir,
  2572. (ISP_SERIAL_PORT_IF_SDE |
  2573. (ISP_SERIAL_PORT_IF_SDE << 16)));
  2574. /* Request Queue Registers */
  2575. *((u32 *)(qdev->preq_consumer_index)) = 0;
  2576. atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES);
  2577. qdev->req_producer_index = 0;
  2578. ql_write_page1_reg(qdev,
  2579. &hmem_regs->reqConsumerIndexAddrHigh,
  2580. qdev->req_consumer_index_phy_addr_high);
  2581. ql_write_page1_reg(qdev,
  2582. &hmem_regs->reqConsumerIndexAddrLow,
  2583. qdev->req_consumer_index_phy_addr_low);
  2584. ql_write_page1_reg(qdev,
  2585. &hmem_regs->reqBaseAddrHigh,
  2586. MS_64BITS(qdev->req_q_phy_addr));
  2587. ql_write_page1_reg(qdev,
  2588. &hmem_regs->reqBaseAddrLow,
  2589. LS_64BITS(qdev->req_q_phy_addr));
  2590. ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);
  2591. /* Response Queue Registers */
  2592. *((__le16 *) (qdev->prsp_producer_index)) = 0;
  2593. qdev->rsp_consumer_index = 0;
  2594. qdev->rsp_current = qdev->rsp_q_virt_addr;
  2595. ql_write_page1_reg(qdev,
  2596. &hmem_regs->rspProducerIndexAddrHigh,
  2597. qdev->rsp_producer_index_phy_addr_high);
  2598. ql_write_page1_reg(qdev,
  2599. &hmem_regs->rspProducerIndexAddrLow,
  2600. qdev->rsp_producer_index_phy_addr_low);
  2601. ql_write_page1_reg(qdev,
  2602. &hmem_regs->rspBaseAddrHigh,
  2603. MS_64BITS(qdev->rsp_q_phy_addr));
  2604. ql_write_page1_reg(qdev,
  2605. &hmem_regs->rspBaseAddrLow,
  2606. LS_64BITS(qdev->rsp_q_phy_addr));
  2607. ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);
  2608. /* Large Buffer Queue */
  2609. ql_write_page1_reg(qdev,
  2610. &hmem_regs->rxLargeQBaseAddrHigh,
  2611. MS_64BITS(qdev->lrg_buf_q_phy_addr));
  2612. ql_write_page1_reg(qdev,
  2613. &hmem_regs->rxLargeQBaseAddrLow,
  2614. LS_64BITS(qdev->lrg_buf_q_phy_addr));
  2615. ql_write_page1_reg(qdev,
  2616. &hmem_regs->rxLargeQLength,
  2617. qdev->num_lbufq_entries);
  2618. ql_write_page1_reg(qdev,
  2619. &hmem_regs->rxLargeBufferLength,
  2620. qdev->lrg_buffer_len);
  2621. /* Small Buffer Queue */
  2622. ql_write_page1_reg(qdev,
  2623. &hmem_regs->rxSmallQBaseAddrHigh,
  2624. MS_64BITS(qdev->small_buf_q_phy_addr));
  2625. ql_write_page1_reg(qdev,
  2626. &hmem_regs->rxSmallQBaseAddrLow,
  2627. LS_64BITS(qdev->small_buf_q_phy_addr));
  2628. ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
  2629. ql_write_page1_reg(qdev,
  2630. &hmem_regs->rxSmallBufferLength,
  2631. QL_SMALL_BUFFER_SIZE);
  2632. qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
  2633. qdev->small_buf_release_cnt = 8;
  2634. qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
  2635. qdev->lrg_buf_release_cnt = 8;
  2636. qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr;
  2637. qdev->small_buf_index = 0;
  2638. qdev->lrg_buf_index = 0;
  2639. qdev->lrg_buf_free_count = 0;
  2640. qdev->lrg_buf_free_head = NULL;
  2641. qdev->lrg_buf_free_tail = NULL;
	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.
			    rxSmallQProducerIndex,
			    qdev->small_buf_q_producer_index);
	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.
			    rxLargeQProducerIndex,
			    qdev->lrg_buf_q_producer_index);

	/*
	 * Find out if the chip has already been initialized.  If it has, then
	 * we skip some of the initialization.
	 */
	clear_bit(QL_LINK_MASTER, &qdev->flags);
	value = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if ((value & PORT_STATUS_IC) == 0) {

		/* Chip has not been configured yet, so let it rip. */
		if (ql_init_misc_registers(qdev)) {
			status = -1;
			goto out;
		}

		value = qdev->nvram_data.tcpMaxWindowSize;
		ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);

		value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;

		if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
				    (QL_RESOURCE_BITS_BASE_CODE |
				     (qdev->mac_index) * 2) << 13)) {
			status = -1;
			goto out;
		}
		ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
		ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
				   (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
				     16) | (INTERNAL_CHIP_SD |
					    INTERNAL_CHIP_WE)));
		ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
	}

	if (qdev->mac_index)
		ql_write_page0_reg(qdev,
				   &port_regs->mac1MaxFrameLengthReg,
				   qdev->max_frame_size);
	else
		ql_write_page0_reg(qdev,
				   &port_regs->mac0MaxFrameLengthReg,
				   qdev->max_frame_size);

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		status = -1;
		goto out;
	}

	PHY_Setup(qdev);
	ql_init_scan_mode(qdev);
	ql_get_phy_owner(qdev);

	/* Load the MAC Configuration */

	/* Program lower 32 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((qdev->ndev->dev_addr[2] << 24)
			    | (qdev->ndev->dev_addr[3] << 16)
			    | (qdev->ndev->dev_addr[4] << 8)
			    | qdev->ndev->dev_addr[5]));

	/* Program top 16 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((qdev->ndev->dev_addr[0] << 8)
			    | qdev->ndev->dev_addr[1]));

	/* Enable Primary MAC */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
			    MAC_ADDR_INDIRECT_PTR_REG_PE));

	/* Clear Primary and Secondary IP addresses */
	ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
			   ((IP_ADDR_INDEX_REG_MASK << 16) |
			    (qdev->mac_index << 2)));
	ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);

	ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
			   ((IP_ADDR_INDEX_REG_MASK << 16) |
			    ((qdev->mac_index << 2) + 1)));
	ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);

	/* Indicate Configuration Complete */
	ql_write_page0_reg(qdev,
			   &port_regs->portControl,
			   ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));

	do {
		value = ql_read_page0_reg(qdev, &port_regs->portStatus);
		if (value & PORT_STATUS_IC)
			break;
		spin_unlock_irq(&qdev->hw_lock);
		msleep(500);
		spin_lock_irq(&qdev->hw_lock);
	} while (--delay);

	if (delay == 0) {
		netdev_err(qdev->ndev, "Hw Initialization timeout\n");
		status = -1;
		goto out;
	}

	/* Enable Ethernet Function */
	if (qdev->device_id == QL3032_DEVICE_ID) {
		value = (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
			 QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 |
			 QL3032_PORT_CONTROL_ET);
		ql_write_page0_reg(qdev, &port_regs->functionControl,
				   ((value << 16) | value));
	} else {
		value = (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
			 PORT_CONTROL_HH);
		ql_write_page0_reg(qdev, &port_regs->portControl,
				   ((value << 16) | value));
	}

out:
	return status;
}
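
/*
 * ql_adapter_reset() issues an ISP soft reset and polls (once per
 * second, up to five times) for the firmware to acknowledge it.  If
 * the soft reset never completes, it escalates to a force soft reset
 * with the same polling scheme.
 */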
/*
 * Caller holds hw_lock.
 */
static int ql_adapter_reset(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	int status = 0;
	u16 value;
	int max_wait_time;

	set_bit(QL_RESET_ACTIVE, &qdev->flags);
	clear_bit(QL_RESET_DONE, &qdev->flags);

	/*
	 * Issue soft reset to chip.
	 */
	netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n");
	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.ispControlStatus,
			    ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));

	/* Wait up to 5 seconds for the reset to complete. */
	netdev_printk(KERN_DEBUG, qdev->ndev,
		      "Waiting up to 5 seconds for reset to complete\n");

	/* Wait until the firmware tells us the Soft Reset is done */
	max_wait_time = 5;
	do {
		value =
		    ql_read_common_reg(qdev,
				       &port_regs->CommonRegs.ispControlStatus);
		if ((value & ISP_CONTROL_SR) == 0)
			break;

		ssleep(1);
	} while ((--max_wait_time));

	/*
	 * Also, make sure that the Network Reset Interrupt bit has been
	 * cleared after the soft reset has taken place.
	 */
	value =
	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	if (value & ISP_CONTROL_RI) {
		netdev_printk(KERN_DEBUG, qdev->ndev,
			      "clearing RI after reset\n");
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.
				    ispControlStatus,
				    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
	}

	if (max_wait_time == 0) {
		/* Issue Force Soft Reset */
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.
				    ispControlStatus,
				    ((ISP_CONTROL_FSR << 16) |
				     ISP_CONTROL_FSR));
		/*
		 * Wait until the firmware tells us the Force Soft Reset is
		 * done
		 */
		max_wait_time = 5;
		do {
			value = ql_read_common_reg(qdev,
						   &port_regs->CommonRegs.
						   ispControlStatus);
			if ((value & ISP_CONTROL_FSR) == 0)
				break;
			ssleep(1);
		} while ((--max_wait_time));
	}
	if (max_wait_time == 0)
		status = 1;

	clear_bit(QL_RESET_ACTIVE, &qdev->flags);
	set_bit(QL_RESET_DONE, &qdev->flags);
	return status;
}
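
/*
 * ql_set_mac_info() decodes the network/SCSI function bits in
 * ispControlStatus to decide whether this instance drives port 0 or
 * port 1, derives the per-port outbound opcode, mailbox bit mask, and
 * PHY address from that, and records whether the link is optical or
 * copper from the port status media bits.
 */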
static void ql_set_mac_info(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value, port_status;
	u8 func_number;

	/* Get the function number */
	value =
	    ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
	func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
	port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
	switch (value & ISP_CONTROL_FN_MASK) {
	case ISP_CONTROL_FN0_NET:
		qdev->mac_index = 0;
		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
		qdev->mb_bit_mask = FN0_MA_BITS_MASK;
		qdev->PHYAddr = PORT0_PHY_ADDRESS;
		if (port_status & PORT_STATUS_SM0)
			set_bit(QL_LINK_OPTICAL, &qdev->flags);
		else
			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
		break;

	case ISP_CONTROL_FN1_NET:
		qdev->mac_index = 1;
		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
		qdev->mb_bit_mask = FN1_MA_BITS_MASK;
		qdev->PHYAddr = PORT1_PHY_ADDRESS;
		if (port_status & PORT_STATUS_SM1)
			set_bit(QL_LINK_OPTICAL, &qdev->flags);
		else
			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
		break;

	case ISP_CONTROL_FN0_SCSI:
	case ISP_CONTROL_FN1_SCSI:
	default:
		netdev_printk(KERN_DEBUG, qdev->ndev,
			      "Invalid function number, ispControlStatus = 0x%x\n",
			      value);
		break;
	}
	qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8;
}

static void ql_display_dev_info(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct pci_dev *pdev = qdev->pdev;

	netdev_info(ndev,
		    "%s Adapter %d RevisionID %d found %s on PCI slot %d\n",
		    DRV_NAME, qdev->index, qdev->chip_rev_id,
		    qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022",
		    qdev->pci_slot);
	netdev_info(ndev, "%s Interface\n",
		    test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");

	/*
	 * Print PCI bus width/type.
	 */
	netdev_info(ndev, "Bus interface is %s %s\n",
		    ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
		    ((qdev->pci_x) ? "PCI-X" : "PCI"));
	netdev_info(ndev, "mem IO base address adjusted = 0x%p\n",
		    qdev->mem_map_registers);
	netdev_info(ndev, "Interrupt number = %d\n", pdev->irq);

	netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr);
}
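
/*
 * ql_adapter_down() quiesces the interface: it stops the TX queue,
 * drops the carrier, releases the IRQ (and MSI, if it was enabled),
 * kills the adapter timer and NAPI context, optionally soft-resets
 * the chip under hw_lock, and finally frees the DMA resources.
 */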
static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
{
	struct net_device *ndev = qdev->ndev;
	int retval = 0;

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	clear_bit(QL_LINK_MASTER, &qdev->flags);

	ql_disable_interrupts(qdev);

	free_irq(qdev->pdev->irq, ndev);

	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		netdev_info(qdev->ndev, "calling pci_disable_msi()\n");
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
		pci_disable_msi(qdev->pdev);
	}

	del_timer_sync(&qdev->adapter_timer);

	napi_disable(&qdev->napi);

	if (do_reset) {
		int soft_reset;
		unsigned long hw_flags;

		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		if (ql_wait_for_drvr_lock(qdev)) {
			soft_reset = ql_adapter_reset(qdev);
			if (soft_reset) {
				netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n",
					   qdev->index);
			}
			netdev_err(ndev,
				   "Releasing driver lock via chip reset\n");
		} else {
			netdev_err(ndev,
				   "Could not acquire driver lock to do reset!\n");
			retval = -1;
		}
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	}
	ql_free_mem_resources(qdev);
	return retval;
}
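
/*
 * ql_adapter_up() is the inverse of ql_adapter_down(): allocate DMA
 * resources, enable MSI when requested (falling back to a shared
 * legacy interrupt), take the inter-function driver lock, initialize
 * the chip, and only then arm the adapter timer, NAPI polling, and
 * interrupts.  Note that ql_wait_for_drvr_lock() returns nonzero on
 * success, so the "if (err)" branch below is the success path.
 */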
static int ql_adapter_up(struct ql3_adapter *qdev)
{
	struct net_device *ndev = qdev->ndev;
	int err;
	unsigned long irq_flags = IRQF_SHARED;
	unsigned long hw_flags;

	if (ql_alloc_mem_resources(qdev)) {
		netdev_err(ndev, "Unable to allocate buffers\n");
		return -ENOMEM;
	}

	if (qdev->msi) {
		if (pci_enable_msi(qdev->pdev)) {
			netdev_err(ndev,
				   "User requested MSI, but MSI failed to initialize. Continuing without MSI.\n");
			qdev->msi = 0;
		} else {
			netdev_info(ndev, "MSI Enabled...\n");
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			irq_flags &= ~IRQF_SHARED;
		}
	}

	err = request_irq(qdev->pdev->irq, ql3xxx_isr,
			  irq_flags, ndev->name, ndev);
	if (err) {
		netdev_err(ndev,
			   "Failed to reserve interrupt %d - already in use\n",
			   qdev->pdev->irq);
		goto err_irq;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	err = ql_wait_for_drvr_lock(qdev);
	if (err) {
		err = ql_adapter_initialize(qdev);
		if (err) {
			netdev_err(ndev, "Unable to initialize adapter\n");
			goto err_init;
		}
		netdev_err(ndev, "Releasing driver lock\n");
		ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
	} else {
		netdev_err(ndev, "Could not acquire driver lock\n");
		err = -ENODEV;
		goto err_lock;
	}

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	set_bit(QL_ADAPTER_UP, &qdev->flags);

	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);

	napi_enable(&qdev->napi);
	ql_enable_interrupts(qdev);
	return 0;

err_init:
	ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
err_lock:
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	free_irq(qdev->pdev->irq, ndev);
err_irq:
	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		netdev_info(ndev, "calling pci_disable_msi()\n");
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
		pci_disable_msi(qdev->pdev);
	}
	return err;
}
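
/*
 * ql_cycle_adapter() bounces the interface (down, then up again) and
 * closes the device outright if either half of the cycle fails.
 */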
static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
{
	if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
		netdev_err(qdev->ndev,
			   "Driver up/down cycle failed, closing device\n");
		rtnl_lock();
		dev_close(qdev->ndev);
		rtnl_unlock();
		return -1;
	}
	return 0;
}

static int ql3xxx_close(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	/*
	 * Wait for device to recover from a reset.
	 * (Rarely happens, but possible.)
	 */
	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		msleep(50);

	ql_adapter_down(qdev, QL_DO_RESET);
	return 0;
}

static int ql3xxx_open(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	return ql_adapter_up(qdev);
}
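
/*
 * ql3xxx_set_mac_address() backs the ndo_set_mac_address hook, which
 * the core invokes for SIOCSIFHWADDR and the equivalent netlink
 * request.  The interface must be down first, since the handler
 * returns -EBUSY otherwise; for example (hypothetical device name
 * and address):
 *
 *	ip link set eth0 down
 *	ip link set eth0 address 02:00:00:11:22:33
 *	ip link set eth0 up
 */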
static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	struct sockaddr *addr = p;
	unsigned long hw_flags;

	if (netif_running(ndev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	/* Program lower 32 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((ndev->dev_addr[2] << 24) |
			    (ndev->dev_addr[3] << 16) |
			    (ndev->dev_addr[4] << 8) | ndev->dev_addr[5]));
	/* Program top 16 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	return 0;
}

static void ql3xxx_tx_timeout(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	netdev_err(ndev, "Resetting...\n");
	/*
	 * Stop the queues, we've got a problem.
	 */
	netif_stop_queue(ndev);

	/*
	 * Wake up the worker to process this event.
	 */
	queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
}
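
/*
 * ql_reset_work() runs from the driver workqueue after a reset has
 * been requested.  It returns any in-flight TX skbs (unmapping their
 * DMA segments first), clears the Network Reset Interrupt, waits up
 * to ten seconds for the soft reset to finish, and then cycles the
 * adapter; if the reset never completes, it forces a full reset
 * cycle instead.
 */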
static void ql_reset_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, reset_work.work);
	struct net_device *ndev = qdev->ndev;
	u32 value;
	struct ql_tx_buf_cb *tx_cb;
	int max_wait_time, i;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	unsigned long hw_flags;

	if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
	    test_bit(QL_RESET_START, &qdev->flags)) {
		clear_bit(QL_LINK_MASTER, &qdev->flags);

		/*
		 * Loop through the active list and return the skb.
		 */
		for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
			int j;
			tx_cb = &qdev->tx_buf[i];
			if (tx_cb->skb) {
				netdev_printk(KERN_DEBUG, ndev,
					      "Freeing lost SKB\n");
				pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(&tx_cb->map[0],
							mapaddr),
					 dma_unmap_len(&tx_cb->map[0], maplen),
					 PCI_DMA_TODEVICE);
				for (j = 1; j < tx_cb->seg_count; j++) {
					pci_unmap_page(qdev->pdev,
					       dma_unmap_addr(&tx_cb->map[j],
							      mapaddr),
					       dma_unmap_len(&tx_cb->map[j],
							     maplen),
					       PCI_DMA_TODEVICE);
				}
				dev_kfree_skb(tx_cb->skb);
				tx_cb->skb = NULL;
			}
		}

		netdev_err(ndev, "Clearing NRI after reset\n");
		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.
				    ispControlStatus,
				    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));

		/*
		 * Wait for the Soft Reset to complete.
		 */
		max_wait_time = 10;
		do {
			value = ql_read_common_reg(qdev,
						   &port_regs->CommonRegs.
						   ispControlStatus);
			if ((value & ISP_CONTROL_SR) == 0) {
				netdev_printk(KERN_DEBUG, ndev,
					      "reset completed\n");
				break;
			}

			if (value & ISP_CONTROL_RI) {
				netdev_printk(KERN_DEBUG, ndev,
					      "clearing NRI after reset\n");
				ql_write_common_reg(qdev,
						    &port_regs->CommonRegs.
						    ispControlStatus,
						    ((ISP_CONTROL_RI << 16) |
						     ISP_CONTROL_RI));
			}

			spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
			ssleep(1);
			spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		} while (--max_wait_time);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		if (value & ISP_CONTROL_SR) {
			/*
			 * Set the reset flags and clear the board again.
			 * Nothing else to do...
			 */
			netdev_err(ndev,
				   "Timed out waiting for reset to complete\n");
			netdev_err(ndev, "Do a reset\n");
			clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
			clear_bit(QL_RESET_START, &qdev->flags);
			ql_cycle_adapter(qdev, QL_DO_RESET);
			return;
		}

		clear_bit(QL_RESET_ACTIVE, &qdev->flags);
		clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
		clear_bit(QL_RESET_START, &qdev->flags);
		ql_cycle_adapter(qdev, QL_NO_RESET);
	}
}

static void ql_tx_timeout_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, tx_timeout_work.work);

	ql_cycle_adapter(qdev, QL_DO_RESET);
}
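
/*
 * ql_get_board_info() snapshots the PCI personality of the board from
 * the port status register: chip revision ID, 32- vs 64-bit bus
 * width, conventional PCI vs PCI-X, and the PCI slot number.
 */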
static void ql_get_board_info(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);

	qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
	if (value & PORT_STATUS_64)
		qdev->pci_width = 64;
	else
		qdev->pci_width = 32;
	if (value & PORT_STATUS_X)
		qdev->pci_x = 1;
	else
		qdev->pci_x = 0;
	qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
}

static void ql3xxx_timer(unsigned long ptr)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;
	queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
}

static const struct net_device_ops ql3xxx_netdev_ops = {
	.ndo_open		= ql3xxx_open,
	.ndo_start_xmit		= ql3xxx_send,
	.ndo_stop		= ql3xxx_close,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ql3xxx_set_mac_address,
	.ndo_tx_timeout		= ql3xxx_tx_timeout,
};
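
/*
 * ql3xxx_probe() brings a freshly discovered device to the point
 * where it can be opened: enable and map the PCI device, pick a 64-
 * or 32-bit DMA mask, allocate the netdev, validate the NVRAM and
 * pull the MAC address and MTU from it, initialize the adapter timer
 * and the reset/tx-timeout/link-state work items, and register the
 * net device.  Nothing is armed until ql3xxx_open() runs.
 */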
static int ql3xxx_probe(struct pci_dev *pdev,
			const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql3_adapter *qdev = NULL;
	static int cards_found;
	int uninitialized_var(pci_using_dac), err;

	err = pci_enable_device(pdev);
	if (err) {
		pr_err("%s cannot enable PCI device\n", pci_name(pdev));
		goto err_out;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		pr_err("%s cannot obtain PCI resources\n", pci_name(pdev));
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
		pci_using_dac = 0;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	}

	if (err) {
		pr_err("%s no usable DMA configuration\n", pci_name(pdev));
		goto err_out_free_regions;
	}

	ndev = alloc_etherdev(sizeof(struct ql3_adapter));
	if (!ndev) {
		err = -ENOMEM;
		goto err_out_free_regions;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	pci_set_drvdata(pdev, ndev);

	qdev = netdev_priv(ndev);
	qdev->index = cards_found;
	qdev->ndev = ndev;
	qdev->pdev = pdev;
	qdev->device_id = pci_entry->device;
	qdev->port_link_state = LS_DOWN;
	if (msi)
		qdev->msi = 1;

	qdev->msg_enable = netif_msg_init(debug, default_msg);

	if (pci_using_dac)
		ndev->features |= NETIF_F_HIGHDMA;
	if (qdev->device_id == QL3032_DEVICE_ID)
		ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;

	qdev->mem_map_registers = pci_ioremap_bar(pdev, 1);
	if (!qdev->mem_map_registers) {
		pr_err("%s: cannot map device registers\n", pci_name(pdev));
		err = -EIO;
		goto err_out_free_ndev;
	}

	spin_lock_init(&qdev->adapter_lock);
	spin_lock_init(&qdev->hw_lock);

	/* Set driver entry points */
	ndev->netdev_ops = &ql3xxx_netdev_ops;
	ndev->ethtool_ops = &ql3xxx_ethtool_ops;
	ndev->watchdog_timeo = 5 * HZ;

	netif_napi_add(ndev, &qdev->napi, ql_poll, 64);

	ndev->irq = pdev->irq;

	/* make sure the EEPROM is good */
	if (ql_get_nvram_params(qdev)) {
		pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n",
			 __func__, qdev->index);
		err = -EIO;
		goto err_out_iounmap;
	}

	ql_set_mac_info(qdev);

	/* Validate and set parameters */
	if (qdev->mac_index) {
		ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac;
		ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress);
	} else {
		ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac;
		ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress);
	}

	ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;

	/* Record PCI bus information. */
	ql_get_board_info(qdev);

	/*
	 * Set the Maximum Memory Read Byte Count value. We do this to handle
	 * jumbo frames.
	 */
	if (qdev->pci_x)
		pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036);

	err = register_netdev(ndev);
	if (err) {
		pr_err("%s: cannot register net device\n", pci_name(pdev));
		goto err_out_iounmap;
	}

	/* we're going to reset, so assume we have no link for now */

	netif_carrier_off(ndev);
	netif_stop_queue(ndev);

	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
	INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
	INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work);

	init_timer(&qdev->adapter_timer);
	qdev->adapter_timer.function = ql3xxx_timer;
	qdev->adapter_timer.expires = jiffies + HZ * 2;	/* two second delay */
	qdev->adapter_timer.data = (unsigned long)qdev;

	if (!cards_found) {
		pr_alert("%s\n", DRV_STRING);
		pr_alert("Driver name: %s, Version: %s\n",
			 DRV_NAME, DRV_VERSION);
	}
	ql_display_dev_info(ndev);

	cards_found++;
	return 0;

err_out_iounmap:
	iounmap(qdev->mem_map_registers);
err_out_free_ndev:
	free_netdev(ndev);
err_out_free_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out:
	return err;
}
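
/*
 * ql3xxx_remove() unwinds ql3xxx_probe(): it unregisters the netdev,
 * masks the chip's interrupts, cancels the pending reset and
 * tx-timeout work items and destroys the workqueue, then releases the
 * register mapping and PCI resources.
 */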
static void ql3xxx_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql3_adapter *qdev = netdev_priv(ndev);

	unregister_netdev(ndev);

	ql_disable_interrupts(qdev);

	if (qdev->workqueue) {
		cancel_delayed_work(&qdev->reset_work);
		cancel_delayed_work(&qdev->tx_timeout_work);
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	iounmap(qdev->mem_map_registers);
	pci_release_regions(pdev);
	free_netdev(ndev);
}

static struct pci_driver ql3xxx_driver = {
	.name = DRV_NAME,
	.id_table = ql3xxx_pci_tbl,
	.probe = ql3xxx_probe,
	.remove = ql3xxx_remove,
};

module_pci_driver(ql3xxx_driver);