/*
 * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
 * Copyright (c) 2008 Marvell Semiconductor
 *
 * Copyright (c) 2015 CMC Electronics, Inc.
 *	Added support for VLAN Table Unit operations
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_bridge.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/seq_file.h>
#include <net/dsa.h>
#include "mv88e6xxx.h"

/* MDIO bus access can be nested in the case of PHYs connected to the
 * internal MDIO bus of the switch, which is accessed via MDIO bus of
 * the Ethernet interface. Avoid lockdep false positives by using
 * mutex_lock_nested().
 */
static int mv88e6xxx_mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
{
	int ret;

	mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
	ret = bus->read(bus, addr, regnum);
	mutex_unlock(&bus->mdio_lock);

	return ret;
}

static int mv88e6xxx_mdiobus_write(struct mii_bus *bus, int addr, u32 regnum,
				   u16 val)
{
	int ret;

	mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
	ret = bus->write(bus, addr, regnum, val);
	mutex_unlock(&bus->mdio_lock);

	return ret;
}

/* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
 * use all 32 SMI bus addresses on its SMI bus, and all switch registers
 * will be directly accessible on some {device address,register address}
 * pair. If the ADDR[4:0] pins are not strapped to zero, the switch
 * will only respond to SMI transactions to that specific address, and
 * an indirect addressing mechanism needs to be used to access its
 * registers.
 */
static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
{
	int ret;
	int i;

	for (i = 0; i < 16; i++) {
		ret = mv88e6xxx_mdiobus_read(bus, sw_addr, SMI_CMD);
		if (ret < 0)
			return ret;

		if ((ret & SMI_CMD_BUSY) == 0)
			return 0;
	}

	return -ETIMEDOUT;
}

int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
{
	int ret;

	if (sw_addr == 0)
		return mv88e6xxx_mdiobus_read(bus, addr, reg);

	/* Wait for the bus to become free. */
	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
	if (ret < 0)
		return ret;

	/* Transmit the read command. */
	ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_CMD,
				      SMI_CMD_OP_22_READ | (addr << 5) | reg);
	if (ret < 0)
		return ret;

	/* Wait for the read command to complete. */
	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
	if (ret < 0)
		return ret;

	/* Read the data. */
	ret = mv88e6xxx_mdiobus_read(bus, sw_addr, SMI_DATA);
	if (ret < 0)
		return ret;

	return ret & 0xffff;
}

/* Must be called with SMI mutex held */
static int _mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
{
	struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
	int ret;

	if (bus == NULL)
		return -EINVAL;

	ret = __mv88e6xxx_reg_read(bus, ds->pd->sw_addr, addr, reg);
	if (ret < 0)
		return ret;

	dev_dbg(ds->master_dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
		addr, reg, ret);

	return ret;
}

int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_reg_read(ds, addr, reg);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
			  int reg, u16 val)
{
	int ret;

	if (sw_addr == 0)
		return mv88e6xxx_mdiobus_write(bus, addr, reg, val);

	/* Wait for the bus to become free. */
	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
	if (ret < 0)
		return ret;

	/* Transmit the data to write. */
	ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_DATA, val);
	if (ret < 0)
		return ret;

	/* Transmit the write command. */
	ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_CMD,
				      SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
	if (ret < 0)
		return ret;

	/* Wait for the write command to complete. */
	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
	if (ret < 0)
		return ret;

	return 0;
}

/* Must be called with SMI mutex held */
static int _mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg,
				u16 val)
{
	struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);

	if (bus == NULL)
		return -EINVAL;

	dev_dbg(ds->master_dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
		addr, reg, val);

	return __mv88e6xxx_reg_write(bus, ds->pd->sw_addr, addr, reg, val);
}

int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_reg_write(ds, addr, reg, val);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}
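
/* The switch's own MAC address is programmed either directly into the three
 * Global 1 MAC registers (one 16-bit register per pair of address bytes), or
 * byte by byte through the Global 2 SWITCH_MAC register on devices that use
 * the indirect interface, polling its busy bit between writes.
 */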
int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
{
	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]);
	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);

	return 0;
}

int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
{
	int i;
	int ret;

	for (i = 0; i < 6; i++) {
		int j;

		/* Write the MAC address byte. */
		REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
			  GLOBAL2_SWITCH_MAC_BUSY | (i << 8) | addr[i]);

		/* Wait for the write to complete. */
		for (j = 0; j < 16; j++) {
			ret = REG_READ(REG_GLOBAL2, GLOBAL2_SWITCH_MAC);
			if ((ret & GLOBAL2_SWITCH_MAC_BUSY) == 0)
				break;
		}
		if (j == 16)
			return -ETIMEDOUT;
	}

	return 0;
}

/* Must be called with SMI mutex held */
static int _mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
{
	if (addr >= 0)
		return _mv88e6xxx_reg_read(ds, addr, regnum);
	return 0xffff;
}

/* Must be called with SMI mutex held */
static int _mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum,
				u16 val)
{
	if (addr >= 0)
		return _mv88e6xxx_reg_write(ds, addr, regnum, val);
	return 0;
}
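
/* On devices that need it (CONFIG_NET_DSA_MV88E6XXX_NEED_PPU), the PHY
 * Polling Unit continuously polls the internal PHYs, so it must be disabled
 * before their registers can be accessed directly.  Rather than toggling it
 * around every single access, it is re-enabled from a work item scheduled by
 * a short timer after the last access.
 */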
#ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
{
	int ret;
	unsigned long timeout;

	ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
	REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
		  ret & ~GLOBAL_CONTROL_PPU_ENABLE);

	timeout = jiffies + 1 * HZ;
	while (time_before(jiffies, timeout)) {
		ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
		usleep_range(1000, 2000);
		if ((ret & GLOBAL_STATUS_PPU_MASK) !=
		    GLOBAL_STATUS_PPU_POLLING)
			return 0;
	}

	return -ETIMEDOUT;
}

static int mv88e6xxx_ppu_enable(struct dsa_switch *ds)
{
	int ret;
	unsigned long timeout;

	ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
	REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, ret | GLOBAL_CONTROL_PPU_ENABLE);

	timeout = jiffies + 1 * HZ;
	while (time_before(jiffies, timeout)) {
		ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
		usleep_range(1000, 2000);
		if ((ret & GLOBAL_STATUS_PPU_MASK) ==
		    GLOBAL_STATUS_PPU_POLLING)
			return 0;
	}

	return -ETIMEDOUT;
}

static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
{
	struct mv88e6xxx_priv_state *ps;

	ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work);
	if (mutex_trylock(&ps->ppu_mutex)) {
		struct dsa_switch *ds = ((struct dsa_switch *)ps) - 1;

		if (mv88e6xxx_ppu_enable(ds) == 0)
			ps->ppu_disabled = 0;
		mutex_unlock(&ps->ppu_mutex);
	}
}

static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
{
	struct mv88e6xxx_priv_state *ps = (void *)_ps;

	schedule_work(&ps->ppu_work);
}

static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->ppu_mutex);

	/* If the PHY polling unit is enabled, disable it so that
	 * we can access the PHY registers. If it was already
	 * disabled, cancel the timer that is going to re-enable
	 * it.
	 */
	if (!ps->ppu_disabled) {
		ret = mv88e6xxx_ppu_disable(ds);
		if (ret < 0) {
			mutex_unlock(&ps->ppu_mutex);
			return ret;
		}
		ps->ppu_disabled = 1;
	} else {
		del_timer(&ps->ppu_timer);
		ret = 0;
	}

	return ret;
}

static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	/* Schedule a timer to re-enable the PHY polling unit. */
	mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
	mutex_unlock(&ps->ppu_mutex);
}

void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	mutex_init(&ps->ppu_mutex);
	INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
	init_timer(&ps->ppu_timer);
	ps->ppu_timer.data = (unsigned long)ps;
	ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
}

int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum)
{
	int ret;

	ret = mv88e6xxx_ppu_access_get(ds);
	if (ret >= 0) {
		ret = mv88e6xxx_reg_read(ds, addr, regnum);
		mv88e6xxx_ppu_access_put(ds);
	}

	return ret;
}

int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
			    int regnum, u16 val)
{
	int ret;

	ret = mv88e6xxx_ppu_access_get(ds);
	if (ret >= 0) {
		ret = mv88e6xxx_reg_write(ds, addr, regnum, val);
		mv88e6xxx_ppu_access_put(ds);
	}

	return ret;
}
#endif

void mv88e6xxx_poll_link(struct dsa_switch *ds)
{
	int i;

	for (i = 0; i < DSA_MAX_PORTS; i++) {
		struct net_device *dev;
		int uninitialized_var(port_status);
		int pcs_ctrl;
		int link;
		int speed;
		int duplex;
		int fc;

		dev = ds->ports[i];
		if (dev == NULL)
			continue;

		pcs_ctrl = mv88e6xxx_reg_read(ds, REG_PORT(i), PORT_PCS_CTRL);
		if (pcs_ctrl < 0 || pcs_ctrl & PORT_PCS_CTRL_FORCE_LINK)
			continue;

		link = 0;
		if (dev->flags & IFF_UP) {
			port_status = mv88e6xxx_reg_read(ds, REG_PORT(i),
							 PORT_STATUS);
			if (port_status < 0)
				continue;

			link = !!(port_status & PORT_STATUS_LINK);
		}

		if (!link) {
			if (netif_carrier_ok(dev)) {
				netdev_info(dev, "link down\n");
				netif_carrier_off(dev);
			}
			continue;
		}

		switch (port_status & PORT_STATUS_SPEED_MASK) {
		case PORT_STATUS_SPEED_10:
			speed = 10;
			break;
		case PORT_STATUS_SPEED_100:
			speed = 100;
			break;
		case PORT_STATUS_SPEED_1000:
			speed = 1000;
			break;
		default:
			speed = -1;
			break;
		}
		duplex = (port_status & PORT_STATUS_DUPLEX) ? 1 : 0;
		fc = (port_status & PORT_STATUS_PAUSE_EN) ? 1 : 0;

		if (!netif_carrier_ok(dev)) {
			netdev_info(dev,
				    "link up, %d Mb/s, %s duplex, flow control %sabled\n",
				    speed,
				    duplex ? "full" : "half",
				    fc ? "en" : "dis");
			netif_carrier_on(dev);
		}
	}
}
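
/* The helpers below identify the product family from the switch ID read into
 * ps->id at probe time.  Feature differences between families (e.g. the
 * statistics addressing, RGMII clock delays and VTU/STU registers used later
 * in this file) are keyed off these checks.
 */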
static bool mv88e6xxx_6065_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6031:
	case PORT_SWITCH_ID_6061:
	case PORT_SWITCH_ID_6035:
	case PORT_SWITCH_ID_6065:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6095_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6092:
	case PORT_SWITCH_ID_6095:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6097_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6046:
	case PORT_SWITCH_ID_6085:
	case PORT_SWITCH_ID_6096:
	case PORT_SWITCH_ID_6097:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6165_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6123:
	case PORT_SWITCH_ID_6161:
	case PORT_SWITCH_ID_6165:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6185_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6121:
	case PORT_SWITCH_ID_6122:
	case PORT_SWITCH_ID_6152:
	case PORT_SWITCH_ID_6155:
	case PORT_SWITCH_ID_6182:
	case PORT_SWITCH_ID_6185:
	case PORT_SWITCH_ID_6108:
	case PORT_SWITCH_ID_6131:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6320_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6320:
	case PORT_SWITCH_ID_6321:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6351_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6171:
	case PORT_SWITCH_ID_6175:
	case PORT_SWITCH_ID_6350:
	case PORT_SWITCH_ID_6351:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6352_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6172:
	case PORT_SWITCH_ID_6176:
	case PORT_SWITCH_ID_6240:
	case PORT_SWITCH_ID_6352:
		return true;
	}
	return false;
}

/* We expect the switch to perform auto negotiation if there is a real
 * phy. However, in the case of a fixed link phy, we force the port
 * settings from the fixed link settings.
 */
void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
			   struct phy_device *phydev)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u32 reg;
	int ret;

	if (!phy_is_pseudo_fixed_link(phydev))
		return;

	mutex_lock(&ps->smi_mutex);

	ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
	if (ret < 0)
		goto out;

	reg = ret & ~(PORT_PCS_CTRL_LINK_UP |
		      PORT_PCS_CTRL_FORCE_LINK |
		      PORT_PCS_CTRL_DUPLEX_FULL |
		      PORT_PCS_CTRL_FORCE_DUPLEX |
		      PORT_PCS_CTRL_UNFORCED);

	reg |= PORT_PCS_CTRL_FORCE_LINK;
	if (phydev->link)
		reg |= PORT_PCS_CTRL_LINK_UP;

	if (mv88e6xxx_6065_family(ds) && phydev->speed > SPEED_100)
		goto out;

	switch (phydev->speed) {
	case SPEED_1000:
		reg |= PORT_PCS_CTRL_1000;
		break;
	case SPEED_100:
		reg |= PORT_PCS_CTRL_100;
		break;
	case SPEED_10:
		reg |= PORT_PCS_CTRL_10;
		break;
	default:
		pr_info("Unknown speed");
		goto out;
	}

	reg |= PORT_PCS_CTRL_FORCE_DUPLEX;
	if (phydev->duplex == DUPLEX_FULL)
		reg |= PORT_PCS_CTRL_DUPLEX_FULL;

	if ((mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds)) &&
	    (port >= ps->num_ports - 2)) {
		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
			reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK;
		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
			reg |= PORT_PCS_CTRL_RGMII_DELAY_TXCLK;
		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
			reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK |
				PORT_PCS_CTRL_RGMII_DELAY_TXCLK);
	}
	_mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_PCS_CTRL, reg);

out:
	mutex_unlock(&ps->smi_mutex);
}
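
/* Reading the hardware statistics is a two step operation: first a snapshot
 * of the port's counters is captured by the global statistics unit (waiting
 * for its busy bit to clear), then each counter is read out individually
 * through GLOBAL_STATS_OP and the GLOBAL_STATS_COUNTER_32/_01 registers.
 */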
/* Must be called with SMI mutex held */
static int _mv88e6xxx_stats_wait(struct dsa_switch *ds)
{
	int ret;
	int i;

	for (i = 0; i < 10; i++) {
		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_OP);
		if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
			return 0;
	}

	return -ETIMEDOUT;
}

/* Must be called with SMI mutex held */
static int _mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
{
	int ret;

	if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
		port = (port + 1) << 5;

	/* Snapshot the hardware statistics counters for this port. */
	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
				   GLOBAL_STATS_OP_CAPTURE_PORT |
				   GLOBAL_STATS_OP_HIST_RX_TX | port);
	if (ret < 0)
		return ret;

	/* Wait for the snapshotting to complete. */
	ret = _mv88e6xxx_stats_wait(ds);
	if (ret < 0)
		return ret;

	return 0;
}

/* Must be called with SMI mutex held */
static void _mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
{
	u32 _val;
	int ret;

	*val = 0;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
				   GLOBAL_STATS_OP_READ_CAPTURED |
				   GLOBAL_STATS_OP_HIST_RX_TX | stat);
	if (ret < 0)
		return;

	ret = _mv88e6xxx_stats_wait(ds);
	if (ret < 0)
		return;

	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
	if (ret < 0)
		return;

	_val = ret << 16;

	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
	if (ret < 0)
		return;

	*val = _val | ret;
}
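
/* Each entry gives the counter name exposed through ethtool, its width in
 * bytes and its register offset.  Offsets below 0x100 are read from the
 * global statistics unit after a snapshot; offsets of 0x100 and above refer
 * to per-port registers (offset - 0x100), see _mv88e6xxx_get_ethtool_stat().
 */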
static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
	{ "in_good_octets", 8, 0x00, },
	{ "in_bad_octets", 4, 0x02, },
	{ "in_unicast", 4, 0x04, },
	{ "in_broadcasts", 4, 0x06, },
	{ "in_multicasts", 4, 0x07, },
	{ "in_pause", 4, 0x16, },
	{ "in_undersize", 4, 0x18, },
	{ "in_fragments", 4, 0x19, },
	{ "in_oversize", 4, 0x1a, },
	{ "in_jabber", 4, 0x1b, },
	{ "in_rx_error", 4, 0x1c, },
	{ "in_fcs_error", 4, 0x1d, },
	{ "out_octets", 8, 0x0e, },
	{ "out_unicast", 4, 0x10, },
	{ "out_broadcasts", 4, 0x13, },
	{ "out_multicasts", 4, 0x12, },
	{ "out_pause", 4, 0x15, },
	{ "excessive", 4, 0x11, },
	{ "collisions", 4, 0x1e, },
	{ "deferred", 4, 0x05, },
	{ "single", 4, 0x14, },
	{ "multiple", 4, 0x17, },
	{ "out_fcs_error", 4, 0x03, },
	{ "late", 4, 0x1f, },
	{ "hist_64bytes", 4, 0x08, },
	{ "hist_65_127bytes", 4, 0x09, },
	{ "hist_128_255bytes", 4, 0x0a, },
	{ "hist_256_511bytes", 4, 0x0b, },
	{ "hist_512_1023bytes", 4, 0x0c, },
	{ "hist_1024_max_bytes", 4, 0x0d, },
	/* Not all devices have the following counters */
	{ "sw_in_discards", 4, 0x110, },
	{ "sw_in_filtered", 2, 0x112, },
	{ "sw_out_filtered", 2, 0x113, },
};

static bool have_sw_in_discards(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6095: case PORT_SWITCH_ID_6161:
	case PORT_SWITCH_ID_6165: case PORT_SWITCH_ID_6171:
	case PORT_SWITCH_ID_6172: case PORT_SWITCH_ID_6176:
	case PORT_SWITCH_ID_6182: case PORT_SWITCH_ID_6185:
	case PORT_SWITCH_ID_6352:
		return true;
	default:
		return false;
	}
}

static void _mv88e6xxx_get_strings(struct dsa_switch *ds,
				   int nr_stats,
				   struct mv88e6xxx_hw_stat *stats,
				   int port, uint8_t *data)
{
	int i;

	for (i = 0; i < nr_stats; i++) {
		memcpy(data + i * ETH_GSTRING_LEN,
		       stats[i].string, ETH_GSTRING_LEN);
	}
}

static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds,
					    int stat,
					    struct mv88e6xxx_hw_stat *stats,
					    int port)
{
	struct mv88e6xxx_hw_stat *s = stats + stat;
	u32 low;
	u32 high = 0;
	int ret;
	u64 value;

	if (s->reg >= 0x100) {
		ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
					  s->reg - 0x100);
		if (ret < 0)
			return UINT64_MAX;

		low = ret;
		if (s->sizeof_stat == 4) {
			ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
						  s->reg - 0x100 + 1);
			if (ret < 0)
				return UINT64_MAX;
			high = ret;
		}
	} else {
		_mv88e6xxx_stats_read(ds, s->reg, &low);
		if (s->sizeof_stat == 8)
			_mv88e6xxx_stats_read(ds, s->reg + 1, &high);
	}
	value = (((u64)high) << 16) | low;
	return value;
}

static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
					 int nr_stats,
					 struct mv88e6xxx_hw_stat *stats,
					 int port, uint64_t *data)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;
	int i;

	mutex_lock(&ps->smi_mutex);

	ret = _mv88e6xxx_stats_snapshot(ds, port);
	if (ret < 0) {
		mutex_unlock(&ps->smi_mutex);
		return;
	}

	/* Read each of the counters. */
	for (i = 0; i < nr_stats; i++)
		data[i] = _mv88e6xxx_get_ethtool_stat(ds, i, stats, port);

	mutex_unlock(&ps->smi_mutex);
}

/* All the statistics in the table */
void
mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
{
	if (have_sw_in_discards(ds))
		_mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
				       mv88e6xxx_hw_stats, port, data);
	else
		_mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
				       mv88e6xxx_hw_stats, port, data);
}

int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
{
	if (have_sw_in_discards(ds))
		return ARRAY_SIZE(mv88e6xxx_hw_stats);
	return ARRAY_SIZE(mv88e6xxx_hw_stats) - 3;
}

void
mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
			    int port, uint64_t *data)
{
	if (have_sw_in_discards(ds))
		_mv88e6xxx_get_ethtool_stats(
			ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
			mv88e6xxx_hw_stats, port, data);
	else
		_mv88e6xxx_get_ethtool_stats(
			ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
			mv88e6xxx_hw_stats, port, data);
}

int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
{
	return 32 * sizeof(u16);
}

void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
			struct ethtool_regs *regs, void *_p)
{
	u16 *p = _p;
	int i;

	regs->version = 0;

	memset(p, 0xff, 32 * sizeof(u16));

	for (i = 0; i < 32; i++) {
		int ret;

		ret = mv88e6xxx_reg_read(ds, REG_PORT(port), i);
		if (ret >= 0)
			p[i] = ret;
	}
}
/* Must be called with SMI lock held */
static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
			   u16 mask)
{
	unsigned long timeout = jiffies + HZ / 10;

	while (time_before(jiffies, timeout)) {
		int ret;

		ret = _mv88e6xxx_reg_read(ds, reg, offset);
		if (ret < 0)
			return ret;
		if (!(ret & mask))
			return 0;

		usleep_range(1000, 2000);
	}
	return -ETIMEDOUT;
}

static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_wait(ds, reg, offset, mask);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

static int _mv88e6xxx_phy_wait(struct dsa_switch *ds)
{
	return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
			       GLOBAL2_SMI_OP_BUSY);
}

int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
{
	return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
			      GLOBAL2_EEPROM_OP_LOAD);
}

int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
{
	return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
			      GLOBAL2_EEPROM_OP_BUSY);
}

/* Must be called with SMI lock held */
static int _mv88e6xxx_atu_wait(struct dsa_switch *ds)
{
	return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_ATU_OP,
			       GLOBAL_ATU_OP_BUSY);
}

/* Must be called with SMI lock held */
static int _mv88e6xxx_scratch_wait(struct dsa_switch *ds)
{
	return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SCRATCH_MISC,
			       GLOBAL2_SCRATCH_BUSY);
}

/* Must be called with SMI mutex held */
static int _mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr,
					int regnum)
{
	int ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
				   GLOBAL2_SMI_OP_22_READ | (addr << 5) |
				   regnum);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_phy_wait(ds);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA);
}

/* Must be called with SMI mutex held */
static int _mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr,
					 int regnum, u16 val)
{
	int ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
				   GLOBAL2_SMI_OP_22_WRITE | (addr << 5) |
				   regnum);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_phy_wait(ds);
}
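
/* The EEE configuration used below lives in register 16 of the port's
 * internal PHY, accessed through the indirect PHY interface: bit 9 (0x0200)
 * maps to eee_enabled and bit 8 (0x0100) to tx_lpi_enabled.  Whether EEE is
 * currently active on the link is reported by PORT_STATUS_EEE.
 */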
int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int reg;

	mutex_lock(&ps->smi_mutex);

	reg = _mv88e6xxx_phy_read_indirect(ds, port, 16);
	if (reg < 0)
		goto out;

	e->eee_enabled = !!(reg & 0x0200);
	e->tx_lpi_enabled = !!(reg & 0x0100);

	reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
	if (reg < 0)
		goto out;

	e->eee_active = !!(reg & PORT_STATUS_EEE);
	reg = 0;

out:
	mutex_unlock(&ps->smi_mutex);
	return reg;
}

int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
		      struct phy_device *phydev, struct ethtool_eee *e)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int reg;
	int ret;

	mutex_lock(&ps->smi_mutex);

	ret = _mv88e6xxx_phy_read_indirect(ds, port, 16);
	if (ret < 0)
		goto out;

	reg = ret & ~0x0300;
	if (e->eee_enabled)
		reg |= 0x0200;
	if (e->tx_lpi_enabled)
		reg |= 0x0100;

	ret = _mv88e6xxx_phy_write_indirect(ds, port, 16, reg);
out:
	mutex_unlock(&ps->smi_mutex);

	return ret;
}
static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, int fid, u16 cmd)
{
	int ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_atu_wait(ds);
}

static int _mv88e6xxx_flush_fid(struct dsa_switch *ds, int fid)
{
	int ret;

	ret = _mv88e6xxx_atu_wait(ds);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_FLUSH_NON_STATIC_DB);
}

static int mv88e6xxx_set_port_state(struct dsa_switch *ds, int port, u8 state)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int reg, ret = 0;
	u8 oldstate;

	mutex_lock(&ps->smi_mutex);

	reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL);
	if (reg < 0) {
		ret = reg;
		goto abort;
	}

	oldstate = reg & PORT_CONTROL_STATE_MASK;
	if (oldstate != state) {
		/* Flush forwarding database if we're moving a port
		 * from Learning or Forwarding state to Disabled or
		 * Blocking or Listening state.
		 */
		if (oldstate >= PORT_CONTROL_STATE_LEARNING &&
		    state <= PORT_CONTROL_STATE_BLOCKING) {
			ret = _mv88e6xxx_flush_fid(ds, ps->fid[port]);
			if (ret)
				goto abort;
		}
		reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL,
					   reg);
	}

abort:
	mutex_unlock(&ps->smi_mutex);
	return ret;
}

/* Must be called with smi lock held */
static int _mv88e6xxx_update_port_config(struct dsa_switch *ds, int port)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u8 fid = ps->fid[port];
	u16 reg = fid << 12;

	if (dsa_is_cpu_port(ds, port))
		reg |= ds->phys_port_mask;
	else
		reg |= (ps->bridge_mask[fid] |
			(1 << dsa_upstream_port(ds))) & ~(1 << port);

	return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN, reg);
}

/* Must be called with smi lock held */
static int _mv88e6xxx_update_bridge_config(struct dsa_switch *ds, int fid)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int port;
	u32 mask;
	int ret;

	mask = ds->phys_port_mask;
	while (mask) {
		port = __ffs(mask);
		mask &= ~(1 << port);
		if (ps->fid[port] != fid)
			continue;

		ret = _mv88e6xxx_update_port_config(ds, port);
		if (ret)
			return ret;
	}

	return _mv88e6xxx_flush_fid(ds, fid);
}

/* Bridge handling functions */
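/* Every port is assigned a forwarding database (FID); ports that belong to
 * the same bridge share one FID so the switch forwards between them, while
 * ports with different FIDs stay isolated.  ps->fid[] tracks the per-port
 * FID, ps->bridge_mask[] the member ports of each FID, and ps->fid_bitmap
 * which FIDs are currently in use.
 */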
int mv88e6xxx_join_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret = 0;
	u32 nmask;
	int fid;

	/* If the bridge group is not empty, join that group.
	 * Otherwise create a new group.
	 */
	fid = ps->fid[port];
	nmask = br_port_mask & ~(1 << port);
	if (nmask)
		fid = ps->fid[__ffs(nmask)];

	nmask = ps->bridge_mask[fid] | (1 << port);
	if (nmask != br_port_mask) {
		netdev_err(ds->ports[port],
			   "join: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n",
			   fid, br_port_mask, nmask);
		return -EINVAL;
	}

	mutex_lock(&ps->smi_mutex);

	ps->bridge_mask[fid] = br_port_mask;

	if (fid != ps->fid[port]) {
		clear_bit(ps->fid[port], ps->fid_bitmap);
		ps->fid[port] = fid;
		ret = _mv88e6xxx_update_bridge_config(ds, fid);
	}

	mutex_unlock(&ps->smi_mutex);

	return ret;
}

int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u8 fid, newfid;
	int ret;

	fid = ps->fid[port];

	if (ps->bridge_mask[fid] != br_port_mask) {
		netdev_err(ds->ports[port],
			   "leave: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n",
			   fid, br_port_mask, ps->bridge_mask[fid]);
		return -EINVAL;
	}

	/* If the port was the last port of a bridge, we are done.
	 * Otherwise assign a new fid to the port, and fix up
	 * the bridge configuration.
	 */
	if (br_port_mask == (1 << port))
		return 0;

	mutex_lock(&ps->smi_mutex);

	newfid = find_next_zero_bit(ps->fid_bitmap, VLAN_N_VID, 1);
	if (unlikely(newfid > ps->num_ports)) {
		netdev_err(ds->ports[port], "all first %d FIDs are used\n",
			   ps->num_ports);
		ret = -ENOSPC;
		goto unlock;
	}

	ps->fid[port] = newfid;
	set_bit(newfid, ps->fid_bitmap);
	ps->bridge_mask[fid] &= ~(1 << port);
	ps->bridge_mask[newfid] = 1 << port;

	ret = _mv88e6xxx_update_bridge_config(ds, fid);
	if (!ret)
		ret = _mv88e6xxx_update_bridge_config(ds, newfid);

unlock:
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int stp_state;

	switch (state) {
	case BR_STATE_DISABLED:
		stp_state = PORT_CONTROL_STATE_DISABLED;
		break;
	case BR_STATE_BLOCKING:
	case BR_STATE_LISTENING:
		stp_state = PORT_CONTROL_STATE_BLOCKING;
		break;
	case BR_STATE_LEARNING:
		stp_state = PORT_CONTROL_STATE_LEARNING;
		break;
	case BR_STATE_FORWARDING:
	default:
		stp_state = PORT_CONTROL_STATE_FORWARDING;
		break;
	}

	netdev_dbg(ds->ports[port], "port state %d [%d]\n", state, stp_state);

	/* mv88e6xxx_port_stp_update may be called with softirqs disabled,
	 * so we can not update the port state directly but need to schedule it.
	 */
	ps->port_state[port] = stp_state;
	set_bit(port, &ps->port_state_update_mask);
	schedule_work(&ps->bridge_work);

	return 0;
}

int mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *pvid)
{
	int ret;

	ret = mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_DEFAULT_VLAN);
	if (ret < 0)
		return ret;

	*pvid = ret & PORT_DEFAULT_VLAN_MASK;

	return 0;
}

int mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 pvid)
{
	return mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
				   pvid & PORT_DEFAULT_VLAN_MASK);
}
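
/* VLAN Table Unit (VTU) support.  The VTU is the switch's VLAN database:
 * entries are looked up with a GET_NEXT operation and written back with
 * LOAD_PURGE, polling the VTU busy bit around each command.  The companion
 * STU, which VTU entries must reference on the families that have one (see
 * _mv88e6xxx_vlan_init), is programmed through the same registers.
 */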
static int _mv88e6xxx_vtu_wait(struct dsa_switch *ds)
{
	return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_VTU_OP,
			       GLOBAL_VTU_OP_BUSY);
}

static int _mv88e6xxx_vtu_cmd(struct dsa_switch *ds, u16 op)
{
	int ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_OP, op);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_vtu_wait(ds);
}

static int _mv88e6xxx_vtu_stu_flush(struct dsa_switch *ds)
{
	int ret;

	ret = _mv88e6xxx_vtu_wait(ds);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_FLUSH_ALL);
}
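
/* The VTU and STU share the three GLOBAL_VTU_DATA registers: each register
 * packs four 4-bit per-port fields.  The low two bits of a field hold the
 * VTU member tag (nibble offset 0) and the upper two bits the STU port
 * state (nibble offset 2), hence the nibble_offset argument below.
 */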
static int _mv88e6xxx_vtu_stu_data_read(struct dsa_switch *ds,
					struct mv88e6xxx_vtu_stu_entry *entry,
					unsigned int nibble_offset)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u16 regs[3];
	int i;
	int ret;

	for (i = 0; i < 3; ++i) {
		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
					  GLOBAL_VTU_DATA_0_3 + i);
		if (ret < 0)
			return ret;

		regs[i] = ret;
	}

	for (i = 0; i < ps->num_ports; ++i) {
		unsigned int shift = (i % 4) * 4 + nibble_offset;
		u16 reg = regs[i / 4];

		entry->data[i] = (reg >> shift) & GLOBAL_VTU_STU_DATA_MASK;
	}

	return 0;
}

static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds,
					 struct mv88e6xxx_vtu_stu_entry *entry,
					 unsigned int nibble_offset)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u16 regs[3] = { 0 };
	int i;
	int ret;

	for (i = 0; i < ps->num_ports; ++i) {
		unsigned int shift = (i % 4) * 4 + nibble_offset;
		u8 data = entry->data[i];

		regs[i / 4] |= (data & GLOBAL_VTU_STU_DATA_MASK) << shift;
	}

	for (i = 0; i < 3; ++i) {
		ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL,
					   GLOBAL_VTU_DATA_0_3 + i, regs[i]);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds, u16 vid,
				  struct mv88e6xxx_vtu_stu_entry *entry)
{
	struct mv88e6xxx_vtu_stu_entry next = { 0 };
	int ret;

	ret = _mv88e6xxx_vtu_wait(ds);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID,
				   vid & GLOBAL_VTU_VID_MASK);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_GET_NEXT);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
	if (ret < 0)
		return ret;

	next.vid = ret & GLOBAL_VTU_VID_MASK;
	next.valid = !!(ret & GLOBAL_VTU_VID_VALID);

	if (next.valid) {
		ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 0);
		if (ret < 0)
			return ret;

		if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
		    mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
			ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
						  GLOBAL_VTU_FID);
			if (ret < 0)
				return ret;

			next.fid = ret & GLOBAL_VTU_FID_MASK;

			ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
						  GLOBAL_VTU_SID);
			if (ret < 0)
				return ret;

			next.sid = ret & GLOBAL_VTU_SID_MASK;
		}
	}

	*entry = next;
	return 0;
}

static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds,
				    struct mv88e6xxx_vtu_stu_entry *entry)
{
	u16 reg = 0;
	int ret;

	ret = _mv88e6xxx_vtu_wait(ds);
	if (ret < 0)
		return ret;

	if (!entry->valid)
		goto loadpurge;

	/* Write port member tags */
	ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 0);
	if (ret < 0)
		return ret;

	if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
	    mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
		reg = entry->sid & GLOBAL_VTU_SID_MASK;
		ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
		if (ret < 0)
			return ret;

		reg = entry->fid & GLOBAL_VTU_FID_MASK;
		ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_FID, reg);
		if (ret < 0)
			return ret;
	}

	reg = GLOBAL_VTU_VID_VALID;
loadpurge:
	reg |= entry->vid & GLOBAL_VTU_VID_MASK;
	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_LOAD_PURGE);
}

static int _mv88e6xxx_stu_getnext(struct dsa_switch *ds, u8 sid,
				  struct mv88e6xxx_vtu_stu_entry *entry)
{
	struct mv88e6xxx_vtu_stu_entry next = { 0 };
	int ret;

	ret = _mv88e6xxx_vtu_wait(ds);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID,
				   sid & GLOBAL_VTU_SID_MASK);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_GET_NEXT);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_SID);
	if (ret < 0)
		return ret;

	next.sid = ret & GLOBAL_VTU_SID_MASK;

	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
	if (ret < 0)
		return ret;

	next.valid = !!(ret & GLOBAL_VTU_VID_VALID);

	if (next.valid) {
		ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 2);
		if (ret < 0)
			return ret;
	}

	*entry = next;
	return 0;
}

static int _mv88e6xxx_stu_loadpurge(struct dsa_switch *ds,
				    struct mv88e6xxx_vtu_stu_entry *entry)
{
	u16 reg = 0;
	int ret;

	ret = _mv88e6xxx_vtu_wait(ds);
	if (ret < 0)
		return ret;

	if (!entry->valid)
		goto loadpurge;

	/* Write port states */
	ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 2);
	if (ret < 0)
		return ret;

	reg = GLOBAL_VTU_VID_VALID;
loadpurge:
	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
	if (ret < 0)
		return ret;

	reg = entry->sid & GLOBAL_VTU_SID_MASK;
	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_LOAD_PURGE);
}
  1241. static int _mv88e6xxx_vlan_init(struct dsa_switch *ds, u16 vid,
  1242. struct mv88e6xxx_vtu_stu_entry *entry)
  1243. {
  1244. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  1245. struct mv88e6xxx_vtu_stu_entry vlan = {
  1246. .valid = true,
  1247. .vid = vid,
  1248. };
  1249. int i;
  1250. /* exclude all ports except the CPU */
  1251. for (i = 0; i < ps->num_ports; ++i)
  1252. vlan.data[i] = dsa_is_cpu_port(ds, i) ?
  1253. GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED :
  1254. GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
  1255. if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
  1256. mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
  1257. struct mv88e6xxx_vtu_stu_entry vstp;
  1258. int err;
  1259. /* Adding a VTU entry requires a valid STU entry. As VSTP is not
  1260. * implemented, only one STU entry is needed to cover all VTU
  1261. * entries. Thus, validate the SID 0.
  1262. */
  1263. vlan.sid = 0;
  1264. err = _mv88e6xxx_stu_getnext(ds, GLOBAL_VTU_SID_MASK, &vstp);
  1265. if (err)
  1266. return err;
  1267. if (vstp.sid != vlan.sid || !vstp.valid) {
  1268. memset(&vstp, 0, sizeof(vstp));
  1269. vstp.valid = true;
  1270. vstp.sid = vlan.sid;
  1271. err = _mv88e6xxx_stu_loadpurge(ds, &vstp);
  1272. if (err)
  1273. return err;
  1274. }
  1275. /* Non-bridged ports and bridge groups use FIDs from 1 to
  1276. * num_ports; VLANs use FIDs from num_ports+1 to 4095.
  1277. */
  1278. vlan.fid = find_next_zero_bit(ps->fid_bitmap, VLAN_N_VID,
  1279. ps->num_ports + 1);
  1280. if (unlikely(vlan.fid == VLAN_N_VID)) {
  1281. pr_err("no more FID available for VLAN %d\n", vid);
  1282. return -ENOSPC;
  1283. }
  1284. err = _mv88e6xxx_flush_fid(ds, vlan.fid);
  1285. if (err)
  1286. return err;
  1287. set_bit(vlan.fid, ps->fid_bitmap);
  1288. }
  1289. *entry = vlan;
  1290. return 0;
  1291. }
  1292. int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid,
  1293. bool untagged)
  1294. {
  1295. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  1296. struct mv88e6xxx_vtu_stu_entry vlan;
  1297. int err;
  1298. mutex_lock(&ps->smi_mutex);
  1299. err = _mv88e6xxx_vtu_getnext(ds, vid - 1, &vlan);
  1300. if (err)
  1301. goto unlock;
  1302. if (vlan.vid != vid || !vlan.valid) {
  1303. err = _mv88e6xxx_vlan_init(ds, vid, &vlan);
  1304. if (err)
  1305. goto unlock;
  1306. }
  1307. vlan.data[port] = untagged ?
  1308. GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED :
  1309. GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED;
  1310. err = _mv88e6xxx_vtu_loadpurge(ds, &vlan);
  1311. unlock:
  1312. mutex_unlock(&ps->smi_mutex);
  1313. return err;
  1314. }
  1315. int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
  1316. {
  1317. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  1318. struct mv88e6xxx_vtu_stu_entry vlan;
  1319. bool keep = false;
  1320. int i, err;
  1321. mutex_lock(&ps->smi_mutex);
  1322. err = _mv88e6xxx_vtu_getnext(ds, vid - 1, &vlan);
  1323. if (err)
  1324. goto unlock;
  1325. if (vlan.vid != vid || !vlan.valid ||
  1326. vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
  1327. err = -ENOENT;
  1328. goto unlock;
  1329. }
  1330. vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
  1331. /* keep the VLAN unless all ports are excluded */
  1332. for (i = 0; i < ps->num_ports; ++i) {
  1333. if (dsa_is_cpu_port(ds, i))
  1334. continue;
  1335. if (vlan.data[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
  1336. keep = true;
  1337. break;
  1338. }
  1339. }
  1340. vlan.valid = keep;
  1341. err = _mv88e6xxx_vtu_loadpurge(ds, &vlan);
  1342. if (err)
  1343. goto unlock;
  1344. if (!keep)
  1345. clear_bit(vlan.fid, ps->fid_bitmap);
  1346. unlock:
  1347. mutex_unlock(&ps->smi_mutex);
  1348. return err;
  1349. }
  1350. static int _mv88e6xxx_port_vtu_getnext(struct dsa_switch *ds, int port, u16 vid,
  1351. struct mv88e6xxx_vtu_stu_entry *entry)
  1352. {
  1353. int err;
  1354. do {
  1355. if (vid == 4095)
  1356. return -ENOENT;
  1357. err = _mv88e6xxx_vtu_getnext(ds, vid, entry);
  1358. if (err)
  1359. return err;
  1360. if (!entry->valid)
  1361. return -ENOENT;
  1362. vid = entry->vid;
  1363. } while (entry->data[port] != GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED &&
  1364. entry->data[port] != GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED);
  1365. return 0;
  1366. }
int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid,
			   unsigned long *ports, unsigned long *untagged)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct mv88e6xxx_vtu_stu_entry next;
	int port;
	int err;

	if (*vid == 4095)
		return -ENOENT;

	mutex_lock(&ps->smi_mutex);
	err = _mv88e6xxx_vtu_getnext(ds, *vid, &next);
	mutex_unlock(&ps->smi_mutex);

	if (err)
		return err;

	if (!next.valid)
		return -ENOENT;

	*vid = next.vid;

	for (port = 0; port < ps->num_ports; ++port) {
		clear_bit(port, ports);
		clear_bit(port, untagged);

		if (dsa_is_cpu_port(ds, port))
			continue;

		if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED ||
		    next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
			set_bit(port, ports);

		if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
			set_bit(port, untagged);
	}

	return 0;
}
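
/* Helpers to write and read the MAC address held in the three global
 * ATU MAC registers, used by the ATU Load and GetNext operations.
 */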
static int _mv88e6xxx_atu_mac_write(struct dsa_switch *ds,
				    const unsigned char *addr)
{
	int i, ret;

	for (i = 0; i < 3; i++) {
		ret = _mv88e6xxx_reg_write(
			ds, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
			(addr[i * 2] << 8) | addr[i * 2 + 1]);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static int _mv88e6xxx_atu_mac_read(struct dsa_switch *ds, unsigned char *addr)
{
	int i, ret;

	for (i = 0; i < 3; i++) {
		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
					  GLOBAL_ATU_MAC_01 + i);
		if (ret < 0)
			return ret;
		addr[i * 2] = ret >> 8;
		addr[i * 2 + 1] = ret & 0xff;
	}

	return 0;
}
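
/* Load a single entry into the address database selected by entry->fid,
 * or purge it when the state is UNUSED.
 */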
static int _mv88e6xxx_atu_load(struct dsa_switch *ds,
			       struct mv88e6xxx_atu_entry *entry)
{
	u16 reg = 0;
	int ret;

	ret = _mv88e6xxx_atu_wait(ds);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_atu_mac_write(ds, entry->mac);
	if (ret < 0)
		return ret;

	if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
		unsigned int mask, shift;

		if (entry->trunk) {
			reg |= GLOBAL_ATU_DATA_TRUNK;
			mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
			shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
		} else {
			mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
			shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
		}

		reg |= (entry->portv_trunkid << shift) & mask;
	}

	reg |= entry->state & GLOBAL_ATU_DATA_STATE_MASK;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA, reg);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_atu_cmd(ds, entry->fid, GLOBAL_ATU_OP_LOAD_DB);
}
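
/* Map a VID on the given port to its FID: VID 0 means the port's own
 * default address database, any other VID is looked up in the VTU.
 */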
static int _mv88e6xxx_port_vid_to_fid(struct dsa_switch *ds, int port, u16 vid)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct mv88e6xxx_vtu_stu_entry vlan;
	int err;

	if (vid == 0)
		return ps->fid[port];

	err = _mv88e6xxx_port_vtu_getnext(ds, port, vid - 1, &vlan);
	if (err)
		return err;

	if (vlan.vid == vid)
		return vlan.fid;

	return -ENOENT;
}
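
/* Program an FDB entry for the given port and VID into the corresponding
 * address database; an UNUSED state removes the entry.
 */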
static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port,
				    const unsigned char *addr, u16 vid,
				    u8 state)
{
	struct mv88e6xxx_atu_entry entry = { 0 };
	int ret;

	ret = _mv88e6xxx_port_vid_to_fid(ds, port, vid);
	if (ret < 0)
		return ret;
	entry.fid = ret;

	entry.state = state;
	ether_addr_copy(entry.mac, addr);
	if (state != GLOBAL_ATU_DATA_STATE_UNUSED) {
		entry.trunk = false;
		entry.portv_trunkid = BIT(port);
	}

	return _mv88e6xxx_atu_load(ds, &entry);
}

int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
			   const unsigned char *addr, u16 vid)
{
	int state = is_multicast_ether_addr(addr) ?
		GLOBAL_ATU_DATA_STATE_MC_STATIC :
		GLOBAL_ATU_DATA_STATE_UC_STATIC;
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_port_fdb_load(ds, port, addr, vid, state);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
			   const unsigned char *addr, u16 vid)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_port_fdb_load(ds, port, addr, vid,
				       GLOBAL_ATU_DATA_STATE_UNUSED);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}
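
/* Issue an ATU GetNext operation on the given database, starting from
 * @addr, and decode the returned MAC, state and port vector/trunk ID.
 */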
static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
				  const unsigned char *addr,
				  struct mv88e6xxx_atu_entry *entry)
{
	struct mv88e6xxx_atu_entry next = { 0 };
	int ret;

	next.fid = fid;

	ret = _mv88e6xxx_atu_wait(ds);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_atu_mac_write(ds, addr);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_atu_mac_read(ds, next.mac);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
	if (ret < 0)
		return ret;

	next.state = ret & GLOBAL_ATU_DATA_STATE_MASK;
	if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) {
		unsigned int mask, shift;

		if (ret & GLOBAL_ATU_DATA_TRUNK) {
			next.trunk = true;
			mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
			shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
		} else {
			next.trunk = false;
			mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
			shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
		}

		next.portv_trunkid = (ret & mask) >> shift;
	}

	*entry = next;
	return 0;
}

/* get next entry for port */
int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
			       unsigned char *addr, u16 *vid, bool *is_static)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct mv88e6xxx_atu_entry next;
	u16 fid;
	int ret;

	mutex_lock(&ps->smi_mutex);

	ret = _mv88e6xxx_port_vid_to_fid(ds, port, *vid);
	if (ret < 0)
		goto unlock;
	fid = ret;

	do {
		if (is_broadcast_ether_addr(addr)) {
			struct mv88e6xxx_vtu_stu_entry vtu;

			ret = _mv88e6xxx_port_vtu_getnext(ds, port, *vid, &vtu);
			if (ret < 0)
				goto unlock;

			*vid = vtu.vid;
			fid = vtu.fid;
		}

		ret = _mv88e6xxx_atu_getnext(ds, fid, addr, &next);
		if (ret < 0)
			goto unlock;

		ether_addr_copy(addr, next.mac);

		if (next.state == GLOBAL_ATU_DATA_STATE_UNUSED)
			continue;
	} while (next.trunk || (next.portv_trunkid & BIT(port)) == 0);

	*is_static = next.state == (is_multicast_ether_addr(addr) ?
				    GLOBAL_ATU_DATA_STATE_MC_STATIC :
				    GLOBAL_ATU_DATA_STATE_UC_STATIC);
unlock:
	mutex_unlock(&ps->smi_mutex);

	return ret;
}
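
/* Deferred work which applies the STP state changes recorded in
 * port_state_update_mask to the corresponding ports.
 */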
static void mv88e6xxx_bridge_work(struct work_struct *work)
{
	struct mv88e6xxx_priv_state *ps;
	struct dsa_switch *ds;
	int port;

	ps = container_of(work, struct mv88e6xxx_priv_state, bridge_work);
	ds = ((struct dsa_switch *)ps) - 1;

	while (ps->port_state_update_mask) {
		port = __ffs(ps->port_state_update_mask);
		clear_bit(port, &ps->port_state_update_mask);
		mv88e6xxx_set_port_state(ds, port, ps->port_state[port]);
	}
}
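
/* Bring a single port into its default operating state: force the link
 * parameters of CPU and DSA ports, and program the port control, VLAN
 * map, default VID and the remaining per-port policy registers.
 */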
static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret, fid;
	u16 reg;

	mutex_lock(&ps->smi_mutex);

	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
	    mv88e6xxx_6065_family(ds) || mv88e6xxx_6320_family(ds)) {
		/* MAC Forcing register: don't force link, speed,
		 * duplex or flow control state to any particular
		 * values on physical ports, but force the CPU port
		 * and all DSA ports to their maximum bandwidth and
		 * full duplex.
		 */
		ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
		if (ret < 0)
			goto abort;
		reg = ret;

		if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
			reg &= ~PORT_PCS_CTRL_UNFORCED;
			reg |= PORT_PCS_CTRL_FORCE_LINK |
				PORT_PCS_CTRL_LINK_UP |
				PORT_PCS_CTRL_DUPLEX_FULL |
				PORT_PCS_CTRL_FORCE_DUPLEX;
			if (mv88e6xxx_6065_family(ds))
				reg |= PORT_PCS_CTRL_100;
			else
				reg |= PORT_PCS_CTRL_1000;
		} else {
			reg |= PORT_PCS_CTRL_UNFORCED;
		}

		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_PCS_CTRL, reg);
		if (ret)
			goto abort;
	}

	/* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
	 * disable Header mode, enable IGMP/MLD snooping, disable VLAN
	 * tunneling, determine priority by looking at 802.1p and IP
	 * priority fields (IP prio has precedence), and set STP state
	 * to Forwarding.
	 *
	 * If this is the CPU link, use DSA or EDSA tagging depending
	 * on which tagging mode was configured.
	 *
	 * If this is a link to another switch, use DSA tagging mode.
	 *
	 * If this is the upstream port for this switch, enable
	 * forwarding of unknown unicasts and multicasts.
	 */
	reg = 0;
	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
	    mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds))
		reg = PORT_CONTROL_IGMP_MLD_SNOOP |
			PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
			PORT_CONTROL_STATE_FORWARDING;

	if (dsa_is_cpu_port(ds, port)) {
		if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
			reg |= PORT_CONTROL_DSA_TAG;
		if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
		    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
		    mv88e6xxx_6320_family(ds)) {
			if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
				reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
			else
				reg |= PORT_CONTROL_FRAME_MODE_DSA;
			reg |= PORT_CONTROL_FORWARD_UNKNOWN |
				PORT_CONTROL_FORWARD_UNKNOWN_MC;
		}

		if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
		    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
		    mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
		    mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds)) {
			if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
				reg |= PORT_CONTROL_EGRESS_ADD_TAG;
		}
	}

	if (dsa_is_dsa_port(ds, port)) {
		if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
			reg |= PORT_CONTROL_DSA_TAG;
		if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
		    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
		    mv88e6xxx_6320_family(ds)) {
			reg |= PORT_CONTROL_FRAME_MODE_DSA;
		}

		if (port == dsa_upstream_port(ds))
			reg |= PORT_CONTROL_FORWARD_UNKNOWN |
				PORT_CONTROL_FORWARD_UNKNOWN_MC;
	}

	if (reg) {
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_CONTROL, reg);
		if (ret)
			goto abort;
	}

	/* Port Control 2: don't force a good FCS, set the maximum frame size to
	 * 10240 bytes, enable secure 802.1q tags, don't discard tagged or
	 * untagged frames on this port, do a destination address lookup on all
	 * received packets as usual, disable ARP mirroring and don't send a
	 * copy of all transmitted/received frames on this port to the CPU.
	 */
	reg = 0;
	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
	    mv88e6xxx_6095_family(ds) || mv88e6xxx_6320_family(ds))
		reg = PORT_CONTROL_2_MAP_DA;

	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6320_family(ds))
		reg |= PORT_CONTROL_2_JUMBO_10240;

	if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) {
		/* Set the upstream port this port should use */
		reg |= dsa_upstream_port(ds);
		/* enable forwarding of unknown multicast addresses to
		 * the upstream port
		 */
		if (port == dsa_upstream_port(ds))
			reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
	}

	reg |= PORT_CONTROL_2_8021Q_FALLBACK;

	if (reg) {
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_CONTROL_2, reg);
		if (ret)
			goto abort;
	}

	/* Port Association Vector: when learning source addresses
	 * of packets, add the address to the address database using
	 * a port bitmap that has only the bit for this port set and
	 * the other bits clear.
	 */
	ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR,
				   1 << port);
	if (ret)
		goto abort;

	/* Egress rate control 2: disable egress rate control. */
	ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_RATE_CONTROL_2,
				   0x0000);
	if (ret)
		goto abort;

	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
	    mv88e6xxx_6320_family(ds)) {
		/* Do not limit the period of time that this port can
		 * be paused for by the remote end or the period of
		 * time that this port can pause the remote end.
		 */
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_PAUSE_CTRL, 0x0000);
		if (ret)
			goto abort;

		/* Port ATU control: disable limiting the number of
		 * address database entries that this port is allowed
		 * to use.
		 */
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_ATU_CONTROL, 0x0000);
		if (ret)
			goto abort;

		/* Priority Override: disable DA, SA and VTU priority
		 * override.
		 */
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_PRI_OVERRIDE, 0x0000);
		if (ret)
			goto abort;

		/* Port Ethertype: use the Ethertype DSA Ethertype
		 * value.
		 */
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_ETH_TYPE, ETH_P_EDSA);
		if (ret)
			goto abort;

		/* Tag Remap: use an identity 802.1p prio -> switch
		 * prio mapping.
		 */
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_TAG_REGMAP_0123, 0x3210);
		if (ret)
			goto abort;

		/* Tag Remap 2: use an identity 802.1p prio -> switch
		 * prio mapping.
		 */
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_TAG_REGMAP_4567, 0x7654);
		if (ret)
			goto abort;
	}

	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
	    mv88e6xxx_6320_family(ds)) {
		/* Rate Control: disable ingress rate limiting. */
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_RATE_CONTROL, 0x0001);
		if (ret)
			goto abort;
	}

	/* Port Control 1: disable trunking, disable sending
	 * learning messages to this port.
	 */
	ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1, 0x0000);
	if (ret)
		goto abort;

	/* Port based VLAN map: give each port its own address
	 * database, allow the CPU port to talk to each of the 'real'
	 * ports, and allow each of the 'real' ports to only talk to
	 * the upstream port.
	 */
	fid = port + 1;
	ps->fid[port] = fid;
	set_bit(fid, ps->fid_bitmap);

	if (!dsa_is_cpu_port(ds, port))
		ps->bridge_mask[fid] = 1 << port;

	ret = _mv88e6xxx_update_port_config(ds, port);
	if (ret)
		goto abort;

	/* Default VLAN ID and priority: don't set a default VLAN
	 * ID, and set the default packet priority to zero.
	 */
	ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
				   0x0000);
abort:
	mutex_unlock(&ps->smi_mutex);
	return ret;
}

int mv88e6xxx_setup_ports(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;
	int i;

	for (i = 0; i < ps->num_ports; i++) {
		ret = mv88e6xxx_setup_port(ds, i);
		if (ret < 0)
			return ret;
	}
	return 0;
}
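
/* debugfs "regs" file: dump the first 32 global, global2 and per-port
 * registers in a table.
 */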
static int mv88e6xxx_regs_show(struct seq_file *s, void *p)
{
	struct dsa_switch *ds = s->private;
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int reg, port;

	seq_puts(s, " GLOBAL GLOBAL2 ");
	for (port = 0; port < ps->num_ports; port++)
		seq_printf(s, " %2d ", port);
	seq_puts(s, "\n");

	for (reg = 0; reg < 32; reg++) {
		seq_printf(s, "%2x: ", reg);
		seq_printf(s, " %4x %4x ",
			   mv88e6xxx_reg_read(ds, REG_GLOBAL, reg),
			   mv88e6xxx_reg_read(ds, REG_GLOBAL2, reg));

		for (port = 0; port < ps->num_ports; port++)
			seq_printf(s, "%4x ",
				   mv88e6xxx_reg_read(ds, REG_PORT(port), reg));
		seq_puts(s, "\n");
	}

	return 0;
}

static int mv88e6xxx_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, mv88e6xxx_regs_show, inode->i_private);
}

static const struct file_operations mv88e6xxx_regs_fops = {
	.open		= mv88e6xxx_regs_open,
	.read		= seq_read,
	.llseek		= no_llseek,
	.release	= single_release,
	.owner		= THIS_MODULE,
};

static void mv88e6xxx_atu_show_header(struct seq_file *s)
{
	seq_puts(s, "DB T/P Vec State Addr\n");
}

static void mv88e6xxx_atu_show_entry(struct seq_file *s, int dbnum,
				     unsigned char *addr, int data)
{
	bool trunk = !!(data & GLOBAL_ATU_DATA_TRUNK);
	int portvec = ((data & GLOBAL_ATU_DATA_PORT_VECTOR_MASK) >>
		       GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT);
	int state = data & GLOBAL_ATU_DATA_STATE_MASK;

	seq_printf(s, "%03x %5s %10pb %x %pM\n",
		   dbnum, (trunk ? "Trunk" : "Port"), &portvec, state, addr);
}
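
/* Walk one address database with repeated GetNext operations, starting
 * from the broadcast address, and print every entry found.
 */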
static int mv88e6xxx_atu_show_db(struct seq_file *s, struct dsa_switch *ds,
				 int dbnum)
{
	unsigned char bcast[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	unsigned char addr[6];
	int ret, data, state;

	ret = _mv88e6xxx_atu_mac_write(ds, bcast);
	if (ret < 0)
		return ret;

	do {
		ret = _mv88e6xxx_atu_cmd(ds, dbnum, GLOBAL_ATU_OP_GET_NEXT_DB);
		if (ret < 0)
			return ret;

		data = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
		if (data < 0)
			return data;

		state = data & GLOBAL_ATU_DATA_STATE_MASK;
		if (state == GLOBAL_ATU_DATA_STATE_UNUSED)
			break;

		ret = _mv88e6xxx_atu_mac_read(ds, addr);
		if (ret < 0)
			return ret;

		mv88e6xxx_atu_show_entry(s, dbnum, addr, data);
	} while (state != GLOBAL_ATU_DATA_STATE_UNUSED);

	return 0;
}

static int mv88e6xxx_atu_show(struct seq_file *s, void *p)
{
	struct dsa_switch *ds = s->private;
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int dbnum;

	mv88e6xxx_atu_show_header(s);

	for (dbnum = 0; dbnum < 255; dbnum++) {
		mutex_lock(&ps->smi_mutex);
		mv88e6xxx_atu_show_db(s, ds, dbnum);
		mutex_unlock(&ps->smi_mutex);
	}

	return 0;
}

static int mv88e6xxx_atu_open(struct inode *inode, struct file *file)
{
	return single_open(file, mv88e6xxx_atu_show, inode->i_private);
}

static const struct file_operations mv88e6xxx_atu_fops = {
	.open		= mv88e6xxx_atu_open,
	.read		= seq_read,
	.llseek		= no_llseek,
	.release	= single_release,
	.owner		= THIS_MODULE,
};
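
/* debugfs "stats" file: print every hardware statistics counter for all
 * ports.
 */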
static void mv88e6xxx_stats_show_header(struct seq_file *s,
					struct mv88e6xxx_priv_state *ps)
{
	int port;

	seq_puts(s, " Statistic ");
	for (port = 0; port < ps->num_ports; port++)
		seq_printf(s, "Port %2d ", port);
	seq_puts(s, "\n");
}

static int mv88e6xxx_stats_show(struct seq_file *s, void *p)
{
	struct dsa_switch *ds = s->private;
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct mv88e6xxx_hw_stat *stats = mv88e6xxx_hw_stats;
	int port, stat, max_stats;
	uint64_t value;

	if (have_sw_in_discards(ds))
		max_stats = ARRAY_SIZE(mv88e6xxx_hw_stats);
	else
		max_stats = ARRAY_SIZE(mv88e6xxx_hw_stats) - 3;

	mv88e6xxx_stats_show_header(s, ps);

	mutex_lock(&ps->smi_mutex);

	for (stat = 0; stat < max_stats; stat++) {
		seq_printf(s, "%19s: ", stats[stat].string);
		for (port = 0; port < ps->num_ports; port++) {
			_mv88e6xxx_stats_snapshot(ds, port);
			value = _mv88e6xxx_get_ethtool_stat(ds, stat, stats,
							    port);
			seq_printf(s, "%8llu ", value);
		}
		seq_puts(s, "\n");
	}

	mutex_unlock(&ps->smi_mutex);

	return 0;
}

static int mv88e6xxx_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, mv88e6xxx_stats_show, inode->i_private);
}

static const struct file_operations mv88e6xxx_stats_fops = {
	.open		= mv88e6xxx_stats_open,
	.read		= seq_read,
	.llseek		= no_llseek,
	.release	= single_release,
	.owner		= THIS_MODULE,
};
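
/* debugfs "device_map" file: dump the DSA routing table from the Device
 * Mapping register.
 */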
static int mv88e6xxx_device_map_show(struct seq_file *s, void *p)
{
	struct dsa_switch *ds = s->private;
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int target, ret;

	seq_puts(s, "Target Port\n");

	mutex_lock(&ps->smi_mutex);
	for (target = 0; target < 32; target++) {
		ret = _mv88e6xxx_reg_write(
			ds, REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
			target << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT);
		if (ret < 0)
			goto out;
		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL2,
					  GLOBAL2_DEVICE_MAPPING);
		seq_printf(s, " %2d %2d\n", target,
			   ret & GLOBAL2_DEVICE_MAPPING_PORT_MASK);
	}
out:
	mutex_unlock(&ps->smi_mutex);

	return 0;
}

static int mv88e6xxx_device_map_open(struct inode *inode, struct file *file)
{
	return single_open(file, mv88e6xxx_device_map_show, inode->i_private);
}

static const struct file_operations mv88e6xxx_device_map_fops = {
	.open		= mv88e6xxx_device_map_open,
	.read		= seq_read,
	.llseek		= no_llseek,
	.release	= single_release,
	.owner		= THIS_MODULE,
};
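
/* debugfs "scratch" file: dump the Scratch & Misc register space. */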
static int mv88e6xxx_scratch_show(struct seq_file *s, void *p)
{
	struct dsa_switch *ds = s->private;
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int reg, ret;

	seq_puts(s, "Register Value\n");

	mutex_lock(&ps->smi_mutex);
	for (reg = 0; reg < 0x80; reg++) {
		ret = _mv88e6xxx_reg_write(
			ds, REG_GLOBAL2, GLOBAL2_SCRATCH_MISC,
			reg << GLOBAL2_SCRATCH_REGISTER_SHIFT);
		if (ret < 0)
			goto out;

		ret = _mv88e6xxx_scratch_wait(ds);
		if (ret < 0)
			goto out;

		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL2,
					  GLOBAL2_SCRATCH_MISC);
		seq_printf(s, " %2x %2x\n", reg,
			   ret & GLOBAL2_SCRATCH_VALUE_MASK);
	}
out:
	mutex_unlock(&ps->smi_mutex);

	return 0;
}

static int mv88e6xxx_scratch_open(struct inode *inode, struct file *file)
{
	return single_open(file, mv88e6xxx_scratch_show, inode->i_private);
}

static const struct file_operations mv88e6xxx_scratch_fops = {
	.open		= mv88e6xxx_scratch_open,
	.read		= seq_read,
	.llseek		= no_llseek,
	.release	= single_release,
	.owner		= THIS_MODULE,
};
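
/* Common one-time setup: read the switch ID, initialise the register
 * mutex and bridge work, and create the debugfs files.
 */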
int mv88e6xxx_setup_common(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	char *name;

	mutex_init(&ps->smi_mutex);

	ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0;

	INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);

	name = kasprintf(GFP_KERNEL, "dsa%d", ds->index);
	ps->dbgfs = debugfs_create_dir(name, NULL);
	kfree(name);

	debugfs_create_file("regs", S_IRUGO, ps->dbgfs, ds,
			    &mv88e6xxx_regs_fops);

	debugfs_create_file("atu", S_IRUGO, ps->dbgfs, ds,
			    &mv88e6xxx_atu_fops);

	debugfs_create_file("stats", S_IRUGO, ps->dbgfs, ds,
			    &mv88e6xxx_stats_fops);

	debugfs_create_file("device_map", S_IRUGO, ps->dbgfs, ds,
			    &mv88e6xxx_device_map_fops);

	debugfs_create_file("scratch", S_IRUGO, ps->dbgfs, ds,
			    &mv88e6xxx_scratch_fops);

	return 0;
}
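
/* Program the switch-wide registers: address aging, priority mappings,
 * management frame trapping, the DSA routing table, trunking defaults,
 * and flush the statistics counters and the VTU/STU.
 */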
int mv88e6xxx_setup_global(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;
	int i;

	/* Set the default address aging time to 5 minutes, and
	 * enable address learn messages to be sent to all message
	 * ports.
	 */
	REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
		  0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL);

	/* Configure the IP ToS mapping registers. */
	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);

	/* Configure the IEEE 802.1p priority mapping register. */
	REG_WRITE(REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);

	/* Send all frames with destination addresses matching
	 * 01:80:c2:00:00:0x to the CPU port.
	 */
	REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff);

	/* Ignore removed tag data on doubly tagged packets, disable
	 * flow control messages, force flow control priority to the
	 * highest, and send all special multicast frames to the CPU
	 * port at the highest priority.
	 */
	REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
		  0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 |
		  GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI);

	/* Program the DSA routing table. */
	for (i = 0; i < 32; i++) {
		int nexthop = 0x1f;

		if (ds->pd->rtable &&
		    i != ds->index && i < ds->dst->pd->nr_chips)
			nexthop = ds->pd->rtable[i] & 0x1f;

		REG_WRITE(REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
			  GLOBAL2_DEVICE_MAPPING_UPDATE |
			  (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) |
			  nexthop);
	}

	/* Clear all trunk masks. */
	for (i = 0; i < 8; i++)
		REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MASK,
			  0x8000 | (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
			  ((1 << ps->num_ports) - 1));

	/* Clear all trunk mappings. */
	for (i = 0; i < 16; i++)
		REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MAPPING,
			  GLOBAL2_TRUNK_MAPPING_UPDATE |
			  (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));

	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
	    mv88e6xxx_6320_family(ds)) {
		/* Send all frames with destination addresses matching
		 * 01:80:c2:00:00:2x to the CPU port.
		 */
		REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_2X, 0xffff);

		/* Initialise cross-chip port VLAN table to reset
		 * defaults.
		 */
		REG_WRITE(REG_GLOBAL2, GLOBAL2_PVT_ADDR, 0x9000);

		/* Clear the priority override table. */
		for (i = 0; i < 16; i++)
			REG_WRITE(REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE,
				  0x8000 | (i << 8));
	}

	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
	    mv88e6xxx_6320_family(ds)) {
		/* Disable ingress rate limiting by resetting all
		 * ingress rate limit registers to their initial
		 * state.
		 */
		for (i = 0; i < ps->num_ports; i++)
			REG_WRITE(REG_GLOBAL2, GLOBAL2_INGRESS_OP,
				  0x9000 | (i << 8));
	}

	/* Clear the statistics counters for all ports */
	REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP, GLOBAL_STATS_OP_FLUSH_ALL);

	/* Wait for the flush to complete. */
	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_stats_wait(ds);
	if (ret < 0)
		goto unlock;

	/* Clear all the VTU and STU entries */
	ret = _mv88e6xxx_vtu_stu_flush(ds);
unlock:
	mutex_unlock(&ps->smi_mutex);

	return ret;
}
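
/* Disable all ports, then issue a software reset and wait for it to
 * complete, keeping the PPU active if requested.
 */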
int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
	unsigned long timeout;
	int ret;
	int i;

	/* Set all ports to the disabled state. */
	for (i = 0; i < ps->num_ports; i++) {
		ret = REG_READ(REG_PORT(i), PORT_CONTROL);
		REG_WRITE(REG_PORT(i), PORT_CONTROL, ret & 0xfffc);
	}

	/* Wait for transmit queues to drain. */
	usleep_range(2000, 4000);

	/* Reset the switch. Keep the PPU active if requested. The PPU
	 * needs to be active to support indirect phy register access
	 * through global registers 0x18 and 0x19.
	 */
	if (ppu_active)
		REG_WRITE(REG_GLOBAL, 0x04, 0xc000);
	else
		REG_WRITE(REG_GLOBAL, 0x04, 0xc400);

	/* Wait up to one second for reset to complete. */
	timeout = jiffies + 1 * HZ;
	while (time_before(jiffies, timeout)) {
		ret = REG_READ(REG_GLOBAL, 0x00);
		if ((ret & is_reset) == is_reset)
			break;
		usleep_range(1000, 2000);
	}
	if (time_after(jiffies, timeout))
		return -ETIMEDOUT;

	return 0;
}
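
/* Read or write a phy register located on a specific page, restoring
 * page 0 before returning.
 */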
int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
	if (ret < 0)
		goto error;
	ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
error:
	_mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
	mutex_unlock(&ps->smi_mutex);
	return ret;
}

int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
			     int reg, int val)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
	if (ret < 0)
		goto error;

	ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
error:
	_mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
	mutex_unlock(&ps->smi_mutex);
	return ret;
}

static int mv88e6xxx_port_to_phy_addr(struct dsa_switch *ds, int port)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	if (port >= 0 && port < ps->num_ports)
		return port;
	return -EINVAL;
}

int
mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int addr = mv88e6xxx_port_to_phy_addr(ds, port);
	int ret;

	if (addr < 0)
		return addr;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_phy_read(ds, addr, regnum);
	mutex_unlock(&ps->smi_mutex);
	return ret;
}

int
mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int addr = mv88e6xxx_port_to_phy_addr(ds, port);
	int ret;

	if (addr < 0)
		return addr;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_phy_write(ds, addr, regnum, val);
	mutex_unlock(&ps->smi_mutex);
	return ret;
}

int
mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int addr = mv88e6xxx_port_to_phy_addr(ds, port);
	int ret;

	if (addr < 0)
		return addr;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_phy_read_indirect(ds, addr, regnum);
	mutex_unlock(&ps->smi_mutex);
	return ret;
}

int
mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
			     u16 val)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int addr = mv88e6xxx_port_to_phy_addr(ds, port);
	int ret;

	if (addr < 0)
		return addr;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_phy_write_indirect(ds, addr, regnum, val);
	mutex_unlock(&ps->smi_mutex);
	return ret;
}

#ifdef CONFIG_NET_DSA_HWMON
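
/* Read the internal temperature sensor through phy page 6 on switches
 * with an 88E61xx-style sensor.
 */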
static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;
	int val;

	*temp = 0;

	mutex_lock(&ps->smi_mutex);

	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
	if (ret < 0)
		goto error;

	/* Enable temperature sensor */
	ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
	if (ret < 0)
		goto error;

	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
	if (ret < 0)
		goto error;

	/* Wait for temperature to stabilize */
	usleep_range(10000, 12000);

	val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
	if (val < 0) {
		ret = val;
		goto error;
	}

	/* Disable temperature sensor */
	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
	if (ret < 0)
		goto error;

	*temp = ((val & 0x1f) - 5) * 5;

error:
	_mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
	mutex_unlock(&ps->smi_mutex);
	return ret;
}

static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
{
	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
	int ret;

	*temp = 0;

	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27);
	if (ret < 0)
		return ret;

	*temp = (ret & 0xff) - 25;

	return 0;
}

int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
{
	if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
		return mv88e63xx_get_temp(ds, temp);

	return mv88e61xx_get_temp(ds, temp);
}

int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
{
	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
	int ret;

	if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
		return -EOPNOTSUPP;

	*temp = 0;

	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
	if (ret < 0)
		return ret;

	*temp = (((ret >> 8) & 0x1f) * 5) - 25;

	return 0;
}

int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
{
	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
	int ret;

	if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
		return -EOPNOTSUPP;

	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
	if (ret < 0)
		return ret;

	temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);

	return mv88e6xxx_phy_page_write(ds, phy, 6, 26,
					(ret & 0xe0ff) | (temp << 8));
}

int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
{
	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
	int ret;

	if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
		return -EOPNOTSUPP;

	*alarm = false;

	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
	if (ret < 0)
		return ret;

	*alarm = !!(ret & 0x40);

	return 0;
}
#endif /* CONFIG_NET_DSA_HWMON */

static int __init mv88e6xxx_init(void)
{
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
	register_switch_driver(&mv88e6131_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
	register_switch_driver(&mv88e6123_61_65_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
	register_switch_driver(&mv88e6352_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
	register_switch_driver(&mv88e6171_switch_driver);
#endif
	return 0;
}
module_init(mv88e6xxx_init);

static void __exit mv88e6xxx_cleanup(void)
{
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
	unregister_switch_driver(&mv88e6171_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
	unregister_switch_driver(&mv88e6352_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
	unregister_switch_driver(&mv88e6123_61_65_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
	unregister_switch_driver(&mv88e6131_switch_driver);
#endif
}
module_exit(mv88e6xxx_cleanup);

MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
MODULE_LICENSE("GPL");