  1. /*
  2. * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
  3. * Copyright (c) 2008 Marvell Semiconductor
  4. *
  5. * Copyright (c) 2015 CMC Electronics, Inc.
  6. * Added support for VLAN Table Unit operations
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published by
  10. * the Free Software Foundation; either version 2 of the License, or
  11. * (at your option) any later version.
  12. */
  13. #include <linux/delay.h>
  14. #include <linux/etherdevice.h>
  15. #include <linux/ethtool.h>
  16. #include <linux/if_bridge.h>
  17. #include <linux/jiffies.h>
  18. #include <linux/list.h>
  19. #include <linux/module.h>
  20. #include <linux/netdevice.h>
  21. #include <linux/gpio/consumer.h>
  22. #include <linux/phy.h>
  23. #include <net/dsa.h>
  24. #include <net/switchdev.h>
  25. #include "mv88e6xxx.h"
  26. static void assert_smi_lock(struct dsa_switch *ds)
  27. {
  28. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  29. if (unlikely(!mutex_is_locked(&ps->smi_mutex))) {
  30. dev_err(ds->master_dev, "SMI lock not held!\n");
  31. dump_stack();
  32. }
  33. }
  34. /* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
  35. * use all 32 SMI bus addresses on its SMI bus, and all switch registers
  36. * will be directly accessible on some {device address,register address}
  37. * pair. If the ADDR[4:0] pins are not strapped to zero, the switch
  38. * will only respond to SMI transactions to that specific address, and
  39. * an indirect addressing mechanism needs to be used to access its
  40. * registers.
  41. */
  42. static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
  43. {
  44. int ret;
  45. int i;
  46. for (i = 0; i < 16; i++) {
  47. ret = mdiobus_read_nested(bus, sw_addr, SMI_CMD);
  48. if (ret < 0)
  49. return ret;
  50. if ((ret & SMI_CMD_BUSY) == 0)
  51. return 0;
  52. }
  53. return -ETIMEDOUT;
  54. }
  55. static int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr,
  56. int reg)
  57. {
  58. int ret;
  59. if (sw_addr == 0)
  60. return mdiobus_read_nested(bus, addr, reg);
  61. /* Wait for the bus to become free. */
  62. ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
  63. if (ret < 0)
  64. return ret;
  65. /* Transmit the read command. */
  66. ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD,
  67. SMI_CMD_OP_22_READ | (addr << 5) | reg);
  68. if (ret < 0)
  69. return ret;
  70. /* Wait for the read command to complete. */
  71. ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
  72. if (ret < 0)
  73. return ret;
  74. /* Read the data. */
  75. ret = mdiobus_read_nested(bus, sw_addr, SMI_DATA);
  76. if (ret < 0)
  77. return ret;
  78. return ret & 0xffff;
  79. }
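/* Illustrative example (derived from the helpers above): with ADDR[4:0]
 * strapped non-zero, reading e.g. PORT_STATUS of port 0 becomes the
 * following indirect sequence, where the command word carries the device
 * address in bits 9:5 and the register number in bits 4:0:
 *
 *	mdiobus_write_nested(bus, sw_addr, SMI_CMD,
 *			     SMI_CMD_OP_22_READ |
 *			     (REG_PORT(0) << 5) | PORT_STATUS);
 *	(poll SMI_CMD until SMI_CMD_BUSY clears)
 *	val = mdiobus_read_nested(bus, sw_addr, SMI_DATA);
 */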
  80. static int _mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
  81. {
  82. struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
  83. int ret;
  84. assert_smi_lock(ds);
  85. if (bus == NULL)
  86. return -EINVAL;
  87. ret = __mv88e6xxx_reg_read(bus, ds->pd->sw_addr, addr, reg);
  88. if (ret < 0)
  89. return ret;
  90. dev_dbg(ds->master_dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
  91. addr, reg, ret);
  92. return ret;
  93. }
  94. int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
  95. {
  96. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  97. int ret;
  98. mutex_lock(&ps->smi_mutex);
  99. ret = _mv88e6xxx_reg_read(ds, addr, reg);
  100. mutex_unlock(&ps->smi_mutex);
  101. return ret;
  102. }
  103. static int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
  104. int reg, u16 val)
  105. {
  106. int ret;
  107. if (sw_addr == 0)
  108. return mdiobus_write_nested(bus, addr, reg, val);
  109. /* Wait for the bus to become free. */
  110. ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
  111. if (ret < 0)
  112. return ret;
  113. /* Transmit the data to write. */
  114. ret = mdiobus_write_nested(bus, sw_addr, SMI_DATA, val);
  115. if (ret < 0)
  116. return ret;
  117. /* Transmit the write command. */
  118. ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD,
  119. SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
  120. if (ret < 0)
  121. return ret;
  122. /* Wait for the write command to complete. */
  123. ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
  124. if (ret < 0)
  125. return ret;
  126. return 0;
  127. }
  128. static int _mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg,
  129. u16 val)
  130. {
  131. struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
  132. assert_smi_lock(ds);
  133. if (bus == NULL)
  134. return -EINVAL;
  135. dev_dbg(ds->master_dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
  136. addr, reg, val);
  137. return __mv88e6xxx_reg_write(bus, ds->pd->sw_addr, addr, reg, val);
  138. }
  139. int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
  140. {
  141. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  142. int ret;
  143. mutex_lock(&ps->smi_mutex);
  144. ret = _mv88e6xxx_reg_write(ds, addr, reg, val);
  145. mutex_unlock(&ps->smi_mutex);
  146. return ret;
  147. }
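/* Usage sketch (illustrative only): mv88e6xxx_reg_read() and
 * mv88e6xxx_reg_write() take ps->smi_mutex themselves, e.g.:
 *
 *	int status = mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
 *	if (status < 0)
 *		return status;
 *
 * Code that already holds the mutex must use the _mv88e6xxx_reg_*
 * variants instead; assert_smi_lock() catches the opposite mistake.
 */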
  148. int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
  149. {
  150. REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]);
  151. REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
  152. REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
  153. return 0;
  154. }
  155. int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
  156. {
  157. int i;
  158. int ret;
  159. for (i = 0; i < 6; i++) {
  160. int j;
  161. /* Write the MAC address byte. */
  162. REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
  163. GLOBAL2_SWITCH_MAC_BUSY | (i << 8) | addr[i]);
  164. /* Wait for the write to complete. */
  165. for (j = 0; j < 16; j++) {
  166. ret = REG_READ(REG_GLOBAL2, GLOBAL2_SWITCH_MAC);
  167. if ((ret & GLOBAL2_SWITCH_MAC_BUSY) == 0)
  168. break;
  169. }
  170. if (j == 16)
  171. return -ETIMEDOUT;
  172. }
  173. return 0;
  174. }
  175. static int _mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
  176. {
  177. if (addr >= 0)
  178. return _mv88e6xxx_reg_read(ds, addr, regnum);
  179. return 0xffff;
  180. }
  181. static int _mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum,
  182. u16 val)
  183. {
  184. if (addr >= 0)
  185. return _mv88e6xxx_reg_write(ds, addr, regnum, val);
  186. return 0;
  187. }
  188. #ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
  189. static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
  190. {
  191. int ret;
  192. unsigned long timeout;
  193. ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
  194. REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
  195. ret & ~GLOBAL_CONTROL_PPU_ENABLE);
  196. timeout = jiffies + 1 * HZ;
  197. while (time_before(jiffies, timeout)) {
  198. ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
  199. usleep_range(1000, 2000);
  200. if ((ret & GLOBAL_STATUS_PPU_MASK) !=
  201. GLOBAL_STATUS_PPU_POLLING)
  202. return 0;
  203. }
  204. return -ETIMEDOUT;
  205. }
  206. static int mv88e6xxx_ppu_enable(struct dsa_switch *ds)
  207. {
  208. int ret;
  209. unsigned long timeout;
  210. ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
  211. REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, ret | GLOBAL_CONTROL_PPU_ENABLE);
  212. timeout = jiffies + 1 * HZ;
  213. while (time_before(jiffies, timeout)) {
  214. ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
  215. usleep_range(1000, 2000);
  216. if ((ret & GLOBAL_STATUS_PPU_MASK) ==
  217. GLOBAL_STATUS_PPU_POLLING)
  218. return 0;
  219. }
  220. return -ETIMEDOUT;
  221. }
  222. static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
  223. {
  224. struct mv88e6xxx_priv_state *ps;
  225. ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work);
  226. if (mutex_trylock(&ps->ppu_mutex)) {
  227. struct dsa_switch *ds = ((struct dsa_switch *)ps) - 1;
  228. if (mv88e6xxx_ppu_enable(ds) == 0)
  229. ps->ppu_disabled = 0;
  230. mutex_unlock(&ps->ppu_mutex);
  231. }
  232. }
  233. static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
  234. {
  235. struct mv88e6xxx_priv_state *ps = (void *)_ps;
  236. schedule_work(&ps->ppu_work);
  237. }
  238. static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
  239. {
  240. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  241. int ret;
  242. mutex_lock(&ps->ppu_mutex);
  243. /* If the PHY polling unit is enabled, disable it so that
  244. * we can access the PHY registers. If it was already
  245. * disabled, cancel the timer that is going to re-enable
  246. * it.
  247. */
  248. if (!ps->ppu_disabled) {
  249. ret = mv88e6xxx_ppu_disable(ds);
  250. if (ret < 0) {
  251. mutex_unlock(&ps->ppu_mutex);
  252. return ret;
  253. }
  254. ps->ppu_disabled = 1;
  255. } else {
  256. del_timer(&ps->ppu_timer);
  257. ret = 0;
  258. }
  259. return ret;
  260. }
  261. static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
  262. {
  263. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  264. /* Schedule a timer to re-enable the PHY polling unit. */
  265. mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
  266. mutex_unlock(&ps->ppu_mutex);
  267. }
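/* Summary of the access pattern implemented above: the first
 * mv88e6xxx_ppu_access_get() disables the PHY polling unit so the PHY
 * registers can be accessed directly; each mv88e6xxx_ppu_access_put()
 * re-arms a 10 ms timer, and only when that timer fires with no new
 * access pending does ppu_work re-enable the PPU.  Back-to-back PHY
 * accesses therefore pay the disable/enable cost only once.
 */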
  268. void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
  269. {
  270. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  271. mutex_init(&ps->ppu_mutex);
  272. INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
  273. init_timer(&ps->ppu_timer);
  274. ps->ppu_timer.data = (unsigned long)ps;
  275. ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
  276. }
  277. int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum)
  278. {
  279. int ret;
  280. ret = mv88e6xxx_ppu_access_get(ds);
  281. if (ret >= 0) {
  282. ret = mv88e6xxx_reg_read(ds, addr, regnum);
  283. mv88e6xxx_ppu_access_put(ds);
  284. }
  285. return ret;
  286. }
  287. int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
  288. int regnum, u16 val)
  289. {
  290. int ret;
  291. ret = mv88e6xxx_ppu_access_get(ds);
  292. if (ret >= 0) {
  293. ret = mv88e6xxx_reg_write(ds, addr, regnum, val);
  294. mv88e6xxx_ppu_access_put(ds);
  295. }
  296. return ret;
  297. }
  298. #endif
  299. static bool mv88e6xxx_6065_family(struct dsa_switch *ds)
  300. {
  301. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  302. switch (ps->id) {
  303. case PORT_SWITCH_ID_6031:
  304. case PORT_SWITCH_ID_6061:
  305. case PORT_SWITCH_ID_6035:
  306. case PORT_SWITCH_ID_6065:
  307. return true;
  308. }
  309. return false;
  310. }
  311. static bool mv88e6xxx_6095_family(struct dsa_switch *ds)
  312. {
  313. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  314. switch (ps->id) {
  315. case PORT_SWITCH_ID_6092:
  316. case PORT_SWITCH_ID_6095:
  317. return true;
  318. }
  319. return false;
  320. }
  321. static bool mv88e6xxx_6097_family(struct dsa_switch *ds)
  322. {
  323. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  324. switch (ps->id) {
  325. case PORT_SWITCH_ID_6046:
  326. case PORT_SWITCH_ID_6085:
  327. case PORT_SWITCH_ID_6096:
  328. case PORT_SWITCH_ID_6097:
  329. return true;
  330. }
  331. return false;
  332. }
  333. static bool mv88e6xxx_6165_family(struct dsa_switch *ds)
  334. {
  335. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  336. switch (ps->id) {
  337. case PORT_SWITCH_ID_6123:
  338. case PORT_SWITCH_ID_6161:
  339. case PORT_SWITCH_ID_6165:
  340. return true;
  341. }
  342. return false;
  343. }
  344. static bool mv88e6xxx_6185_family(struct dsa_switch *ds)
  345. {
  346. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  347. switch (ps->id) {
  348. case PORT_SWITCH_ID_6121:
  349. case PORT_SWITCH_ID_6122:
  350. case PORT_SWITCH_ID_6152:
  351. case PORT_SWITCH_ID_6155:
  352. case PORT_SWITCH_ID_6182:
  353. case PORT_SWITCH_ID_6185:
  354. case PORT_SWITCH_ID_6108:
  355. case PORT_SWITCH_ID_6131:
  356. return true;
  357. }
  358. return false;
  359. }
  360. static bool mv88e6xxx_6320_family(struct dsa_switch *ds)
  361. {
  362. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  363. switch (ps->id) {
  364. case PORT_SWITCH_ID_6320:
  365. case PORT_SWITCH_ID_6321:
  366. return true;
  367. }
  368. return false;
  369. }
  370. static bool mv88e6xxx_6351_family(struct dsa_switch *ds)
  371. {
  372. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  373. switch (ps->id) {
  374. case PORT_SWITCH_ID_6171:
  375. case PORT_SWITCH_ID_6175:
  376. case PORT_SWITCH_ID_6350:
  377. case PORT_SWITCH_ID_6351:
  378. return true;
  379. }
  380. return false;
  381. }
  382. static bool mv88e6xxx_6352_family(struct dsa_switch *ds)
  383. {
  384. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  385. switch (ps->id) {
  386. case PORT_SWITCH_ID_6172:
  387. case PORT_SWITCH_ID_6176:
  388. case PORT_SWITCH_ID_6240:
  389. case PORT_SWITCH_ID_6352:
  390. return true;
  391. }
  392. return false;
  393. }
  394. /* We expect the switch to perform auto negotiation if there is a real
  395. * phy. However, in the case of a fixed link phy, we force the port
  396. * settings from the fixed link settings.
  397. */
  398. void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
  399. struct phy_device *phydev)
  400. {
  401. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  402. u32 reg;
  403. int ret;
  404. if (!phy_is_pseudo_fixed_link(phydev))
  405. return;
  406. mutex_lock(&ps->smi_mutex);
  407. ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
  408. if (ret < 0)
  409. goto out;
  410. reg = ret & ~(PORT_PCS_CTRL_LINK_UP |
  411. PORT_PCS_CTRL_FORCE_LINK |
  412. PORT_PCS_CTRL_DUPLEX_FULL |
  413. PORT_PCS_CTRL_FORCE_DUPLEX |
  414. PORT_PCS_CTRL_UNFORCED);
  415. reg |= PORT_PCS_CTRL_FORCE_LINK;
  416. if (phydev->link)
  417. reg |= PORT_PCS_CTRL_LINK_UP;
  418. if (mv88e6xxx_6065_family(ds) && phydev->speed > SPEED_100)
  419. goto out;
  420. switch (phydev->speed) {
  421. case SPEED_1000:
  422. reg |= PORT_PCS_CTRL_1000;
  423. break;
  424. case SPEED_100:
  425. reg |= PORT_PCS_CTRL_100;
  426. break;
  427. case SPEED_10:
  428. reg |= PORT_PCS_CTRL_10;
  429. break;
  430. default:
  431. pr_info("Unknown speed");
  432. goto out;
  433. }
  434. reg |= PORT_PCS_CTRL_FORCE_DUPLEX;
  435. if (phydev->duplex == DUPLEX_FULL)
  436. reg |= PORT_PCS_CTRL_DUPLEX_FULL;
  437. if ((mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds)) &&
  438. (port >= ps->num_ports - 2)) {
  439. if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
  440. reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK;
  441. if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
  442. reg |= PORT_PCS_CTRL_RGMII_DELAY_TXCLK;
  443. if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
  444. reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK |
  445. PORT_PCS_CTRL_RGMII_DELAY_TXCLK);
  446. }
  447. _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_PCS_CTRL, reg);
  448. out:
  449. mutex_unlock(&ps->smi_mutex);
  450. }
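/* For reference (illustrative fragment, standard fixed-link binding rather
 * than anything specific to this driver): a port that should be forced by
 * the code above typically carries a fixed-link node in the device tree,
 * e.g.:
 *
 *	fixed-link {
 *		speed = <1000>;
 *		full-duplex;
 *	};
 *
 * phylib then reports the port as a pseudo fixed-link PHY and
 * mv88e6xxx_adjust_link() forces link, speed and duplex to match.
 */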
  451. static int _mv88e6xxx_stats_wait(struct dsa_switch *ds)
  452. {
  453. int ret;
  454. int i;
  455. for (i = 0; i < 10; i++) {
  456. ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_OP);
  457. if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
  458. return 0;
  459. }
  460. return -ETIMEDOUT;
  461. }
  462. static int _mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
  463. {
  464. int ret;
  465. if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
  466. port = (port + 1) << 5;
  467. /* Snapshot the hardware statistics counters for this port. */
  468. ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
  469. GLOBAL_STATS_OP_CAPTURE_PORT |
  470. GLOBAL_STATS_OP_HIST_RX_TX | port);
  471. if (ret < 0)
  472. return ret;
  473. /* Wait for the snapshotting to complete. */
  474. ret = _mv88e6xxx_stats_wait(ds);
  475. if (ret < 0)
  476. return ret;
  477. return 0;
  478. }
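/* Note on the port encoding above: judging from the shift, the 6320 and
 * 6352 families expect the capture port in bits 9:5 of GLOBAL_STATS_OP,
 * numbered from 1, hence port = (port + 1) << 5; the other families take
 * the zero-based port number in the low bits of the operation directly.
 */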
  479. static void _mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
  480. {
  481. u32 _val;
  482. int ret;
  483. *val = 0;
  484. ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
  485. GLOBAL_STATS_OP_READ_CAPTURED |
  486. GLOBAL_STATS_OP_HIST_RX_TX | stat);
  487. if (ret < 0)
  488. return;
  489. ret = _mv88e6xxx_stats_wait(ds);
  490. if (ret < 0)
  491. return;
  492. ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
  493. if (ret < 0)
  494. return;
  495. _val = ret << 16;
  496. ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
  497. if (ret < 0)
  498. return;
  499. *val = _val | ret;
  500. }
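/* Each captured counter is returned as two 16-bit halves:
 * GLOBAL_STATS_COUNTER_32 holds bits 31:16 and GLOBAL_STATS_COUNTER_01
 * bits 15:0, which _mv88e6xxx_stats_read() stitches back into a single
 * 32-bit value.  64-bit statistics (sizeof_stat == 8 in the table below)
 * are fetched as two consecutive counters by
 * _mv88e6xxx_get_ethtool_stat().
 */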
  501. static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
  502. { "in_good_octets", 8, 0x00, BANK0, },
  503. { "in_bad_octets", 4, 0x02, BANK0, },
  504. { "in_unicast", 4, 0x04, BANK0, },
  505. { "in_broadcasts", 4, 0x06, BANK0, },
  506. { "in_multicasts", 4, 0x07, BANK0, },
  507. { "in_pause", 4, 0x16, BANK0, },
  508. { "in_undersize", 4, 0x18, BANK0, },
  509. { "in_fragments", 4, 0x19, BANK0, },
  510. { "in_oversize", 4, 0x1a, BANK0, },
  511. { "in_jabber", 4, 0x1b, BANK0, },
  512. { "in_rx_error", 4, 0x1c, BANK0, },
  513. { "in_fcs_error", 4, 0x1d, BANK0, },
  514. { "out_octets", 8, 0x0e, BANK0, },
  515. { "out_unicast", 4, 0x10, BANK0, },
  516. { "out_broadcasts", 4, 0x13, BANK0, },
  517. { "out_multicasts", 4, 0x12, BANK0, },
  518. { "out_pause", 4, 0x15, BANK0, },
  519. { "excessive", 4, 0x11, BANK0, },
  520. { "collisions", 4, 0x1e, BANK0, },
  521. { "deferred", 4, 0x05, BANK0, },
  522. { "single", 4, 0x14, BANK0, },
  523. { "multiple", 4, 0x17, BANK0, },
  524. { "out_fcs_error", 4, 0x03, BANK0, },
  525. { "late", 4, 0x1f, BANK0, },
  526. { "hist_64bytes", 4, 0x08, BANK0, },
  527. { "hist_65_127bytes", 4, 0x09, BANK0, },
  528. { "hist_128_255bytes", 4, 0x0a, BANK0, },
  529. { "hist_256_511bytes", 4, 0x0b, BANK0, },
  530. { "hist_512_1023bytes", 4, 0x0c, BANK0, },
  531. { "hist_1024_max_bytes", 4, 0x0d, BANK0, },
  532. { "sw_in_discards", 4, 0x10, PORT, },
  533. { "sw_in_filtered", 2, 0x12, PORT, },
  534. { "sw_out_filtered", 2, 0x13, PORT, },
  535. { "in_discards", 4, 0x00 | GLOBAL_STATS_OP_BANK_1, BANK1, },
  536. { "in_filtered", 4, 0x01 | GLOBAL_STATS_OP_BANK_1, BANK1, },
  537. { "in_accepted", 4, 0x02 | GLOBAL_STATS_OP_BANK_1, BANK1, },
  538. { "in_bad_accepted", 4, 0x03 | GLOBAL_STATS_OP_BANK_1, BANK1, },
  539. { "in_good_avb_class_a", 4, 0x04 | GLOBAL_STATS_OP_BANK_1, BANK1, },
  540. { "in_good_avb_class_b", 4, 0x05 | GLOBAL_STATS_OP_BANK_1, BANK1, },
  541. { "in_bad_avb_class_a", 4, 0x06 | GLOBAL_STATS_OP_BANK_1, BANK1, },
  542. { "in_bad_avb_class_b", 4, 0x07 | GLOBAL_STATS_OP_BANK_1, BANK1, },
  543. { "tcam_counter_0", 4, 0x08 | GLOBAL_STATS_OP_BANK_1, BANK1, },
  544. { "tcam_counter_1", 4, 0x09 | GLOBAL_STATS_OP_BANK_1, BANK1, },
  545. { "tcam_counter_2", 4, 0x0a | GLOBAL_STATS_OP_BANK_1, BANK1, },
  546. { "tcam_counter_3", 4, 0x0b | GLOBAL_STATS_OP_BANK_1, BANK1, },
  547. { "in_da_unknown", 4, 0x0e | GLOBAL_STATS_OP_BANK_1, BANK1, },
  548. { "in_management", 4, 0x0f | GLOBAL_STATS_OP_BANK_1, BANK1, },
  549. { "out_queue_0", 4, 0x10 | GLOBAL_STATS_OP_BANK_1, BANK1, },
  550. { "out_queue_1", 4, 0x11 | GLOBAL_STATS_OP_BANK_1, BANK1, },
  551. { "out_queue_2", 4, 0x12 | GLOBAL_STATS_OP_BANK_1, BANK1, },
  552. { "out_queue_3", 4, 0x13 | GLOBAL_STATS_OP_BANK_1, BANK1, },
  553. { "out_queue_4", 4, 0x14 | GLOBAL_STATS_OP_BANK_1, BANK1, },
  554. { "out_queue_5", 4, 0x15 | GLOBAL_STATS_OP_BANK_1, BANK1, },
  555. { "out_queue_6", 4, 0x16 | GLOBAL_STATS_OP_BANK_1, BANK1, },
  556. { "out_queue_7", 4, 0x17 | GLOBAL_STATS_OP_BANK_1, BANK1, },
  557. { "out_cut_through", 4, 0x18 | GLOBAL_STATS_OP_BANK_1, BANK1, },
  558. { "out_octets_a", 4, 0x1a | GLOBAL_STATS_OP_BANK_1, BANK1, },
  559. { "out_octets_b", 4, 0x1b | GLOBAL_STATS_OP_BANK_1, BANK1, },
  560. { "out_management", 4, 0x1f | GLOBAL_STATS_OP_BANK_1, BANK1, },
  561. };
  562. static bool mv88e6xxx_has_stat(struct dsa_switch *ds,
  563. struct mv88e6xxx_hw_stat *stat)
  564. {
  565. switch (stat->type) {
  566. case BANK0:
  567. return true;
  568. case BANK1:
  569. return mv88e6xxx_6320_family(ds);
  570. case PORT:
  571. return mv88e6xxx_6095_family(ds) ||
  572. mv88e6xxx_6185_family(ds) ||
  573. mv88e6xxx_6097_family(ds) ||
  574. mv88e6xxx_6165_family(ds) ||
  575. mv88e6xxx_6351_family(ds) ||
  576. mv88e6xxx_6352_family(ds);
  577. }
  578. return false;
  579. }
  580. static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds,
  581. struct mv88e6xxx_hw_stat *s,
  582. int port)
  583. {
  584. u32 low;
  585. u32 high = 0;
  586. int ret;
  587. u64 value;
  588. switch (s->type) {
  589. case PORT:
  590. ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), s->reg);
  591. if (ret < 0)
  592. return UINT64_MAX;
  593. low = ret;
  594. if (s->sizeof_stat == 4) {
  595. ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
  596. s->reg + 1);
  597. if (ret < 0)
  598. return UINT64_MAX;
  599. high = ret;
  600. }
  601. break;
  602. case BANK0:
  603. case BANK1:
  604. _mv88e6xxx_stats_read(ds, s->reg, &low);
  605. if (s->sizeof_stat == 8)
  606. _mv88e6xxx_stats_read(ds, s->reg + 1, &high);
  607. }
  608. value = (((u64)high) << 16) | low;
  609. return value;
  610. }
  611. void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
  612. {
  613. struct mv88e6xxx_hw_stat *stat;
  614. int i, j;
  615. for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
  616. stat = &mv88e6xxx_hw_stats[i];
  617. if (mv88e6xxx_has_stat(ds, stat)) {
  618. memcpy(data + j * ETH_GSTRING_LEN, stat->string,
  619. ETH_GSTRING_LEN);
  620. j++;
  621. }
  622. }
  623. }
  624. int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
  625. {
  626. struct mv88e6xxx_hw_stat *stat;
  627. int i, j;
  628. for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
  629. stat = &mv88e6xxx_hw_stats[i];
  630. if (mv88e6xxx_has_stat(ds, stat))
  631. j++;
  632. }
  633. return j;
  634. }
  635. void
  636. mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
  637. int port, uint64_t *data)
  638. {
  639. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  640. struct mv88e6xxx_hw_stat *stat;
  641. int ret;
  642. int i, j;
  643. mutex_lock(&ps->smi_mutex);
  644. ret = _mv88e6xxx_stats_snapshot(ds, port);
  645. if (ret < 0) {
  646. mutex_unlock(&ps->smi_mutex);
  647. return;
  648. }
  649. for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
  650. stat = &mv88e6xxx_hw_stats[i];
  651. if (mv88e6xxx_has_stat(ds, stat)) {
  652. data[j] = _mv88e6xxx_get_ethtool_stat(ds, stat, port);
  653. j++;
  654. }
  655. }
  656. mutex_unlock(&ps->smi_mutex);
  657. }
  658. int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
  659. {
  660. return 32 * sizeof(u16);
  661. }
  662. void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
  663. struct ethtool_regs *regs, void *_p)
  664. {
  665. u16 *p = _p;
  666. int i;
  667. regs->version = 0;
  668. memset(p, 0xff, 32 * sizeof(u16));
  669. for (i = 0; i < 32; i++) {
  670. int ret;
  671. ret = mv88e6xxx_reg_read(ds, REG_PORT(port), i);
  672. if (ret >= 0)
  673. p[i] = ret;
  674. }
  675. }
  676. static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
  677. u16 mask)
  678. {
  679. unsigned long timeout = jiffies + HZ / 10;
  680. while (time_before(jiffies, timeout)) {
  681. int ret;
  682. ret = _mv88e6xxx_reg_read(ds, reg, offset);
  683. if (ret < 0)
  684. return ret;
  685. if (!(ret & mask))
  686. return 0;
  687. usleep_range(1000, 2000);
  688. }
  689. return -ETIMEDOUT;
  690. }
  691. static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
  692. {
  693. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  694. int ret;
  695. mutex_lock(&ps->smi_mutex);
  696. ret = _mv88e6xxx_wait(ds, reg, offset, mask);
  697. mutex_unlock(&ps->smi_mutex);
  698. return ret;
  699. }
  700. static int _mv88e6xxx_phy_wait(struct dsa_switch *ds)
  701. {
  702. return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
  703. GLOBAL2_SMI_OP_BUSY);
  704. }
  705. int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
  706. {
  707. return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
  708. GLOBAL2_EEPROM_OP_LOAD);
  709. }
  710. int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
  711. {
  712. return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
  713. GLOBAL2_EEPROM_OP_BUSY);
  714. }
  715. static int _mv88e6xxx_atu_wait(struct dsa_switch *ds)
  716. {
  717. return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_ATU_OP,
  718. GLOBAL_ATU_OP_BUSY);
  719. }
  720. static int _mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr,
  721. int regnum)
  722. {
  723. int ret;
  724. ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
  725. GLOBAL2_SMI_OP_22_READ | (addr << 5) |
  726. regnum);
  727. if (ret < 0)
  728. return ret;
  729. ret = _mv88e6xxx_phy_wait(ds);
  730. if (ret < 0)
  731. return ret;
  732. return _mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA);
  733. }
  734. static int _mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr,
  735. int regnum, u16 val)
  736. {
  737. int ret;
  738. ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
  739. if (ret < 0)
  740. return ret;
  741. ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
  742. GLOBAL2_SMI_OP_22_WRITE | (addr << 5) |
  743. regnum);
  744. return _mv88e6xxx_phy_wait(ds);
  745. }
  746. int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
  747. {
  748. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  749. int reg;
  750. mutex_lock(&ps->smi_mutex);
  751. reg = _mv88e6xxx_phy_read_indirect(ds, port, 16);
  752. if (reg < 0)
  753. goto out;
  754. e->eee_enabled = !!(reg & 0x0200);
  755. e->tx_lpi_enabled = !!(reg & 0x0100);
  756. reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
  757. if (reg < 0)
  758. goto out;
  759. e->eee_active = !!(reg & PORT_STATUS_EEE);
  760. reg = 0;
  761. out:
  762. mutex_unlock(&ps->smi_mutex);
  763. return reg;
  764. }
  765. int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
  766. struct phy_device *phydev, struct ethtool_eee *e)
  767. {
  768. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  769. int reg;
  770. int ret;
  771. mutex_lock(&ps->smi_mutex);
  772. ret = _mv88e6xxx_phy_read_indirect(ds, port, 16);
  773. if (ret < 0)
  774. goto out;
  775. reg = ret & ~0x0300;
  776. if (e->eee_enabled)
  777. reg |= 0x0200;
  778. if (e->tx_lpi_enabled)
  779. reg |= 0x0100;
  780. ret = _mv88e6xxx_phy_write_indirect(ds, port, 16, reg);
  781. out:
  782. mutex_unlock(&ps->smi_mutex);
  783. return ret;
  784. }
  785. static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, u16 cmd)
  786. {
  787. int ret;
  788. ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
  789. if (ret < 0)
  790. return ret;
  791. return _mv88e6xxx_atu_wait(ds);
  792. }
  793. static int _mv88e6xxx_atu_data_write(struct dsa_switch *ds,
  794. struct mv88e6xxx_atu_entry *entry)
  795. {
  796. u16 data = entry->state & GLOBAL_ATU_DATA_STATE_MASK;
  797. if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
  798. unsigned int mask, shift;
  799. if (entry->trunk) {
  800. data |= GLOBAL_ATU_DATA_TRUNK;
  801. mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
  802. shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
  803. } else {
  804. mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
  805. shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
  806. }
  807. data |= (entry->portv_trunkid << shift) & mask;
  808. }
  809. return _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA, data);
  810. }
  811. static int _mv88e6xxx_atu_flush_move(struct dsa_switch *ds,
  812. struct mv88e6xxx_atu_entry *entry,
  813. bool static_too)
  814. {
  815. int op;
  816. int err;
  817. err = _mv88e6xxx_atu_wait(ds);
  818. if (err)
  819. return err;
  820. err = _mv88e6xxx_atu_data_write(ds, entry);
  821. if (err)
  822. return err;
  823. if (entry->fid) {
  824. err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID,
  825. entry->fid);
  826. if (err)
  827. return err;
  828. op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB :
  829. GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB;
  830. } else {
  831. op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL :
  832. GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC;
  833. }
  834. return _mv88e6xxx_atu_cmd(ds, op);
  835. }
  836. static int _mv88e6xxx_atu_flush(struct dsa_switch *ds, u16 fid, bool static_too)
  837. {
  838. struct mv88e6xxx_atu_entry entry = {
  839. .fid = fid,
  840. .state = 0, /* EntryState bits must be 0 */
  841. };
  842. return _mv88e6xxx_atu_flush_move(ds, &entry, static_too);
  843. }
  844. static int _mv88e6xxx_atu_move(struct dsa_switch *ds, u16 fid, int from_port,
  845. int to_port, bool static_too)
  846. {
  847. struct mv88e6xxx_atu_entry entry = {
  848. .trunk = false,
  849. .fid = fid,
  850. };
  851. /* EntryState bits must be 0xF */
  852. entry.state = GLOBAL_ATU_DATA_STATE_MASK;
  853. /* ToPort and FromPort are respectively in PortVec bits 7:4 and 3:0 */
  854. entry.portv_trunkid = (to_port & 0x0f) << 4;
  855. entry.portv_trunkid |= from_port & 0x0f;
  856. return _mv88e6xxx_atu_flush_move(ds, &entry, static_too);
  857. }
  858. static int _mv88e6xxx_atu_remove(struct dsa_switch *ds, u16 fid, int port,
  859. bool static_too)
  860. {
  861. /* Destination port 0xF means remove the entries */
  862. return _mv88e6xxx_atu_move(ds, fid, port, 0x0f, static_too);
  863. }
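/* Worked example (illustrative only): flushing the non-static entries
 * that port 3 has learned in FID 5 ends up as
 *
 *	_mv88e6xxx_atu_remove(ds, 5, 3, false);
 *
 * which writes EntryState = 0xf and PortVec = 0xf3 (ToPort 0xf = remove,
 * FromPort = 3) to GLOBAL_ATU_DATA, FID 5 to GLOBAL_ATU_FID, and then
 * issues GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB.
 */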
  864. static const char * const mv88e6xxx_port_state_names[] = {
  865. [PORT_CONTROL_STATE_DISABLED] = "Disabled",
  866. [PORT_CONTROL_STATE_BLOCKING] = "Blocking/Listening",
  867. [PORT_CONTROL_STATE_LEARNING] = "Learning",
  868. [PORT_CONTROL_STATE_FORWARDING] = "Forwarding",
  869. };
  870. static int _mv88e6xxx_port_state(struct dsa_switch *ds, int port, u8 state)
  871. {
  872. int reg, ret = 0;
  873. u8 oldstate;
  874. reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL);
  875. if (reg < 0)
  876. return reg;
  877. oldstate = reg & PORT_CONTROL_STATE_MASK;
  878. if (oldstate != state) {
  879. /* Flush forwarding database if we're moving a port
  880. * from Learning or Forwarding state to Disabled or
  881. * Blocking or Listening state.
  882. */
  883. if ((oldstate == PORT_CONTROL_STATE_LEARNING ||
  884. oldstate == PORT_CONTROL_STATE_FORWARDING)
  885. && (state == PORT_CONTROL_STATE_DISABLED ||
  886. state == PORT_CONTROL_STATE_BLOCKING)) {
  887. ret = _mv88e6xxx_atu_remove(ds, 0, port, false);
  888. if (ret)
  889. return ret;
  890. }
  891. reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
  892. ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL,
  893. reg);
  894. if (ret)
  895. return ret;
  896. netdev_dbg(ds->ports[port], "PortState %s (was %s)\n",
  897. mv88e6xxx_port_state_names[state],
  898. mv88e6xxx_port_state_names[oldstate]);
  899. }
  900. return ret;
  901. }
  902. static int _mv88e6xxx_port_based_vlan_map(struct dsa_switch *ds, int port)
  903. {
  904. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  905. struct net_device *bridge = ps->ports[port].bridge_dev;
  906. const u16 mask = (1 << ps->num_ports) - 1;
  907. u16 output_ports = 0;
  908. int reg;
  909. int i;
  910. /* allow CPU port or DSA link(s) to send frames to every port */
  911. if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
  912. output_ports = mask;
  913. } else {
  914. for (i = 0; i < ps->num_ports; ++i) {
  915. /* allow sending frames to every group member */
  916. if (bridge && ps->ports[i].bridge_dev == bridge)
  917. output_ports |= BIT(i);
  918. /* allow sending frames to CPU port and DSA link(s) */
  919. if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
  920. output_ports |= BIT(i);
  921. }
  922. }
  923. /* prevent frames from going back out of the port they came in on */
  924. output_ports &= ~BIT(port);
  925. reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_BASE_VLAN);
  926. if (reg < 0)
  927. return reg;
  928. reg &= ~mask;
  929. reg |= output_ports & mask;
  930. return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN, reg);
  931. }
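/* Worked example (hypothetical topology): on a switch where port 5 is the
 * CPU port and ports 1 and 2 are members of the same bridge, the map
 * computed above for port 1 is BIT(2) | BIT(5): it may send to its bridge
 * peer and to the CPU, its own BIT(1) is masked out, and unbridged ports
 * stay unreachable.  The CPU port itself gets the full mask minus its own
 * bit.
 */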
  932. int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state)
  933. {
  934. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  935. int stp_state;
  936. switch (state) {
  937. case BR_STATE_DISABLED:
  938. stp_state = PORT_CONTROL_STATE_DISABLED;
  939. break;
  940. case BR_STATE_BLOCKING:
  941. case BR_STATE_LISTENING:
  942. stp_state = PORT_CONTROL_STATE_BLOCKING;
  943. break;
  944. case BR_STATE_LEARNING:
  945. stp_state = PORT_CONTROL_STATE_LEARNING;
  946. break;
  947. case BR_STATE_FORWARDING:
  948. default:
  949. stp_state = PORT_CONTROL_STATE_FORWARDING;
  950. break;
  951. }
  952. /* mv88e6xxx_port_stp_update may be called with softirqs disabled,
  953. * so we can not update the port state directly but need to schedule it.
  954. */
  955. ps->ports[port].state = stp_state;
  956. set_bit(port, ps->port_state_update_mask);
  957. schedule_work(&ps->bridge_work);
  958. return 0;
  959. }
  960. static int _mv88e6xxx_port_pvid(struct dsa_switch *ds, int port, u16 *new,
  961. u16 *old)
  962. {
  963. u16 pvid;
  964. int ret;
  965. ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_DEFAULT_VLAN);
  966. if (ret < 0)
  967. return ret;
  968. pvid = ret & PORT_DEFAULT_VLAN_MASK;
  969. if (new) {
  970. ret &= ~PORT_DEFAULT_VLAN_MASK;
  971. ret |= *new & PORT_DEFAULT_VLAN_MASK;
  972. ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
  973. PORT_DEFAULT_VLAN, ret);
  974. if (ret < 0)
  975. return ret;
  976. netdev_dbg(ds->ports[port], "DefaultVID %d (was %d)\n", *new,
  977. pvid);
  978. }
  979. if (old)
  980. *old = pvid;
  981. return 0;
  982. }
  983. static int _mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *pvid)
  984. {
  985. return _mv88e6xxx_port_pvid(ds, port, NULL, pvid);
  986. }
  987. static int _mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 pvid)
  988. {
  989. return _mv88e6xxx_port_pvid(ds, port, &pvid, NULL);
  990. }
  991. static int _mv88e6xxx_vtu_wait(struct dsa_switch *ds)
  992. {
  993. return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_VTU_OP,
  994. GLOBAL_VTU_OP_BUSY);
  995. }
  996. static int _mv88e6xxx_vtu_cmd(struct dsa_switch *ds, u16 op)
  997. {
  998. int ret;
  999. ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_OP, op);
  1000. if (ret < 0)
  1001. return ret;
  1002. return _mv88e6xxx_vtu_wait(ds);
  1003. }
  1004. static int _mv88e6xxx_vtu_stu_flush(struct dsa_switch *ds)
  1005. {
  1006. int ret;
  1007. ret = _mv88e6xxx_vtu_wait(ds);
  1008. if (ret < 0)
  1009. return ret;
  1010. return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_FLUSH_ALL);
  1011. }
  1012. static int _mv88e6xxx_vtu_stu_data_read(struct dsa_switch *ds,
  1013. struct mv88e6xxx_vtu_stu_entry *entry,
  1014. unsigned int nibble_offset)
  1015. {
  1016. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  1017. u16 regs[3];
  1018. int i;
  1019. int ret;
  1020. for (i = 0; i < 3; ++i) {
  1021. ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
  1022. GLOBAL_VTU_DATA_0_3 + i);
  1023. if (ret < 0)
  1024. return ret;
  1025. regs[i] = ret;
  1026. }
  1027. for (i = 0; i < ps->num_ports; ++i) {
  1028. unsigned int shift = (i % 4) * 4 + nibble_offset;
  1029. u16 reg = regs[i / 4];
  1030. entry->data[i] = (reg >> shift) & GLOBAL_VTU_STU_DATA_MASK;
  1031. }
  1032. return 0;
  1033. }
  1034. static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds,
  1035. struct mv88e6xxx_vtu_stu_entry *entry,
  1036. unsigned int nibble_offset)
  1037. {
  1038. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  1039. u16 regs[3] = { 0 };
  1040. int i;
  1041. int ret;
  1042. for (i = 0; i < ps->num_ports; ++i) {
  1043. unsigned int shift = (i % 4) * 4 + nibble_offset;
  1044. u8 data = entry->data[i];
  1045. regs[i / 4] |= (data & GLOBAL_VTU_STU_DATA_MASK) << shift;
  1046. }
  1047. for (i = 0; i < 3; ++i) {
  1048. ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL,
  1049. GLOBAL_VTU_DATA_0_3 + i, regs[i]);
  1050. if (ret < 0)
  1051. return ret;
  1052. }
  1053. return 0;
  1054. }
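/* Layout handled by the two helpers above: each port owns a 4-bit nibble
 * in the VTU/STU data registers, ports 0-3 in GLOBAL_VTU_DATA_0_3, ports
 * 4-7 in the next register, and so on.  The VTU member tag is accessed at
 * nibble offset 0 and the STU port state at nibble offset 2; port 5's
 * member tag, for instance, sits at bits 5:4 of GLOBAL_VTU_DATA_0_3 + 1.
 */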
  1055. static int _mv88e6xxx_vtu_vid_write(struct dsa_switch *ds, u16 vid)
  1056. {
  1057. return _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID,
  1058. vid & GLOBAL_VTU_VID_MASK);
  1059. }
  1060. static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds,
  1061. struct mv88e6xxx_vtu_stu_entry *entry)
  1062. {
  1063. struct mv88e6xxx_vtu_stu_entry next = { 0 };
  1064. int ret;
  1065. ret = _mv88e6xxx_vtu_wait(ds);
  1066. if (ret < 0)
  1067. return ret;
  1068. ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_GET_NEXT);
  1069. if (ret < 0)
  1070. return ret;
  1071. ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
  1072. if (ret < 0)
  1073. return ret;
  1074. next.vid = ret & GLOBAL_VTU_VID_MASK;
  1075. next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
  1076. if (next.valid) {
  1077. ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 0);
  1078. if (ret < 0)
  1079. return ret;
  1080. if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
  1081. mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
  1082. ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
  1083. GLOBAL_VTU_FID);
  1084. if (ret < 0)
  1085. return ret;
  1086. next.fid = ret & GLOBAL_VTU_FID_MASK;
  1087. ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
  1088. GLOBAL_VTU_SID);
  1089. if (ret < 0)
  1090. return ret;
  1091. next.sid = ret & GLOBAL_VTU_SID_MASK;
  1092. }
  1093. }
  1094. *entry = next;
  1095. return 0;
  1096. }
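/* Iteration idiom used by the callers below (sketch): the VTU is walked
 * by seeding GLOBAL_VTU_VID and then issuing GetNext repeatedly, e.g.
 *
 *	err = _mv88e6xxx_vtu_vid_write(ds, GLOBAL_VTU_VID_MASK);
 *	do {
 *		err = _mv88e6xxx_vtu_getnext(ds, &next);
 *		...
 *	} while (next.vid < GLOBAL_VTU_VID_MASK);
 *
 * Seeding the all-ones VID makes GetNext wrap around to the lowest
 * configured VID, so the loop visits every valid entry once; seeding
 * vid - 1 (as _mv88e6xxx_vtu_get() does) makes a specific VID the first
 * candidate returned.
 */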
  1097. int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
  1098. struct switchdev_obj_port_vlan *vlan,
  1099. int (*cb)(struct switchdev_obj *obj))
  1100. {
  1101. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  1102. struct mv88e6xxx_vtu_stu_entry next;
  1103. u16 pvid;
  1104. int err;
  1105. mutex_lock(&ps->smi_mutex);
  1106. err = _mv88e6xxx_port_pvid_get(ds, port, &pvid);
  1107. if (err)
  1108. goto unlock;
  1109. err = _mv88e6xxx_vtu_vid_write(ds, GLOBAL_VTU_VID_MASK);
  1110. if (err)
  1111. goto unlock;
  1112. do {
  1113. err = _mv88e6xxx_vtu_getnext(ds, &next);
  1114. if (err)
  1115. break;
  1116. if (!next.valid)
  1117. break;
  1118. if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
  1119. continue;
  1120. /* reinit and dump this VLAN obj */
  1121. vlan->vid_begin = vlan->vid_end = next.vid;
  1122. vlan->flags = 0;
  1123. if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
  1124. vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
  1125. if (next.vid == pvid)
  1126. vlan->flags |= BRIDGE_VLAN_INFO_PVID;
  1127. err = cb(&vlan->obj);
  1128. if (err)
  1129. break;
  1130. } while (next.vid < GLOBAL_VTU_VID_MASK);
  1131. unlock:
  1132. mutex_unlock(&ps->smi_mutex);
  1133. return err;
  1134. }
  1135. static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds,
  1136. struct mv88e6xxx_vtu_stu_entry *entry)
  1137. {
  1138. u16 reg = 0;
  1139. int ret;
  1140. ret = _mv88e6xxx_vtu_wait(ds);
  1141. if (ret < 0)
  1142. return ret;
  1143. if (!entry->valid)
  1144. goto loadpurge;
  1145. /* Write port member tags */
  1146. ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 0);
  1147. if (ret < 0)
  1148. return ret;
  1149. if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
  1150. mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
  1151. reg = entry->sid & GLOBAL_VTU_SID_MASK;
  1152. ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
  1153. if (ret < 0)
  1154. return ret;
  1155. reg = entry->fid & GLOBAL_VTU_FID_MASK;
  1156. ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_FID, reg);
  1157. if (ret < 0)
  1158. return ret;
  1159. }
  1160. reg = GLOBAL_VTU_VID_VALID;
  1161. loadpurge:
  1162. reg |= entry->vid & GLOBAL_VTU_VID_MASK;
  1163. ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
  1164. if (ret < 0)
  1165. return ret;
  1166. return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_LOAD_PURGE);
  1167. }
  1168. static int _mv88e6xxx_stu_getnext(struct dsa_switch *ds, u8 sid,
  1169. struct mv88e6xxx_vtu_stu_entry *entry)
  1170. {
  1171. struct mv88e6xxx_vtu_stu_entry next = { 0 };
  1172. int ret;
  1173. ret = _mv88e6xxx_vtu_wait(ds);
  1174. if (ret < 0)
  1175. return ret;
  1176. ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID,
  1177. sid & GLOBAL_VTU_SID_MASK);
  1178. if (ret < 0)
  1179. return ret;
  1180. ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_GET_NEXT);
  1181. if (ret < 0)
  1182. return ret;
  1183. ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_SID);
  1184. if (ret < 0)
  1185. return ret;
  1186. next.sid = ret & GLOBAL_VTU_SID_MASK;
  1187. ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
  1188. if (ret < 0)
  1189. return ret;
  1190. next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
  1191. if (next.valid) {
  1192. ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 2);
  1193. if (ret < 0)
  1194. return ret;
  1195. }
  1196. *entry = next;
  1197. return 0;
  1198. }
  1199. static int _mv88e6xxx_stu_loadpurge(struct dsa_switch *ds,
  1200. struct mv88e6xxx_vtu_stu_entry *entry)
  1201. {
  1202. u16 reg = 0;
  1203. int ret;
  1204. ret = _mv88e6xxx_vtu_wait(ds);
  1205. if (ret < 0)
  1206. return ret;
  1207. if (!entry->valid)
  1208. goto loadpurge;
  1209. /* Write port states */
  1210. ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 2);
  1211. if (ret < 0)
  1212. return ret;
  1213. reg = GLOBAL_VTU_VID_VALID;
  1214. loadpurge:
  1215. ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
  1216. if (ret < 0)
  1217. return ret;
  1218. reg = entry->sid & GLOBAL_VTU_SID_MASK;
  1219. ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
  1220. if (ret < 0)
  1221. return ret;
  1222. return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_LOAD_PURGE);
  1223. }
  1224. static int _mv88e6xxx_port_fid(struct dsa_switch *ds, int port, u16 *new,
  1225. u16 *old)
  1226. {
  1227. u16 fid;
  1228. int ret;
  1229. /* Port's default FID bits 3:0 are located in reg 0x06, offset 12 */
  1230. ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_BASE_VLAN);
  1231. if (ret < 0)
  1232. return ret;
  1233. fid = (ret & PORT_BASE_VLAN_FID_3_0_MASK) >> 12;
  1234. if (new) {
  1235. ret &= ~PORT_BASE_VLAN_FID_3_0_MASK;
  1236. ret |= (*new << 12) & PORT_BASE_VLAN_FID_3_0_MASK;
  1237. ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN,
  1238. ret);
  1239. if (ret < 0)
  1240. return ret;
  1241. }
  1242. /* Port's default FID bits 11:4 are located in reg 0x05, offset 0 */
  1243. ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL_1);
  1244. if (ret < 0)
  1245. return ret;
  1246. fid |= (ret & PORT_CONTROL_1_FID_11_4_MASK) << 4;
  1247. if (new) {
  1248. ret &= ~PORT_CONTROL_1_FID_11_4_MASK;
  1249. ret |= (*new >> 4) & PORT_CONTROL_1_FID_11_4_MASK;
  1250. ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1,
  1251. ret);
  1252. if (ret < 0)
  1253. return ret;
  1254. netdev_dbg(ds->ports[port], "FID %d (was %d)\n", *new, fid);
  1255. }
  1256. if (old)
  1257. *old = fid;
  1258. return 0;
  1259. }
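/* Worked example (illustrative only): setting FID 0x9a3 on a port stores
 * the low nibble 0x3 in bits 15:12 of PORT_BASE_VLAN and the upper byte
 * 0x9a in bits 7:0 of PORT_CONTROL_1, matching the two read-modify-write
 * steps above; the read path reassembles the 12-bit value the same way.
 */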
  1260. static int _mv88e6xxx_port_fid_get(struct dsa_switch *ds, int port, u16 *fid)
  1261. {
  1262. return _mv88e6xxx_port_fid(ds, port, NULL, fid);
  1263. }
  1264. static int _mv88e6xxx_port_fid_set(struct dsa_switch *ds, int port, u16 fid)
  1265. {
  1266. return _mv88e6xxx_port_fid(ds, port, &fid, NULL);
  1267. }
  1268. static int _mv88e6xxx_fid_new(struct dsa_switch *ds, u16 *fid)
  1269. {
  1270. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  1271. DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
  1272. struct mv88e6xxx_vtu_stu_entry vlan;
  1273. int i, err;
  1274. bitmap_zero(fid_bitmap, MV88E6XXX_N_FID);
  1275. /* Set every FID bit used by the (un)bridged ports */
  1276. for (i = 0; i < ps->num_ports; ++i) {
  1277. err = _mv88e6xxx_port_fid_get(ds, i, fid);
  1278. if (err)
  1279. return err;
  1280. set_bit(*fid, fid_bitmap);
  1281. }
  1282. /* Set every FID bit used by the VLAN entries */
  1283. err = _mv88e6xxx_vtu_vid_write(ds, GLOBAL_VTU_VID_MASK);
  1284. if (err)
  1285. return err;
  1286. do {
  1287. err = _mv88e6xxx_vtu_getnext(ds, &vlan);
  1288. if (err)
  1289. return err;
  1290. if (!vlan.valid)
  1291. break;
  1292. set_bit(vlan.fid, fid_bitmap);
  1293. } while (vlan.vid < GLOBAL_VTU_VID_MASK);
  1294. /* The reset value 0x000 is used to indicate that multiple address
  1295. * databases are not needed. Return the next positive available.
  1296. */
  1297. *fid = find_next_zero_bit(fid_bitmap, MV88E6XXX_N_FID, 1);
  1298. if (unlikely(*fid == MV88E6XXX_N_FID))
  1299. return -ENOSPC;
  1300. /* Clear the database */
  1301. return _mv88e6xxx_atu_flush(ds, *fid, true);
  1302. }
static int _mv88e6xxx_vtu_new(struct dsa_switch *ds, u16 vid,
			      struct mv88e6xxx_vtu_stu_entry *entry)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct mv88e6xxx_vtu_stu_entry vlan = {
		.valid = true,
		.vid = vid,
	};
	int i, err;

	err = _mv88e6xxx_fid_new(ds, &vlan.fid);
	if (err)
		return err;

	/* exclude all ports except the CPU and DSA ports */
	for (i = 0; i < ps->num_ports; ++i)
		vlan.data[i] = dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)
			? GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED
			: GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;

	if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
	    mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
		struct mv88e6xxx_vtu_stu_entry vstp;

		/* Adding a VTU entry requires a valid STU entry. As VSTP is not
		 * implemented, only one STU entry is needed to cover all VTU
		 * entries. Thus, validate the SID 0.
		 */
		vlan.sid = 0;
		err = _mv88e6xxx_stu_getnext(ds, GLOBAL_VTU_SID_MASK, &vstp);
		if (err)
			return err;

		if (vstp.sid != vlan.sid || !vstp.valid) {
			memset(&vstp, 0, sizeof(vstp));
			vstp.valid = true;
			vstp.sid = vlan.sid;

			err = _mv88e6xxx_stu_loadpurge(ds, &vstp);
			if (err)
				return err;
		}
	}

	*entry = vlan;
	return 0;
}

static int _mv88e6xxx_vtu_get(struct dsa_switch *ds, u16 vid,
			      struct mv88e6xxx_vtu_stu_entry *entry, bool creat)
{
	int err;

	if (!vid)
		return -EINVAL;

	err = _mv88e6xxx_vtu_vid_write(ds, vid - 1);
	if (err)
		return err;

	err = _mv88e6xxx_vtu_getnext(ds, entry);
	if (err)
		return err;

	if (entry->vid != vid || !entry->valid) {
		if (!creat)
			return -EOPNOTSUPP;
		/* -ENOENT would've been more appropriate, but switchdev expects
		 * -EOPNOTSUPP to inform bridge about an eventual software VLAN.
		 */

		err = _mv88e6xxx_vtu_new(ds, vid, entry);
	}

	return err;
}
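
/* Refuse to program a VLAN in hardware if any of its current members sits
 * behind a different bridge than @port, so that the bridge layer falls back
 * to a software VLAN instead.
 */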
static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
					u16 vid_begin, u16 vid_end)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct mv88e6xxx_vtu_stu_entry vlan;
	int i, err;

	if (!vid_begin)
		return -EOPNOTSUPP;

	mutex_lock(&ps->smi_mutex);

	err = _mv88e6xxx_vtu_vid_write(ds, vid_begin - 1);
	if (err)
		goto unlock;

	do {
		err = _mv88e6xxx_vtu_getnext(ds, &vlan);
		if (err)
			goto unlock;

		if (!vlan.valid)
			break;

		if (vlan.vid > vid_end)
			break;

		for (i = 0; i < ps->num_ports; ++i) {
			if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
				continue;

			if (vlan.data[i] ==
			    GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
				continue;

			if (ps->ports[i].bridge_dev ==
			    ps->ports[port].bridge_dev)
				break; /* same bridge, check next VLAN */

			netdev_warn(ds->ports[port],
				    "hardware VLAN %d already used by %s\n",
				    vlan.vid,
				    netdev_name(ps->ports[i].bridge_dev));
			err = -EOPNOTSUPP;
			goto unlock;
		}
	} while (vlan.vid < vid_end);

unlock:
	mutex_unlock(&ps->smi_mutex);

	return err;
}

static const char * const mv88e6xxx_port_8021q_mode_names[] = {
	[PORT_CONTROL_2_8021Q_DISABLED] = "Disabled",
	[PORT_CONTROL_2_8021Q_FALLBACK] = "Fallback",
	[PORT_CONTROL_2_8021Q_CHECK] = "Check",
	[PORT_CONTROL_2_8021Q_SECURE] = "Secure",
};

int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
				  bool vlan_filtering)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u16 old, new = vlan_filtering ? PORT_CONTROL_2_8021Q_SECURE :
		PORT_CONTROL_2_8021Q_DISABLED;
	int ret;

	mutex_lock(&ps->smi_mutex);

	ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL_2);
	if (ret < 0)
		goto unlock;

	old = ret & PORT_CONTROL_2_8021Q_MASK;

	if (new != old) {
		ret &= ~PORT_CONTROL_2_8021Q_MASK;
		ret |= new & PORT_CONTROL_2_8021Q_MASK;

		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_2,
					   ret);
		if (ret < 0)
			goto unlock;

		netdev_dbg(ds->ports[port], "802.1Q Mode %s (was %s)\n",
			   mv88e6xxx_port_8021q_mode_names[new],
			   mv88e6xxx_port_8021q_mode_names[old]);
	}

	ret = 0;
unlock:
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
				const struct switchdev_obj_port_vlan *vlan,
				struct switchdev_trans *trans)
{
	int err;

	/* If the requested port doesn't belong to the same bridge as the VLAN
	 * members, do not support it (yet) and fall back to software VLAN.
	 */
	err = mv88e6xxx_port_check_hw_vlan(ds, port, vlan->vid_begin,
					   vlan->vid_end);
	if (err)
		return err;

	/* We don't need any dynamic resource from the kernel (yet),
	 * so skip the prepare phase.
	 */
	return 0;
}

static int _mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid,
				    bool untagged)
{
	struct mv88e6xxx_vtu_stu_entry vlan;
	int err;

	err = _mv88e6xxx_vtu_get(ds, vid, &vlan, true);
	if (err)
		return err;

	vlan.data[port] = untagged ?
		GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED :
		GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED;

	return _mv88e6xxx_vtu_loadpurge(ds, &vlan);
}

int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
			    const struct switchdev_obj_port_vlan *vlan,
			    struct switchdev_trans *trans)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	u16 vid;
	int err = 0;

	mutex_lock(&ps->smi_mutex);

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
		err = _mv88e6xxx_port_vlan_add(ds, port, vid, untagged);
		if (err)
			goto unlock;
	}

	/* no PVID with ranges, otherwise it's a bug */
	if (pvid)
		err = _mv88e6xxx_port_pvid_set(ds, port, vlan->vid_end);
unlock:
	mutex_unlock(&ps->smi_mutex);

	return err;
}

static int _mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct mv88e6xxx_vtu_stu_entry vlan;
	int i, err;

	err = _mv88e6xxx_vtu_get(ds, vid, &vlan, false);
	if (err)
		return err;

	/* Tell switchdev if this VLAN is handled in software */
	if (vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
		return -EOPNOTSUPP;

	vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;

	/* keep the VLAN unless all ports are excluded */
	vlan.valid = false;
	for (i = 0; i < ps->num_ports; ++i) {
		if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
			continue;

		if (vlan.data[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
			vlan.valid = true;
			break;
		}
	}

	err = _mv88e6xxx_vtu_loadpurge(ds, &vlan);
	if (err)
		return err;

	return _mv88e6xxx_atu_remove(ds, vlan.fid, port, false);
}

int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
			    const struct switchdev_obj_port_vlan *vlan)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u16 pvid, vid;
	int err = 0;

	mutex_lock(&ps->smi_mutex);

	err = _mv88e6xxx_port_pvid_get(ds, port, &pvid);
	if (err)
		goto unlock;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
		err = _mv88e6xxx_port_vlan_del(ds, port, vid);
		if (err)
			goto unlock;

		if (vid == pvid) {
			err = _mv88e6xxx_port_pvid_set(ds, port, 0);
			if (err)
				goto unlock;
		}
	}

unlock:
	mutex_unlock(&ps->smi_mutex);

	return err;
}

static int _mv88e6xxx_atu_mac_write(struct dsa_switch *ds,
				    const unsigned char *addr)
{
	int i, ret;

	for (i = 0; i < 3; i++) {
		ret = _mv88e6xxx_reg_write(
			ds, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
			(addr[i * 2] << 8) | addr[i * 2 + 1]);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static int _mv88e6xxx_atu_mac_read(struct dsa_switch *ds, unsigned char *addr)
{
	int i, ret;

	for (i = 0; i < 3; i++) {
		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
					  GLOBAL_ATU_MAC_01 + i);
		if (ret < 0)
			return ret;
		addr[i * 2] = ret >> 8;
		addr[i * 2 + 1] = ret & 0xff;
	}

	return 0;
}

static int _mv88e6xxx_atu_load(struct dsa_switch *ds,
			       struct mv88e6xxx_atu_entry *entry)
{
	int ret;

	ret = _mv88e6xxx_atu_wait(ds);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_atu_mac_write(ds, entry->mac);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_atu_data_write(ds, entry);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, entry->fid);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_LOAD_DB);
}

static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port,
				    const unsigned char *addr, u16 vid,
				    u8 state)
{
	struct mv88e6xxx_atu_entry entry = { 0 };
	struct mv88e6xxx_vtu_stu_entry vlan;
	int err;

	/* Null VLAN ID corresponds to the port private database */
	if (vid == 0)
		err = _mv88e6xxx_port_fid_get(ds, port, &vlan.fid);
	else
		err = _mv88e6xxx_vtu_get(ds, vid, &vlan, false);
	if (err)
		return err;

	entry.fid = vlan.fid;
	entry.state = state;
	ether_addr_copy(entry.mac, addr);
	if (state != GLOBAL_ATU_DATA_STATE_UNUSED) {
		entry.trunk = false;
		entry.portv_trunkid = BIT(port);
	}

	return _mv88e6xxx_atu_load(ds, &entry);
}

int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
			       const struct switchdev_obj_port_fdb *fdb,
			       struct switchdev_trans *trans)
{
	/* We don't need any dynamic resource from the kernel (yet),
	 * so skip the prepare phase.
	 */
	return 0;
}

int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
			   const struct switchdev_obj_port_fdb *fdb,
			   struct switchdev_trans *trans)
{
	int state = is_multicast_ether_addr(fdb->addr) ?
		GLOBAL_ATU_DATA_STATE_MC_STATIC :
		GLOBAL_ATU_DATA_STATE_UC_STATIC;
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_port_fdb_load(ds, port, fdb->addr, fdb->vid, state);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
			   const struct switchdev_obj_port_fdb *fdb)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_port_fdb_load(ds, port, fdb->addr, fdb->vid,
				       GLOBAL_ATU_DATA_STATE_UNUSED);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}
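
/* Read the next ATU entry of database @fid, starting from the MAC address
 * last written with _mv88e6xxx_atu_mac_write().
 */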
static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
				  struct mv88e6xxx_atu_entry *entry)
{
	struct mv88e6xxx_atu_entry next = { 0 };
	int ret;

	next.fid = fid;

	ret = _mv88e6xxx_atu_wait(ds);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_GET_NEXT_DB);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_atu_mac_read(ds, next.mac);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
	if (ret < 0)
		return ret;

	next.state = ret & GLOBAL_ATU_DATA_STATE_MASK;
	if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) {
		unsigned int mask, shift;

		if (ret & GLOBAL_ATU_DATA_TRUNK) {
			next.trunk = true;
			mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
			shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
		} else {
			next.trunk = false;
			mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
			shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
		}

		next.portv_trunkid = (ret & mask) >> shift;
	}

	*entry = next;
	return 0;
}
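
/* Walk one address database and report every entry that includes @port in
 * its port vector. The MAC iterator is primed with the broadcast address so
 * the Get Next operation wraps through the whole database exactly once.
 */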
static int _mv88e6xxx_port_fdb_dump_one(struct dsa_switch *ds, u16 fid, u16 vid,
					int port,
					struct switchdev_obj_port_fdb *fdb,
					int (*cb)(struct switchdev_obj *obj))
{
	struct mv88e6xxx_atu_entry addr = {
		.mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
	};
	int err;

	err = _mv88e6xxx_atu_mac_write(ds, addr.mac);
	if (err)
		return err;

	do {
		err = _mv88e6xxx_atu_getnext(ds, fid, &addr);
		if (err)
			break;

		if (addr.state == GLOBAL_ATU_DATA_STATE_UNUSED)
			break;

		if (!addr.trunk && addr.portv_trunkid & BIT(port)) {
			bool is_static = addr.state ==
				(is_multicast_ether_addr(addr.mac) ?
				 GLOBAL_ATU_DATA_STATE_MC_STATIC :
				 GLOBAL_ATU_DATA_STATE_UC_STATIC);

			fdb->vid = vid;
			ether_addr_copy(fdb->addr, addr.mac);
			fdb->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;

			err = cb(&fdb->obj);
			if (err)
				break;
		}
	} while (!is_broadcast_ether_addr(addr.mac));

	return err;
}

int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
			    struct switchdev_obj_port_fdb *fdb,
			    int (*cb)(struct switchdev_obj *obj))
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct mv88e6xxx_vtu_stu_entry vlan = {
		.vid = GLOBAL_VTU_VID_MASK, /* all ones */
	};
	u16 fid;
	int err;

	mutex_lock(&ps->smi_mutex);

	/* Dump port's default Filtering Information Database (VLAN ID 0) */
	err = _mv88e6xxx_port_fid_get(ds, port, &fid);
	if (err)
		goto unlock;

	err = _mv88e6xxx_port_fdb_dump_one(ds, fid, 0, port, fdb, cb);
	if (err)
		goto unlock;

	/* Dump VLANs' Filtering Information Databases */
	err = _mv88e6xxx_vtu_vid_write(ds, vlan.vid);
	if (err)
		goto unlock;

	do {
		err = _mv88e6xxx_vtu_getnext(ds, &vlan);
		if (err)
			break;

		if (!vlan.valid)
			break;

		err = _mv88e6xxx_port_fdb_dump_one(ds, vlan.fid, vlan.vid, port,
						   fdb, cb);
		if (err)
			break;
	} while (vlan.vid < GLOBAL_VTU_VID_MASK);

unlock:
	mutex_unlock(&ps->smi_mutex);

	return err;
}

int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
			       struct net_device *bridge)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u16 fid;
	int i, err;

	mutex_lock(&ps->smi_mutex);

	/* Get or create the bridge FID and assign it to the port */
	for (i = 0; i < ps->num_ports; ++i)
		if (ps->ports[i].bridge_dev == bridge)
			break;

	if (i < ps->num_ports)
		err = _mv88e6xxx_port_fid_get(ds, i, &fid);
	else
		err = _mv88e6xxx_fid_new(ds, &fid);
	if (err)
		goto unlock;

	err = _mv88e6xxx_port_fid_set(ds, port, fid);
	if (err)
		goto unlock;

	/* Assign the bridge and remap each port's VLANTable */
	ps->ports[port].bridge_dev = bridge;

	for (i = 0; i < ps->num_ports; ++i) {
		if (ps->ports[i].bridge_dev == bridge) {
			err = _mv88e6xxx_port_based_vlan_map(ds, i);
			if (err)
				break;
		}
	}

unlock:
	mutex_unlock(&ps->smi_mutex);

	return err;
}

void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct net_device *bridge = ps->ports[port].bridge_dev;
	u16 fid;
	int i;

	mutex_lock(&ps->smi_mutex);

	/* Give the port a fresh Filtering Information Database */
	if (_mv88e6xxx_fid_new(ds, &fid) ||
	    _mv88e6xxx_port_fid_set(ds, port, fid))
		netdev_warn(ds->ports[port], "failed to assign a new FID\n");

	/* Unassign the bridge and remap each port's VLANTable */
	ps->ports[port].bridge_dev = NULL;

	for (i = 0; i < ps->num_ports; ++i)
		if (i == port || ps->ports[i].bridge_dev == bridge)
			if (_mv88e6xxx_port_based_vlan_map(ds, i))
				netdev_warn(ds->ports[i], "failed to remap\n");

	mutex_unlock(&ps->smi_mutex);
}
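
/* Deferred work applying the port STP states requested from atomic context.
 * The dsa_switch structure is laid out directly in front of its private
 * state, which is how @ds is recovered from @ps below.
 */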
static void mv88e6xxx_bridge_work(struct work_struct *work)
{
	struct mv88e6xxx_priv_state *ps;
	struct dsa_switch *ds;
	int port;

	ps = container_of(work, struct mv88e6xxx_priv_state, bridge_work);
	ds = ((struct dsa_switch *)ps) - 1;

	mutex_lock(&ps->smi_mutex);

	for (port = 0; port < ps->num_ports; ++port)
		if (test_and_clear_bit(port, ps->port_state_update_mask) &&
		    _mv88e6xxx_port_state(ds, port, ps->ports[port].state))
			netdev_warn(ds->ports[port], "failed to update state to %s\n",
				    mv88e6xxx_port_state_names[ps->ports[port].state]);

	mutex_unlock(&ps->smi_mutex);
}

static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;
	u16 reg;

	mutex_lock(&ps->smi_mutex);

	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
	    mv88e6xxx_6065_family(ds) || mv88e6xxx_6320_family(ds)) {
		/* MAC Forcing register: don't force link, speed,
		 * duplex or flow control state to any particular
		 * values on physical ports, but force the CPU port
		 * and all DSA ports to their maximum bandwidth and
		 * full duplex.
		 */
		reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
		if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
			reg &= ~PORT_PCS_CTRL_UNFORCED;
			reg |= PORT_PCS_CTRL_FORCE_LINK |
				PORT_PCS_CTRL_LINK_UP |
				PORT_PCS_CTRL_DUPLEX_FULL |
				PORT_PCS_CTRL_FORCE_DUPLEX;
			if (mv88e6xxx_6065_family(ds))
				reg |= PORT_PCS_CTRL_100;
			else
				reg |= PORT_PCS_CTRL_1000;
		} else {
			reg |= PORT_PCS_CTRL_UNFORCED;
		}

		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_PCS_CTRL, reg);
		if (ret)
			goto abort;
	}

	/* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
	 * disable Header mode, enable IGMP/MLD snooping, disable VLAN
	 * tunneling, determine priority by looking at 802.1p and IP
	 * priority fields (IP prio has precedence), and set STP state
	 * to Forwarding.
	 *
	 * If this is the CPU link, use DSA or EDSA tagging depending
	 * on which tagging mode was configured.
	 *
	 * If this is a link to another switch, use DSA tagging mode.
	 *
	 * If this is the upstream port for this switch, enable
	 * forwarding of unknown unicasts and multicasts.
	 */
	reg = 0;
	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
	    mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds))
		reg = PORT_CONTROL_IGMP_MLD_SNOOP |
		PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
		PORT_CONTROL_STATE_FORWARDING;
	if (dsa_is_cpu_port(ds, port)) {
		if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
			reg |= PORT_CONTROL_DSA_TAG;
		if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
		    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
		    mv88e6xxx_6320_family(ds)) {
			if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
				reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
			else
				reg |= PORT_CONTROL_FRAME_MODE_DSA;
			reg |= PORT_CONTROL_FORWARD_UNKNOWN |
				PORT_CONTROL_FORWARD_UNKNOWN_MC;
		}

		if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
		    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
		    mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
		    mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds)) {
			if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
				reg |= PORT_CONTROL_EGRESS_ADD_TAG;
		}
	}
	if (dsa_is_dsa_port(ds, port)) {
		if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
			reg |= PORT_CONTROL_DSA_TAG;
		if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
		    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
		    mv88e6xxx_6320_family(ds)) {
			reg |= PORT_CONTROL_FRAME_MODE_DSA;
		}

		if (port == dsa_upstream_port(ds))
			reg |= PORT_CONTROL_FORWARD_UNKNOWN |
				PORT_CONTROL_FORWARD_UNKNOWN_MC;
	}
	if (reg) {
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_CONTROL, reg);
		if (ret)
			goto abort;
	}

	/* Port Control 2: don't force a good FCS, set the maximum frame size to
	 * 10240 bytes, disable 802.1q tags checking, don't discard tagged or
	 * untagged frames on this port, do a destination address lookup on all
	 * received packets as usual, disable ARP mirroring and don't send a
	 * copy of all transmitted/received frames on this port to the CPU.
	 */
	reg = 0;
	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
	    mv88e6xxx_6095_family(ds) || mv88e6xxx_6320_family(ds))
		reg = PORT_CONTROL_2_MAP_DA;

	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6320_family(ds))
		reg |= PORT_CONTROL_2_JUMBO_10240;

	if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) {
		/* Set the upstream port this port should use */
		reg |= dsa_upstream_port(ds);
		/* enable forwarding of unknown multicast addresses to
		 * the upstream port
		 */
		if (port == dsa_upstream_port(ds))
			reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
	}

	reg |= PORT_CONTROL_2_8021Q_DISABLED;

	if (reg) {
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_CONTROL_2, reg);
		if (ret)
			goto abort;
	}

	/* Port Association Vector: when learning source addresses
	 * of packets, add the address to the address database using
	 * a port bitmap that has only the bit for this port set and
	 * the other bits clear.
	 */
	reg = 1 << port;
	/* Disable learning for DSA and CPU ports */
	if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
		reg = PORT_ASSOC_VECTOR_LOCKED_PORT;

	ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR, reg);
	if (ret)
		goto abort;

	/* Egress rate control 2: disable egress rate control. */
	ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_RATE_CONTROL_2,
				   0x0000);
	if (ret)
		goto abort;

	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
	    mv88e6xxx_6320_family(ds)) {
		/* Do not limit the period of time that this port can
		 * be paused for by the remote end or the period of
		 * time that this port can pause the remote end.
		 */
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_PAUSE_CTRL, 0x0000);
		if (ret)
			goto abort;

		/* Port ATU control: disable limiting the number of
		 * address database entries that this port is allowed
		 * to use.
		 */
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_ATU_CONTROL, 0x0000);
		if (ret)
			goto abort;

		/* Priority Override: disable DA, SA and VTU priority
		 * override.
		 */
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_PRI_OVERRIDE, 0x0000);
		if (ret)
			goto abort;

		/* Port Ethertype: use the Ethertype DSA Ethertype
		 * value.
		 */
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_ETH_TYPE, ETH_P_EDSA);
		if (ret)
			goto abort;

		/* Tag Remap: use an identity 802.1p prio -> switch
		 * prio mapping.
		 */
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_TAG_REGMAP_0123, 0x3210);
		if (ret)
			goto abort;

		/* Tag Remap 2: use an identity 802.1p prio -> switch
		 * prio mapping.
		 */
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_TAG_REGMAP_4567, 0x7654);
		if (ret)
			goto abort;
	}

	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
	    mv88e6xxx_6320_family(ds)) {
		/* Rate Control: disable ingress rate limiting. */
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_RATE_CONTROL, 0x0001);
		if (ret)
			goto abort;
	}

	/* Port Control 1: disable trunking, disable sending
	 * learning messages to this port.
	 */
	ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1, 0x0000);
	if (ret)
		goto abort;

	/* Port based VLAN map: give each port its own address
	 * database, and allow bidirectional communication between the
	 * CPU and DSA port(s), and the other ports.
	 */
	ret = _mv88e6xxx_port_fid_set(ds, port, port + 1);
	if (ret)
		goto abort;

	ret = _mv88e6xxx_port_based_vlan_map(ds, port);
	if (ret)
		goto abort;

	/* Default VLAN ID and priority: don't set a default VLAN
	 * ID, and set the default packet priority to zero.
	 */
	ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
				   0x0000);
abort:
	mutex_unlock(&ps->smi_mutex);
	return ret;
}

int mv88e6xxx_setup_ports(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;
	int i;

	for (i = 0; i < ps->num_ports; i++) {
		ret = mv88e6xxx_setup_port(ds, i);
		if (ret < 0)
			return ret;
	}
	return 0;
}

int mv88e6xxx_setup_common(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	mutex_init(&ps->smi_mutex);

	ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0;

	INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);

	return 0;
}

int mv88e6xxx_setup_global(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;
	int i;

	/* Set the default address aging time to 5 minutes, and
	 * enable address learn messages to be sent to all message
	 * ports.
	 */
	REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
		  0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL);

	/* Configure the IP ToS mapping registers. */
	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);

	/* Configure the IEEE 802.1p priority mapping register. */
	REG_WRITE(REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);

	/* Send all frames with destination addresses matching
	 * 01:80:c2:00:00:0x to the CPU port.
	 */
	REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff);

	/* Ignore removed tag data on doubly tagged packets, disable
	 * flow control messages, force flow control priority to the
	 * highest, and send all special multicast frames to the CPU
	 * port at the highest priority.
	 */
	REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
		  0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 |
		  GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI);

	/* Program the DSA routing table. */
	for (i = 0; i < 32; i++) {
		int nexthop = 0x1f;

		if (ds->pd->rtable &&
		    i != ds->index && i < ds->dst->pd->nr_chips)
			nexthop = ds->pd->rtable[i] & 0x1f;

		REG_WRITE(REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
			  GLOBAL2_DEVICE_MAPPING_UPDATE |
			  (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) |
			  nexthop);
	}

	/* Clear all trunk masks. */
	for (i = 0; i < 8; i++)
		REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MASK,
			  0x8000 | (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
			  ((1 << ps->num_ports) - 1));

	/* Clear all trunk mappings. */
	for (i = 0; i < 16; i++)
		REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MAPPING,
			  GLOBAL2_TRUNK_MAPPING_UPDATE |
			  (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));

	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
	    mv88e6xxx_6320_family(ds)) {
		/* Send all frames with destination addresses matching
		 * 01:80:c2:00:00:2x to the CPU port.
		 */
		REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_2X, 0xffff);

		/* Initialise cross-chip port VLAN table to reset
		 * defaults.
		 */
		REG_WRITE(REG_GLOBAL2, GLOBAL2_PVT_ADDR, 0x9000);

		/* Clear the priority override table. */
		for (i = 0; i < 16; i++)
			REG_WRITE(REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE,
				  0x8000 | (i << 8));
	}

	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
	    mv88e6xxx_6320_family(ds)) {
		/* Disable ingress rate limiting by resetting all
		 * ingress rate limit registers to their initial
		 * state.
		 */
		for (i = 0; i < ps->num_ports; i++)
			REG_WRITE(REG_GLOBAL2, GLOBAL2_INGRESS_OP,
				  0x9000 | (i << 8));
	}

	/* Clear the statistics counters for all ports */
	REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP, GLOBAL_STATS_OP_FLUSH_ALL);

	/* Wait for the flush to complete. */
	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_stats_wait(ds);
	if (ret < 0)
		goto unlock;

	/* Clear all ATU entries */
	ret = _mv88e6xxx_atu_flush(ds, 0, true);
	if (ret < 0)
		goto unlock;

	/* Clear all the VTU and STU entries */
	ret = _mv88e6xxx_vtu_stu_flush(ds);
unlock:
	mutex_unlock(&ps->smi_mutex);

	return ret;
}
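
/* Disable all ports, optionally toggle the reset GPIO, then issue a software
 * reset and poll global register 0x00 until the switch reports completion.
 */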
int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
	struct gpio_desc *gpiod = ds->pd->reset;
	unsigned long timeout;
	int ret;
	int i;

	/* Set all ports to the disabled state. */
	for (i = 0; i < ps->num_ports; i++) {
		ret = REG_READ(REG_PORT(i), PORT_CONTROL);
		REG_WRITE(REG_PORT(i), PORT_CONTROL, ret & 0xfffc);
	}

	/* Wait for transmit queues to drain. */
	usleep_range(2000, 4000);

	/* If there is a gpio connected to the reset pin, toggle it */
	if (gpiod) {
		gpiod_set_value_cansleep(gpiod, 1);
		usleep_range(10000, 20000);
		gpiod_set_value_cansleep(gpiod, 0);
		usleep_range(10000, 20000);
	}

	/* Reset the switch. Keep the PPU active if requested. The PPU
	 * needs to be active to support indirect phy register access
	 * through global registers 0x18 and 0x19.
	 */
	if (ppu_active)
		REG_WRITE(REG_GLOBAL, 0x04, 0xc000);
	else
		REG_WRITE(REG_GLOBAL, 0x04, 0xc400);

	/* Wait up to one second for reset to complete. */
	timeout = jiffies + 1 * HZ;
	while (time_before(jiffies, timeout)) {
		ret = REG_READ(REG_GLOBAL, 0x00);
		if ((ret & is_reset) == is_reset)
			break;
		usleep_range(1000, 2000);
	}
	if (time_after(jiffies, timeout))
		return -ETIMEDOUT;

	return 0;
}
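
/* Paged PHY access: select the page through PHY register 0x16, perform the
 * indirect read or write, then restore page 0 before releasing the lock.
 */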
int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
	if (ret < 0)
		goto error;
	ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
error:
	_mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
	mutex_unlock(&ps->smi_mutex);
	return ret;
}

int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
			     int reg, int val)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
	if (ret < 0)
		goto error;

	ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
error:
	_mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
	mutex_unlock(&ps->smi_mutex);
	return ret;
}

static int mv88e6xxx_port_to_phy_addr(struct dsa_switch *ds, int port)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	if (port >= 0 && port < ps->num_ports)
		return port;
	return -EINVAL;
}

int
mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int addr = mv88e6xxx_port_to_phy_addr(ds, port);
	int ret;

	if (addr < 0)
		return addr;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_phy_read(ds, addr, regnum);
	mutex_unlock(&ps->smi_mutex);
	return ret;
}

int
mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int addr = mv88e6xxx_port_to_phy_addr(ds, port);
	int ret;

	if (addr < 0)
		return addr;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_phy_write(ds, addr, regnum, val);
	mutex_unlock(&ps->smi_mutex);
	return ret;
}

int
mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int addr = mv88e6xxx_port_to_phy_addr(ds, port);
	int ret;

	if (addr < 0)
		return addr;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_phy_read_indirect(ds, addr, regnum);
	mutex_unlock(&ps->smi_mutex);
	return ret;
}

int
mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
			     u16 val)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int addr = mv88e6xxx_port_to_phy_addr(ds, port);
	int ret;

	if (addr < 0)
		return addr;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_phy_write_indirect(ds, addr, regnum, val);
	mutex_unlock(&ps->smi_mutex);
	return ret;
}

#ifdef CONFIG_NET_DSA_HWMON

static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;
	int val;

	*temp = 0;

	mutex_lock(&ps->smi_mutex);

	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
	if (ret < 0)
		goto error;

	/* Enable temperature sensor */
	ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
	if (ret < 0)
		goto error;

	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
	if (ret < 0)
		goto error;

	/* Wait for temperature to stabilize */
	usleep_range(10000, 12000);

	val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
	if (val < 0) {
		ret = val;
		goto error;
	}

	/* Disable temperature sensor */
	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, val & ~(1 << 5));
	if (ret < 0)
		goto error;

	*temp = ((val & 0x1f) - 5) * 5;

error:
	_mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
	mutex_unlock(&ps->smi_mutex);
	return ret;
}

static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
{
	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
	int ret;

	*temp = 0;

	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27);
	if (ret < 0)
		return ret;

	*temp = (ret & 0xff) - 25;

	return 0;
}

int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
{
	if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
		return mv88e63xx_get_temp(ds, temp);

	return mv88e61xx_get_temp(ds, temp);
}

int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
{
	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
	int ret;

	if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
		return -EOPNOTSUPP;

	*temp = 0;

	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
	if (ret < 0)
		return ret;

	*temp = (((ret >> 8) & 0x1f) * 5) - 25;

	return 0;
}

int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
{
	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
	int ret;

	if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
		return -EOPNOTSUPP;

	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
	if (ret < 0)
		return ret;

	temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);

	return mv88e6xxx_phy_page_write(ds, phy, 6, 26,
					(ret & 0xe0ff) | (temp << 8));
}

int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
{
	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
	int ret;

	if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
		return -EOPNOTSUPP;

	*alarm = false;

	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
	if (ret < 0)
		return ret;

	*alarm = !!(ret & 0x40);

	return 0;
}
#endif /* CONFIG_NET_DSA_HWMON */

char *mv88e6xxx_lookup_name(struct device *host_dev, int sw_addr,
			    const struct mv88e6xxx_switch_id *table,
			    unsigned int num)
{
	struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev);
	int i, ret;

	if (!bus)
		return NULL;

	ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
	if (ret < 0)
		return NULL;

	/* Look up the exact switch ID */
	for (i = 0; i < num; ++i)
		if (table[i].id == ret)
			return table[i].name;

	/* Look up only the product number */
	for (i = 0; i < num; ++i) {
		if (table[i].id == (ret & PORT_SWITCH_ID_PROD_NUM_MASK)) {
			dev_warn(host_dev, "unknown revision %d, using base switch 0x%x\n",
				 ret & PORT_SWITCH_ID_REV_MASK,
				 ret & PORT_SWITCH_ID_PROD_NUM_MASK);
			return table[i].name;
		}
	}

	return NULL;
}

static int __init mv88e6xxx_init(void)
{
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
	register_switch_driver(&mv88e6131_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123)
	register_switch_driver(&mv88e6123_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
	register_switch_driver(&mv88e6352_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
	register_switch_driver(&mv88e6171_switch_driver);
#endif
	return 0;
}
module_init(mv88e6xxx_init);

static void __exit mv88e6xxx_cleanup(void)
{
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
	unregister_switch_driver(&mv88e6171_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
	unregister_switch_driver(&mv88e6352_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123)
	unregister_switch_driver(&mv88e6123_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
	unregister_switch_driver(&mv88e6131_switch_driver);
#endif
}
module_exit(mv88e6xxx_cleanup);

MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
MODULE_LICENSE("GPL");