  1. /*
  2. * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
  3. * Copyright (c) 2008 Marvell Semiconductor
  4. *
  5. * Copyright (c) 2015 CMC Electronics, Inc.
  6. * Added support for VLAN Table Unit operations
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published by
  10. * the Free Software Foundation; either version 2 of the License, or
  11. * (at your option) any later version.
  12. */
  13. #include <linux/delay.h>
  14. #include <linux/etherdevice.h>
  15. #include <linux/ethtool.h>
  16. #include <linux/if_bridge.h>
  17. #include <linux/jiffies.h>
  18. #include <linux/list.h>
  19. #include <linux/module.h>
  20. #include <linux/netdevice.h>
  21. #include <linux/gpio/consumer.h>
  22. #include <linux/phy.h>
  23. #include <net/dsa.h>
  24. #include <net/switchdev.h>
  25. #include "mv88e6xxx.h"
  26. static void assert_smi_lock(struct dsa_switch *ds)
  27. {
  28. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  29. if (unlikely(!mutex_is_locked(&ps->smi_mutex))) {
  30. dev_err(ds->master_dev, "SMI lock not held!\n");
  31. dump_stack();
  32. }
  33. }
  34. /* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
  35. * use all 32 SMI bus addresses on its SMI bus, and all switch registers
  36. * will be directly accessible on some {device address,register address}
  37. * pair. If the ADDR[4:0] pins are not strapped to zero, the switch
  38. * will only respond to SMI transactions to that specific address, and
  39. * an indirect addressing mechanism needs to be used to access its
  40. * registers.
  41. */
  42. static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
  43. {
  44. int ret;
  45. int i;
  46. for (i = 0; i < 16; i++) {
  47. ret = mdiobus_read_nested(bus, sw_addr, SMI_CMD);
  48. if (ret < 0)
  49. return ret;
  50. if ((ret & SMI_CMD_BUSY) == 0)
  51. return 0;
  52. }
  53. return -ETIMEDOUT;
  54. }
/* Read a 16-bit switch register.
 *
 * If the switch's ADDR[4:0] pins are strapped to zero (sw_addr == 0),
 * every {device address, register} pair is directly accessible on the
 * SMI bus.  Otherwise the switch only answers on sw_addr and the
 * indirect SMI_CMD/SMI_DATA mechanism described above must be used.
 *
 * Returns the 16-bit register value, or a negative errno on failure.
 */
static int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr,
				int reg)
{
	int ret;

	if (sw_addr == 0)
		return mdiobus_read_nested(bus, addr, reg);

	/* Wait for the bus to become free. */
	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
	if (ret < 0)
		return ret;

	/* Transmit the read command. */
	ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD,
				   SMI_CMD_OP_22_READ | (addr << 5) | reg);
	if (ret < 0)
		return ret;

	/* Wait for the read command to complete. */
	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
	if (ret < 0)
		return ret;

	/* Read the data. */
	ret = mdiobus_read_nested(bus, sw_addr, SMI_DATA);
	if (ret < 0)
		return ret;

	return ret & 0xffff;
}
  80. static int _mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
  81. {
  82. struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
  83. int ret;
  84. assert_smi_lock(ds);
  85. if (bus == NULL)
  86. return -EINVAL;
  87. ret = __mv88e6xxx_reg_read(bus, ds->pd->sw_addr, addr, reg);
  88. if (ret < 0)
  89. return ret;
  90. dev_dbg(ds->master_dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
  91. addr, reg, ret);
  92. return ret;
  93. }
  94. int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
  95. {
  96. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  97. int ret;
  98. mutex_lock(&ps->smi_mutex);
  99. ret = _mv88e6xxx_reg_read(ds, addr, reg);
  100. mutex_unlock(&ps->smi_mutex);
  101. return ret;
  102. }
/* Write a 16-bit switch register.
 *
 * Mirrors __mv88e6xxx_reg_read(): direct access when the switch is
 * strapped to ADDR 0, otherwise the indirect SMI_DATA/SMI_CMD
 * sequence.  Note the data register is written before the command is
 * issued.  Returns 0 on success or a negative errno.
 */
static int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
				 int reg, u16 val)
{
	int ret;

	if (sw_addr == 0)
		return mdiobus_write_nested(bus, addr, reg, val);

	/* Wait for the bus to become free. */
	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
	if (ret < 0)
		return ret;

	/* Transmit the data to write. */
	ret = mdiobus_write_nested(bus, sw_addr, SMI_DATA, val);
	if (ret < 0)
		return ret;

	/* Transmit the write command. */
	ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD,
				   SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
	if (ret < 0)
		return ret;

	/* Wait for the write command to complete. */
	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
	if (ret < 0)
		return ret;

	return 0;
}
  128. static int _mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg,
  129. u16 val)
  130. {
  131. struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
  132. assert_smi_lock(ds);
  133. if (bus == NULL)
  134. return -EINVAL;
  135. dev_dbg(ds->master_dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
  136. addr, reg, val);
  137. return __mv88e6xxx_reg_write(bus, ds->pd->sw_addr, addr, reg, val);
  138. }
  139. int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
  140. {
  141. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  142. int ret;
  143. mutex_lock(&ps->smi_mutex);
  144. ret = _mv88e6xxx_reg_write(ds, addr, reg, val);
  145. mutex_unlock(&ps->smi_mutex);
  146. return ret;
  147. }
/* Program the switch MAC address through the three directly mapped
 * global MAC registers, two address bytes per 16-bit register.
 *
 * Note: REG_WRITE() is a macro that returns from this function with a
 * negative errno if the underlying SMI write fails.
 */
int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
{
	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]);
	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);

	return 0;
}
/* Program the switch MAC address through the indirect "switch MAC"
 * register in global2: one address byte per operation, each polled for
 * completion (up to 16 reads, no delay between polls).
 *
 * Note: REG_READ()/REG_WRITE() are macros that return from this
 * function with a negative errno if the underlying SMI access fails.
 */
int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
{
	int i;
	int ret;

	for (i = 0; i < 6; i++) {
		int j;

		/* Write the MAC address byte. */
		REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
			  GLOBAL2_SWITCH_MAC_BUSY | (i << 8) | addr[i]);

		/* Wait for the write to complete. */
		for (j = 0; j < 16; j++) {
			ret = REG_READ(REG_GLOBAL2, GLOBAL2_SWITCH_MAC);
			if ((ret & GLOBAL2_SWITCH_MAC_BUSY) == 0)
				break;
		}
		if (j == 16)
			return -ETIMEDOUT;
	}

	return 0;
}
  175. static int _mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
  176. {
  177. if (addr >= 0)
  178. return _mv88e6xxx_reg_read(ds, addr, regnum);
  179. return 0xffff;
  180. }
  181. static int _mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum,
  182. u16 val)
  183. {
  184. if (addr >= 0)
  185. return _mv88e6xxx_reg_write(ds, addr, regnum, val);
  186. return 0;
  187. }
  188. #ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
/* Disable the PHY Polling Unit so PHY registers can be accessed
 * directly, then poll the global status register until the PPU reports
 * it is no longer in the polling state (up to ~1 second, sleeping
 * 1-2 ms between polls).
 *
 * Note: REG_READ()/REG_WRITE() are macros that return from this
 * function with a negative errno if the underlying SMI access fails.
 */
static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
{
	int ret;
	unsigned long timeout;

	ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
	REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
		  ret & ~GLOBAL_CONTROL_PPU_ENABLE);

	timeout = jiffies + 1 * HZ;
	while (time_before(jiffies, timeout)) {
		ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
		usleep_range(1000, 2000);
		if ((ret & GLOBAL_STATUS_PPU_MASK) !=
		    GLOBAL_STATUS_PPU_POLLING)
			return 0;
	}

	return -ETIMEDOUT;
}
/* Re-enable the PHY Polling Unit and wait (up to ~1 second) for the
 * global status register to report the polling state again.
 *
 * Note: REG_READ()/REG_WRITE() are macros that return from this
 * function with a negative errno if the underlying SMI access fails.
 */
static int mv88e6xxx_ppu_enable(struct dsa_switch *ds)
{
	int ret;
	unsigned long timeout;

	ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
	REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, ret | GLOBAL_CONTROL_PPU_ENABLE);

	timeout = jiffies + 1 * HZ;
	while (time_before(jiffies, timeout)) {
		ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
		usleep_range(1000, 2000);
		if ((ret & GLOBAL_STATUS_PPU_MASK) ==
		    GLOBAL_STATUS_PPU_POLLING)
			return 0;
	}

	return -ETIMEDOUT;
}
/* Workqueue callback (scheduled from the PPU timer) that re-enables
 * the PHY Polling Unit once PHY access has been idle.  If the mutex is
 * contended, someone is using the PHYs and the re-enable is skipped.
 */
static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
{
	struct mv88e6xxx_priv_state *ps;

	ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work);
	if (mutex_trylock(&ps->ppu_mutex)) {
		/* NOTE(review): recovers the dsa_switch by assuming the
		 * priv state is laid out immediately after it in memory;
		 * fragile - verify against the dsa_switch allocation.
		 */
		struct dsa_switch *ds = ((struct dsa_switch *)ps) - 1;

		if (mv88e6xxx_ppu_enable(ds) == 0)
			ps->ppu_disabled = 0;
		mutex_unlock(&ps->ppu_mutex);
	}
}
  233. static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
  234. {
  235. struct mv88e6xxx_priv_state *ps = (void *)_ps;
  236. schedule_work(&ps->ppu_work);
  237. }
/* Acquire exclusive PHY access: disable the PPU, or, if it is already
 * disabled, cancel the pending re-enable timer.  On success (return 0)
 * ps->ppu_mutex is left held; mv88e6xxx_ppu_access_put() releases it.
 * On failure the mutex is dropped and the errno from the disable
 * attempt is returned.
 */
static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->ppu_mutex);

	/* If the PHY polling unit is enabled, disable it so that
	 * we can access the PHY registers. If it was already
	 * disabled, cancel the timer that is going to re-enable
	 * it.
	 */
	if (!ps->ppu_disabled) {
		ret = mv88e6xxx_ppu_disable(ds);
		if (ret < 0) {
			mutex_unlock(&ps->ppu_mutex);
			return ret;
		}
		ps->ppu_disabled = 1;
	} else {
		del_timer(&ps->ppu_timer);
		ret = 0;
	}

	return ret;
}
/* Release exclusive PHY access taken by mv88e6xxx_ppu_access_get():
 * arm a 10 ms timer to re-enable the PPU and drop the ppu_mutex.
 */
static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	/* Schedule a timer to re-enable the PHY polling unit. */
	mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
	mutex_unlock(&ps->ppu_mutex);
}
  268. void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
  269. {
  270. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  271. mutex_init(&ps->ppu_mutex);
  272. INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
  273. init_timer(&ps->ppu_timer);
  274. ps->ppu_timer.data = (unsigned long)ps;
  275. ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
  276. }
  277. int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum)
  278. {
  279. int ret;
  280. ret = mv88e6xxx_ppu_access_get(ds);
  281. if (ret >= 0) {
  282. ret = mv88e6xxx_reg_read(ds, addr, regnum);
  283. mv88e6xxx_ppu_access_put(ds);
  284. }
  285. return ret;
  286. }
  287. int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
  288. int regnum, u16 val)
  289. {
  290. int ret;
  291. ret = mv88e6xxx_ppu_access_get(ds);
  292. if (ret >= 0) {
  293. ret = mv88e6xxx_reg_write(ds, addr, regnum, val);
  294. mv88e6xxx_ppu_access_put(ds);
  295. }
  296. return ret;
  297. }
  298. #endif
  299. static bool mv88e6xxx_6065_family(struct dsa_switch *ds)
  300. {
  301. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  302. switch (ps->id) {
  303. case PORT_SWITCH_ID_6031:
  304. case PORT_SWITCH_ID_6061:
  305. case PORT_SWITCH_ID_6035:
  306. case PORT_SWITCH_ID_6065:
  307. return true;
  308. }
  309. return false;
  310. }
  311. static bool mv88e6xxx_6095_family(struct dsa_switch *ds)
  312. {
  313. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  314. switch (ps->id) {
  315. case PORT_SWITCH_ID_6092:
  316. case PORT_SWITCH_ID_6095:
  317. return true;
  318. }
  319. return false;
  320. }
  321. static bool mv88e6xxx_6097_family(struct dsa_switch *ds)
  322. {
  323. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  324. switch (ps->id) {
  325. case PORT_SWITCH_ID_6046:
  326. case PORT_SWITCH_ID_6085:
  327. case PORT_SWITCH_ID_6096:
  328. case PORT_SWITCH_ID_6097:
  329. return true;
  330. }
  331. return false;
  332. }
  333. static bool mv88e6xxx_6165_family(struct dsa_switch *ds)
  334. {
  335. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  336. switch (ps->id) {
  337. case PORT_SWITCH_ID_6123:
  338. case PORT_SWITCH_ID_6161:
  339. case PORT_SWITCH_ID_6165:
  340. return true;
  341. }
  342. return false;
  343. }
  344. static bool mv88e6xxx_6185_family(struct dsa_switch *ds)
  345. {
  346. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  347. switch (ps->id) {
  348. case PORT_SWITCH_ID_6121:
  349. case PORT_SWITCH_ID_6122:
  350. case PORT_SWITCH_ID_6152:
  351. case PORT_SWITCH_ID_6155:
  352. case PORT_SWITCH_ID_6182:
  353. case PORT_SWITCH_ID_6185:
  354. case PORT_SWITCH_ID_6108:
  355. case PORT_SWITCH_ID_6131:
  356. return true;
  357. }
  358. return false;
  359. }
  360. static bool mv88e6xxx_6320_family(struct dsa_switch *ds)
  361. {
  362. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  363. switch (ps->id) {
  364. case PORT_SWITCH_ID_6320:
  365. case PORT_SWITCH_ID_6321:
  366. return true;
  367. }
  368. return false;
  369. }
  370. static bool mv88e6xxx_6351_family(struct dsa_switch *ds)
  371. {
  372. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  373. switch (ps->id) {
  374. case PORT_SWITCH_ID_6171:
  375. case PORT_SWITCH_ID_6175:
  376. case PORT_SWITCH_ID_6350:
  377. case PORT_SWITCH_ID_6351:
  378. return true;
  379. }
  380. return false;
  381. }
  382. static bool mv88e6xxx_6352_family(struct dsa_switch *ds)
  383. {
  384. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  385. switch (ps->id) {
  386. case PORT_SWITCH_ID_6172:
  387. case PORT_SWITCH_ID_6176:
  388. case PORT_SWITCH_ID_6240:
  389. case PORT_SWITCH_ID_6352:
  390. return true;
  391. }
  392. return false;
  393. }
  394. /* We expect the switch to perform auto negotiation if there is a real
  395. * phy. However, in the case of a fixed link phy, we force the port
  396. * settings from the fixed link settings.
  397. */
  398. void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
  399. struct phy_device *phydev)
  400. {
  401. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  402. u32 reg;
  403. int ret;
  404. if (!phy_is_pseudo_fixed_link(phydev))
  405. return;
  406. mutex_lock(&ps->smi_mutex);
  407. ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
  408. if (ret < 0)
  409. goto out;
  410. reg = ret & ~(PORT_PCS_CTRL_LINK_UP |
  411. PORT_PCS_CTRL_FORCE_LINK |
  412. PORT_PCS_CTRL_DUPLEX_FULL |
  413. PORT_PCS_CTRL_FORCE_DUPLEX |
  414. PORT_PCS_CTRL_UNFORCED);
  415. reg |= PORT_PCS_CTRL_FORCE_LINK;
  416. if (phydev->link)
  417. reg |= PORT_PCS_CTRL_LINK_UP;
  418. if (mv88e6xxx_6065_family(ds) && phydev->speed > SPEED_100)
  419. goto out;
  420. switch (phydev->speed) {
  421. case SPEED_1000:
  422. reg |= PORT_PCS_CTRL_1000;
  423. break;
  424. case SPEED_100:
  425. reg |= PORT_PCS_CTRL_100;
  426. break;
  427. case SPEED_10:
  428. reg |= PORT_PCS_CTRL_10;
  429. break;
  430. default:
  431. pr_info("Unknown speed");
  432. goto out;
  433. }
  434. reg |= PORT_PCS_CTRL_FORCE_DUPLEX;
  435. if (phydev->duplex == DUPLEX_FULL)
  436. reg |= PORT_PCS_CTRL_DUPLEX_FULL;
  437. if ((mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds)) &&
  438. (port >= ps->num_ports - 2)) {
  439. if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
  440. reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK;
  441. if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
  442. reg |= PORT_PCS_CTRL_RGMII_DELAY_TXCLK;
  443. if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
  444. reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK |
  445. PORT_PCS_CTRL_RGMII_DELAY_TXCLK);
  446. }
  447. _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_PCS_CTRL, reg);
  448. out:
  449. mutex_unlock(&ps->smi_mutex);
  450. }
  451. static int _mv88e6xxx_stats_wait(struct dsa_switch *ds)
  452. {
  453. int ret;
  454. int i;
  455. for (i = 0; i < 10; i++) {
  456. ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_OP);
  457. if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
  458. return 0;
  459. }
  460. return -ETIMEDOUT;
  461. }
  462. static int _mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
  463. {
  464. int ret;
  465. if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
  466. port = (port + 1) << 5;
  467. /* Snapshot the hardware statistics counters for this port. */
  468. ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
  469. GLOBAL_STATS_OP_CAPTURE_PORT |
  470. GLOBAL_STATS_OP_HIST_RX_TX | port);
  471. if (ret < 0)
  472. return ret;
  473. /* Wait for the snapshotting to complete. */
  474. ret = _mv88e6xxx_stats_wait(ds);
  475. if (ret < 0)
  476. return ret;
  477. return 0;
  478. }
/* Read one captured 32-bit statistics counter into *val, assembled
 * from two 16-bit halves (GLOBAL_STATS_COUNTER_32 is the high half).
 * On any intermediate failure *val is left at 0 and the error is NOT
 * propagated to the caller.  Caller holds the SMI mutex and must have
 * captured a snapshot first.
 */
static void _mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
{
	u32 _val;
	int ret;

	*val = 0;

	/* Issue the read-captured operation for this counter. */
	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
				   GLOBAL_STATS_OP_READ_CAPTURED |
				   GLOBAL_STATS_OP_HIST_RX_TX | stat);
	if (ret < 0)
		return;

	ret = _mv88e6xxx_stats_wait(ds);
	if (ret < 0)
		return;

	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
	if (ret < 0)
		return;

	_val = ret << 16;

	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
	if (ret < 0)
		return;

	*val = _val | ret;
}
/* Hardware statistics table.  Fields per entry: ethtool string, size
 * of the counter in bytes, register/counter offset, and the bank it
 * lives in (BANK0/BANK1 via the captured-stats unit, PORT for per-port
 * registers).  Availability per chip family is decided by
 * mv88e6xxx_has_stat().
 */
static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
	{ "in_good_octets", 8, 0x00, BANK0, },
	{ "in_bad_octets", 4, 0x02, BANK0, },
	{ "in_unicast", 4, 0x04, BANK0, },
	{ "in_broadcasts", 4, 0x06, BANK0, },
	{ "in_multicasts", 4, 0x07, BANK0, },
	{ "in_pause", 4, 0x16, BANK0, },
	{ "in_undersize", 4, 0x18, BANK0, },
	{ "in_fragments", 4, 0x19, BANK0, },
	{ "in_oversize", 4, 0x1a, BANK0, },
	{ "in_jabber", 4, 0x1b, BANK0, },
	{ "in_rx_error", 4, 0x1c, BANK0, },
	{ "in_fcs_error", 4, 0x1d, BANK0, },
	{ "out_octets", 8, 0x0e, BANK0, },
	{ "out_unicast", 4, 0x10, BANK0, },
	{ "out_broadcasts", 4, 0x13, BANK0, },
	{ "out_multicasts", 4, 0x12, BANK0, },
	{ "out_pause", 4, 0x15, BANK0, },
	{ "excessive", 4, 0x11, BANK0, },
	{ "collisions", 4, 0x1e, BANK0, },
	{ "deferred", 4, 0x05, BANK0, },
	{ "single", 4, 0x14, BANK0, },
	{ "multiple", 4, 0x17, BANK0, },
	{ "out_fcs_error", 4, 0x03, BANK0, },
	{ "late", 4, 0x1f, BANK0, },
	{ "hist_64bytes", 4, 0x08, BANK0, },
	{ "hist_65_127bytes", 4, 0x09, BANK0, },
	{ "hist_128_255bytes", 4, 0x0a, BANK0, },
	{ "hist_256_511bytes", 4, 0x0b, BANK0, },
	{ "hist_512_1023bytes", 4, 0x0c, BANK0, },
	{ "hist_1024_max_bytes", 4, 0x0d, BANK0, },
	{ "sw_in_discards", 4, 0x10, PORT, },
	{ "sw_in_filtered", 2, 0x12, PORT, },
	{ "sw_out_filtered", 2, 0x13, PORT, },
	{ "in_discards", 4, 0x00 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "in_filtered", 4, 0x01 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "in_accepted", 4, 0x02 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "in_bad_accepted", 4, 0x03 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "in_good_avb_class_a", 4, 0x04 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "in_good_avb_class_b", 4, 0x05 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "in_bad_avb_class_a", 4, 0x06 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "in_bad_avb_class_b", 4, 0x07 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "tcam_counter_0", 4, 0x08 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "tcam_counter_1", 4, 0x09 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "tcam_counter_2", 4, 0x0a | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "tcam_counter_3", 4, 0x0b | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "in_da_unknown", 4, 0x0e | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "in_management", 4, 0x0f | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "out_queue_0", 4, 0x10 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "out_queue_1", 4, 0x11 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "out_queue_2", 4, 0x12 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "out_queue_3", 4, 0x13 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "out_queue_4", 4, 0x14 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "out_queue_5", 4, 0x15 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "out_queue_6", 4, 0x16 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "out_queue_7", 4, 0x17 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "out_cut_through", 4, 0x18 | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "out_octets_a", 4, 0x1a | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "out_octets_b", 4, 0x1b | GLOBAL_STATS_OP_BANK_1, BANK1, },
	{ "out_management", 4, 0x1f | GLOBAL_STATS_OP_BANK_1, BANK1, },
};
  562. static bool mv88e6xxx_has_stat(struct dsa_switch *ds,
  563. struct mv88e6xxx_hw_stat *stat)
  564. {
  565. switch (stat->type) {
  566. case BANK0:
  567. return true;
  568. case BANK1:
  569. return mv88e6xxx_6320_family(ds);
  570. case PORT:
  571. return mv88e6xxx_6095_family(ds) ||
  572. mv88e6xxx_6185_family(ds) ||
  573. mv88e6xxx_6097_family(ds) ||
  574. mv88e6xxx_6165_family(ds) ||
  575. mv88e6xxx_6351_family(ds) ||
  576. mv88e6xxx_6352_family(ds);
  577. }
  578. return false;
  579. }
  580. static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds,
  581. struct mv88e6xxx_hw_stat *s,
  582. int port)
  583. {
  584. u32 low;
  585. u32 high = 0;
  586. int ret;
  587. u64 value;
  588. switch (s->type) {
  589. case PORT:
  590. ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), s->reg);
  591. if (ret < 0)
  592. return UINT64_MAX;
  593. low = ret;
  594. if (s->sizeof_stat == 4) {
  595. ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
  596. s->reg + 1);
  597. if (ret < 0)
  598. return UINT64_MAX;
  599. high = ret;
  600. }
  601. break;
  602. case BANK0:
  603. case BANK1:
  604. _mv88e6xxx_stats_read(ds, s->reg, &low);
  605. if (s->sizeof_stat == 8)
  606. _mv88e6xxx_stats_read(ds, s->reg + 1, &high);
  607. }
  608. value = (((u64)high) << 16) | low;
  609. return value;
  610. }
  611. void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
  612. {
  613. struct mv88e6xxx_hw_stat *stat;
  614. int i, j;
  615. for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
  616. stat = &mv88e6xxx_hw_stats[i];
  617. if (mv88e6xxx_has_stat(ds, stat)) {
  618. memcpy(data + j * ETH_GSTRING_LEN, stat->string,
  619. ETH_GSTRING_LEN);
  620. j++;
  621. }
  622. }
  623. }
  624. int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
  625. {
  626. struct mv88e6xxx_hw_stat *stat;
  627. int i, j;
  628. for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
  629. stat = &mv88e6xxx_hw_stats[i];
  630. if (mv88e6xxx_has_stat(ds, stat))
  631. j++;
  632. }
  633. return j;
  634. }
  635. void
  636. mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
  637. int port, uint64_t *data)
  638. {
  639. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  640. struct mv88e6xxx_hw_stat *stat;
  641. int ret;
  642. int i, j;
  643. mutex_lock(&ps->smi_mutex);
  644. ret = _mv88e6xxx_stats_snapshot(ds, port);
  645. if (ret < 0) {
  646. mutex_unlock(&ps->smi_mutex);
  647. return;
  648. }
  649. for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
  650. stat = &mv88e6xxx_hw_stats[i];
  651. if (mv88e6xxx_has_stat(ds, stat)) {
  652. data[j] = _mv88e6xxx_get_ethtool_stat(ds, stat, port);
  653. j++;
  654. }
  655. }
  656. mutex_unlock(&ps->smi_mutex);
  657. }
  658. int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
  659. {
  660. return 32 * sizeof(u16);
  661. }
  662. void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
  663. struct ethtool_regs *regs, void *_p)
  664. {
  665. u16 *p = _p;
  666. int i;
  667. regs->version = 0;
  668. memset(p, 0xff, 32 * sizeof(u16));
  669. for (i = 0; i < 32; i++) {
  670. int ret;
  671. ret = mv88e6xxx_reg_read(ds, REG_PORT(port), i);
  672. if (ret >= 0)
  673. p[i] = ret;
  674. }
  675. }
  676. static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
  677. u16 mask)
  678. {
  679. unsigned long timeout = jiffies + HZ / 10;
  680. while (time_before(jiffies, timeout)) {
  681. int ret;
  682. ret = _mv88e6xxx_reg_read(ds, reg, offset);
  683. if (ret < 0)
  684. return ret;
  685. if (!(ret & mask))
  686. return 0;
  687. usleep_range(1000, 2000);
  688. }
  689. return -ETIMEDOUT;
  690. }
  691. static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
  692. {
  693. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  694. int ret;
  695. mutex_lock(&ps->smi_mutex);
  696. ret = _mv88e6xxx_wait(ds, reg, offset, mask);
  697. mutex_unlock(&ps->smi_mutex);
  698. return ret;
  699. }
  700. static int _mv88e6xxx_phy_wait(struct dsa_switch *ds)
  701. {
  702. return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
  703. GLOBAL2_SMI_OP_BUSY);
  704. }
  705. int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
  706. {
  707. return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
  708. GLOBAL2_EEPROM_OP_LOAD);
  709. }
  710. int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
  711. {
  712. return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
  713. GLOBAL2_EEPROM_OP_BUSY);
  714. }
  715. static int _mv88e6xxx_atu_wait(struct dsa_switch *ds)
  716. {
  717. return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_ATU_OP,
  718. GLOBAL_ATU_OP_BUSY);
  719. }
/* Read PHY register @regnum of PHY @addr through the Global 2 indirect
 * SMI mechanism.
 *
 * Issues a clause-22 read op, waits for the busy bit to clear, then
 * returns the latched data register (or a negative error code).
 * Caller must hold the SMI mutex.
 */
static int _mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr,
					int regnum)
{
	int ret;

	/* PHY address sits in bits 9:5 of the op word, regnum in 4:0 */
	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
				   GLOBAL2_SMI_OP_22_READ | (addr << 5) |
				   regnum);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_phy_wait(ds);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA);
}
  734. static int _mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr,
  735. int regnum, u16 val)
  736. {
  737. int ret;
  738. ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
  739. if (ret < 0)
  740. return ret;
  741. ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
  742. GLOBAL2_SMI_OP_22_WRITE | (addr << 5) |
  743. regnum);
  744. return _mv88e6xxx_phy_wait(ds);
  745. }
/* ethtool get_eee: report the EEE configuration and status of @port.
 *
 * Reads PHY register 16 for the configured bits (0x0200 = EEE enabled,
 * 0x0100 = Tx LPI enabled -- NOTE(review): register/bit meanings come
 * from the PHY datasheet, confirm there) and PORT_STATUS for whether
 * EEE is currently active. Returns 0 or a negative error code.
 */
int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int reg;

	mutex_lock(&ps->smi_mutex);

	reg = _mv88e6xxx_phy_read_indirect(ds, port, 16);
	if (reg < 0)
		goto out;

	e->eee_enabled = !!(reg & 0x0200);
	e->tx_lpi_enabled = !!(reg & 0x0100);

	reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
	if (reg < 0)
		goto out;

	e->eee_active = !!(reg & PORT_STATUS_EEE);
	/* reg doubles as the return value; 0 means success */
	reg = 0;
out:
	mutex_unlock(&ps->smi_mutex);
	return reg;
}
/* ethtool set_eee: update the EEE configuration bits of @port.
 *
 * Read-modify-write of PHY register 16 bits 0x0200 (EEE enabled) and
 * 0x0100 (Tx LPI enabled) -- NOTE(review): bit semantics mirror
 * mv88e6xxx_get_eee(); confirm against the PHY datasheet.
 * Returns 0 or a negative error code.
 */
int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
		      struct phy_device *phydev, struct ethtool_eee *e)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int reg;
	int ret;

	mutex_lock(&ps->smi_mutex);

	ret = _mv88e6xxx_phy_read_indirect(ds, port, 16);
	if (ret < 0)
		goto out;

	/* clear both control bits, then set the requested ones */
	reg = ret & ~0x0300;
	if (e->eee_enabled)
		reg |= 0x0200;
	if (e->tx_lpi_enabled)
		reg |= 0x0100;

	ret = _mv88e6xxx_phy_write_indirect(ds, port, 16, reg);
out:
	mutex_unlock(&ps->smi_mutex);
	return ret;
}
  785. static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, u16 cmd)
  786. {
  787. int ret;
  788. ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
  789. if (ret < 0)
  790. return ret;
  791. return _mv88e6xxx_atu_wait(ds);
  792. }
/* Program GLOBAL_ATU_DATA from @entry.
 *
 * The entry state always goes in. For a used entry, either the trunk
 * ID (with the trunk flag set) or the port vector is merged in at its
 * field offset. Caller must hold the SMI mutex.
 */
static int _mv88e6xxx_atu_data_write(struct dsa_switch *ds,
				     struct mv88e6xxx_atu_entry *entry)
{
	u16 data = entry->state & GLOBAL_ATU_DATA_STATE_MASK;

	if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
		unsigned int mask, shift;

		if (entry->trunk) {
			data |= GLOBAL_ATU_DATA_TRUNK;
			mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
			shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
		} else {
			mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
			shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
		}

		data |= (entry->portv_trunkid << shift) & mask;
	}

	return _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA, data);
}
/* Run an ATU flush or move operation described by @entry.
 *
 * A non-zero entry->fid restricts the operation to that single address
 * database (and requires programming GLOBAL_ATU_FID first); fid 0
 * selects the variants that act on all databases. @static_too chooses
 * between the ALL and NON_STATIC flavors. Caller must hold the SMI
 * mutex.
 */
static int _mv88e6xxx_atu_flush_move(struct dsa_switch *ds,
				     struct mv88e6xxx_atu_entry *entry,
				     bool static_too)
{
	int op;
	int err;

	err = _mv88e6xxx_atu_wait(ds);
	if (err)
		return err;

	err = _mv88e6xxx_atu_data_write(ds, entry);
	if (err)
		return err;

	if (entry->fid) {
		err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID,
					   entry->fid);
		if (err)
			return err;

		op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB :
			GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB;
	} else {
		op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL :
			GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC;
	}

	return _mv88e6xxx_atu_cmd(ds, op);
}
  836. static int _mv88e6xxx_atu_flush(struct dsa_switch *ds, u16 fid, bool static_too)
  837. {
  838. struct mv88e6xxx_atu_entry entry = {
  839. .fid = fid,
  840. .state = 0, /* EntryState bits must be 0 */
  841. };
  842. return _mv88e6xxx_atu_flush_move(ds, &entry, static_too);
  843. }
  844. static int _mv88e6xxx_atu_move(struct dsa_switch *ds, u16 fid, int from_port,
  845. int to_port, bool static_too)
  846. {
  847. struct mv88e6xxx_atu_entry entry = {
  848. .trunk = false,
  849. .fid = fid,
  850. };
  851. /* EntryState bits must be 0xF */
  852. entry.state = GLOBAL_ATU_DATA_STATE_MASK;
  853. /* ToPort and FromPort are respectively in PortVec bits 7:4 and 3:0 */
  854. entry.portv_trunkid = (to_port & 0x0f) << 4;
  855. entry.portv_trunkid |= from_port & 0x0f;
  856. return _mv88e6xxx_atu_flush_move(ds, &entry, static_too);
  857. }
  858. static int _mv88e6xxx_atu_remove(struct dsa_switch *ds, u16 fid, int port,
  859. bool static_too)
  860. {
  861. /* Destination port 0xF means remove the entries */
  862. return _mv88e6xxx_atu_move(ds, fid, port, 0x0f, static_too);
  863. }
/* Human-readable names for the PortState field of PORT_CONTROL,
 * indexed by PORT_CONTROL_STATE_* value; used in debug messages.
 */
static const char * const mv88e6xxx_port_state_names[] = {
	[PORT_CONTROL_STATE_DISABLED] = "Disabled",
	[PORT_CONTROL_STATE_BLOCKING] = "Blocking/Listening",
	[PORT_CONTROL_STATE_LEARNING] = "Learning",
	[PORT_CONTROL_STATE_FORWARDING] = "Forwarding",
};
/* Set the 802.1D state of @port to @state (a PORT_CONTROL_STATE_*
 * value), flushing dynamic ATU entries first when the transition
 * requires it. Caller must hold the SMI mutex.
 * Returns 0 (also when the state is already set) or a negative error.
 */
static int _mv88e6xxx_port_state(struct dsa_switch *ds, int port, u8 state)
{
	int reg, ret = 0;
	u8 oldstate;

	reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL);
	if (reg < 0)
		return reg;

	oldstate = reg & PORT_CONTROL_STATE_MASK;

	if (oldstate != state) {
		/* Flush forwarding database if we're moving a port
		 * from Learning or Forwarding state to Disabled or
		 * Blocking or Listening state.
		 */
		if ((oldstate == PORT_CONTROL_STATE_LEARNING ||
		     oldstate == PORT_CONTROL_STATE_FORWARDING)
		    && (state == PORT_CONTROL_STATE_DISABLED ||
			state == PORT_CONTROL_STATE_BLOCKING)) {
			/* fid 0 flushes the port's dynamic entries in
			 * every address database
			 */
			ret = _mv88e6xxx_atu_remove(ds, 0, port, false);
			if (ret)
				return ret;
		}

		reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL,
					   reg);
		if (ret)
			return ret;

		netdev_dbg(ds->ports[port], "PortState %s (was %s)\n",
			   mv88e6xxx_port_state_names[state],
			   mv88e6xxx_port_state_names[oldstate]);
	}

	return ret;
}
/* Program @port's port-based VLAN table (PORT_BASE_VLAN).
 *
 * CPU and DSA ports may egress to every port. A user port may egress
 * to the other members of its bridge group (if bridged) and to the
 * CPU/DSA ports; the port itself is always excluded.
 * Caller must hold the SMI mutex.
 */
static int _mv88e6xxx_port_based_vlan_map(struct dsa_switch *ds, int port)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct net_device *bridge = ps->ports[port].bridge_dev;
	const u16 mask = (1 << ps->num_ports) - 1;
	u16 output_ports = 0;
	int reg;
	int i;

	/* allow CPU port or DSA link(s) to send frames to every port */
	if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
		output_ports = mask;
	} else {
		for (i = 0; i < ps->num_ports; ++i) {
			/* allow sending frames to every group member */
			if (bridge && ps->ports[i].bridge_dev == bridge)
				output_ports |= BIT(i);

			/* allow sending frames to CPU port and DSA link(s) */
			if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
				output_ports |= BIT(i);
		}
	}

	/* prevent frames from going back out of the port they came in on */
	output_ports &= ~BIT(port);

	reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_BASE_VLAN);
	if (reg < 0)
		return reg;

	reg &= ~mask;
	reg |= output_ports & mask;

	return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN, reg);
}
  932. int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state)
  933. {
  934. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  935. int stp_state;
  936. switch (state) {
  937. case BR_STATE_DISABLED:
  938. stp_state = PORT_CONTROL_STATE_DISABLED;
  939. break;
  940. case BR_STATE_BLOCKING:
  941. case BR_STATE_LISTENING:
  942. stp_state = PORT_CONTROL_STATE_BLOCKING;
  943. break;
  944. case BR_STATE_LEARNING:
  945. stp_state = PORT_CONTROL_STATE_LEARNING;
  946. break;
  947. case BR_STATE_FORWARDING:
  948. default:
  949. stp_state = PORT_CONTROL_STATE_FORWARDING;
  950. break;
  951. }
  952. /* mv88e6xxx_port_stp_update may be called with softirqs disabled,
  953. * so we can not update the port state directly but need to schedule it.
  954. */
  955. ps->ports[port].state = stp_state;
  956. set_bit(port, ps->port_state_update_mask);
  957. schedule_work(&ps->bridge_work);
  958. return 0;
  959. }
/* Access @port's default VID (PORT_DEFAULT_VLAN).
 *
 * When @new is non-NULL the register is rewritten with *new; when @old
 * is non-NULL it receives the VID that was programmed before the call.
 * Caller must hold the SMI mutex. Returns 0 or a negative error.
 */
static int _mv88e6xxx_port_pvid(struct dsa_switch *ds, int port, u16 *new,
				u16 *old)
{
	u16 pvid;
	int ret;

	ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_DEFAULT_VLAN);
	if (ret < 0)
		return ret;

	pvid = ret & PORT_DEFAULT_VLAN_MASK;

	if (new) {
		ret &= ~PORT_DEFAULT_VLAN_MASK;
		ret |= *new & PORT_DEFAULT_VLAN_MASK;

		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_DEFAULT_VLAN, ret);
		if (ret < 0)
			return ret;

		netdev_dbg(ds->ports[port], "DefaultVID %d (was %d)\n", *new,
			   pvid);
	}

	if (old)
		*old = pvid;

	return 0;
}
  983. static int _mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *pvid)
  984. {
  985. return _mv88e6xxx_port_pvid(ds, port, NULL, pvid);
  986. }
  987. static int _mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 pvid)
  988. {
  989. return _mv88e6xxx_port_pvid(ds, port, &pvid, NULL);
  990. }
  991. static int _mv88e6xxx_vtu_wait(struct dsa_switch *ds)
  992. {
  993. return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_VTU_OP,
  994. GLOBAL_VTU_OP_BUSY);
  995. }
  996. static int _mv88e6xxx_vtu_cmd(struct dsa_switch *ds, u16 op)
  997. {
  998. int ret;
  999. ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_OP, op);
  1000. if (ret < 0)
  1001. return ret;
  1002. return _mv88e6xxx_vtu_wait(ds);
  1003. }
  1004. static int _mv88e6xxx_vtu_stu_flush(struct dsa_switch *ds)
  1005. {
  1006. int ret;
  1007. ret = _mv88e6xxx_vtu_wait(ds);
  1008. if (ret < 0)
  1009. return ret;
  1010. return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_FLUSH_ALL);
  1011. }
/* Extract the per-port fields of the current VTU/STU entry into
 * @entry->data.
 *
 * The three GLOBAL_VTU_DATA registers pack one 4-bit field per port,
 * four ports per register. @nibble_offset selects the sub-field within
 * each port's nibble: callers pass 0 for VTU member tags and 2 for STU
 * port states. Caller must hold the SMI mutex.
 */
static int _mv88e6xxx_vtu_stu_data_read(struct dsa_switch *ds,
					struct mv88e6xxx_vtu_stu_entry *entry,
					unsigned int nibble_offset)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u16 regs[3];
	int i;
	int ret;

	for (i = 0; i < 3; ++i) {
		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
					  GLOBAL_VTU_DATA_0_3 + i);
		if (ret < 0)
			return ret;

		regs[i] = ret;
	}

	for (i = 0; i < ps->num_ports; ++i) {
		unsigned int shift = (i % 4) * 4 + nibble_offset;
		u16 reg = regs[i / 4];

		entry->data[i] = (reg >> shift) & GLOBAL_VTU_STU_DATA_MASK;
	}

	return 0;
}
/* Pack the per-port fields of @entry into the three GLOBAL_VTU_DATA
 * registers (four ports per register), clearing everything else.
 * @nibble_offset selects the sub-field as in
 * _mv88e6xxx_vtu_stu_data_read(): 0 for VTU member tags, 2 for STU
 * port states. Caller must hold the SMI mutex.
 */
static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds,
					 struct mv88e6xxx_vtu_stu_entry *entry,
					 unsigned int nibble_offset)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u16 regs[3] = { 0 };
	int i;
	int ret;

	for (i = 0; i < ps->num_ports; ++i) {
		unsigned int shift = (i % 4) * 4 + nibble_offset;
		u8 data = entry->data[i];

		regs[i / 4] |= (data & GLOBAL_VTU_STU_DATA_MASK) << shift;
	}

	for (i = 0; i < 3; ++i) {
		ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL,
					   GLOBAL_VTU_DATA_0_3 + i, regs[i]);
		if (ret < 0)
			return ret;
	}

	return 0;
}
  1055. static int _mv88e6xxx_vtu_vid_write(struct dsa_switch *ds, u16 vid)
  1056. {
  1057. return _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID,
  1058. vid & GLOBAL_VTU_VID_MASK);
  1059. }
/* Fetch into @entry the VTU entry following the VID last written with
 * _mv88e6xxx_vtu_vid_write().
 *
 * Member-tag data is only read for entries flagged valid; chip families
 * with separate FID/SID registers also get those fields filled in.
 * Caller must hold the SMI mutex. Returns 0 or a negative error.
 */
static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds,
				  struct mv88e6xxx_vtu_stu_entry *entry)
{
	struct mv88e6xxx_vtu_stu_entry next = { 0 };
	int ret;

	ret = _mv88e6xxx_vtu_wait(ds);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_GET_NEXT);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
	if (ret < 0)
		return ret;

	next.vid = ret & GLOBAL_VTU_VID_MASK;
	next.valid = !!(ret & GLOBAL_VTU_VID_VALID);

	if (next.valid) {
		/* nibble offset 0: the VTU member tags */
		ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 0);
		if (ret < 0)
			return ret;

		if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
		    mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
			ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
						  GLOBAL_VTU_FID);
			if (ret < 0)
				return ret;

			next.fid = ret & GLOBAL_VTU_FID_MASK;

			ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
						  GLOBAL_VTU_SID);
			if (ret < 0)
				return ret;

			next.sid = ret & GLOBAL_VTU_SID_MASK;
		}
	}

	*entry = next;
	return 0;
}
/* switchdev VLAN dump: walk the whole VTU and invoke @cb once for each
 * VLAN that @port is a member of, reporting untagged and PVID flags.
 *
 * Starting the GetNext walk at GLOBAL_VTU_VID_MASK wraps around to the
 * lowest-numbered entry. Returns 0 or the first error encountered.
 */
int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
			     struct switchdev_obj_port_vlan *vlan,
			     int (*cb)(struct switchdev_obj *obj))
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct mv88e6xxx_vtu_stu_entry next;
	u16 pvid;
	int err;

	mutex_lock(&ps->smi_mutex);

	err = _mv88e6xxx_port_pvid_get(ds, port, &pvid);
	if (err)
		goto unlock;

	err = _mv88e6xxx_vtu_vid_write(ds, GLOBAL_VTU_VID_MASK);
	if (err)
		goto unlock;

	do {
		err = _mv88e6xxx_vtu_getnext(ds, &next);
		if (err)
			break;

		if (!next.valid)
			break;

		/* skip VLANs this port is not a member of */
		if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
			continue;

		/* reinit and dump this VLAN obj */
		vlan->vid_begin = vlan->vid_end = next.vid;
		vlan->flags = 0;

		if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
			vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (next.vid == pvid)
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;

		err = cb(&vlan->obj);
		if (err)
			break;
	} while (next.vid < GLOBAL_VTU_VID_MASK);

unlock:
	mutex_unlock(&ps->smi_mutex);

	return err;
}
/* Load @entry into the VTU, or purge it when @entry->valid is false.
 *
 * For an invalid entry the member-tag/SID/FID writes are skipped and
 * the VID valid bit is left clear, which makes the LOAD_PURGE operation
 * remove the entry instead of installing it.
 * Caller must hold the SMI mutex.
 */
static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds,
				    struct mv88e6xxx_vtu_stu_entry *entry)
{
	u16 reg = 0;
	int ret;

	ret = _mv88e6xxx_vtu_wait(ds);
	if (ret < 0)
		return ret;

	if (!entry->valid)
		goto loadpurge;

	/* Write port member tags */
	ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 0);
	if (ret < 0)
		return ret;

	if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
	    mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
		reg = entry->sid & GLOBAL_VTU_SID_MASK;
		ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
		if (ret < 0)
			return ret;

		reg = entry->fid & GLOBAL_VTU_FID_MASK;
		ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_FID, reg);
		if (ret < 0)
			return ret;
	}

	reg = GLOBAL_VTU_VID_VALID;
loadpurge:
	reg |= entry->vid & GLOBAL_VTU_VID_MASK;
	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_LOAD_PURGE);
}
/* Fetch into @entry the STU entry following SID @sid.
 *
 * Per-port state nibbles are only read for entries flagged valid.
 * Caller must hold the SMI mutex. Returns 0 or a negative error.
 */
static int _mv88e6xxx_stu_getnext(struct dsa_switch *ds, u8 sid,
				  struct mv88e6xxx_vtu_stu_entry *entry)
{
	struct mv88e6xxx_vtu_stu_entry next = { 0 };
	int ret;

	ret = _mv88e6xxx_vtu_wait(ds);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID,
				   sid & GLOBAL_VTU_SID_MASK);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_GET_NEXT);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_SID);
	if (ret < 0)
		return ret;

	next.sid = ret & GLOBAL_VTU_SID_MASK;

	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
	if (ret < 0)
		return ret;

	next.valid = !!(ret & GLOBAL_VTU_VID_VALID);

	if (next.valid) {
		/* nibble offset 2: the STU port states */
		ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 2);
		if (ret < 0)
			return ret;
	}

	*entry = next;
	return 0;
}
/* Load @entry into the STU, or purge it when @entry->valid is false.
 *
 * Mirrors _mv88e6xxx_vtu_loadpurge(): an invalid entry skips the port
 * state writes and leaves the valid bit clear, turning the LOAD_PURGE
 * operation into a removal. Caller must hold the SMI mutex.
 */
static int _mv88e6xxx_stu_loadpurge(struct dsa_switch *ds,
				    struct mv88e6xxx_vtu_stu_entry *entry)
{
	u16 reg = 0;
	int ret;

	ret = _mv88e6xxx_vtu_wait(ds);
	if (ret < 0)
		return ret;

	if (!entry->valid)
		goto loadpurge;

	/* Write port states */
	ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 2);
	if (ret < 0)
		return ret;

	reg = GLOBAL_VTU_VID_VALID;
loadpurge:
	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
	if (ret < 0)
		return ret;

	reg = entry->sid & GLOBAL_VTU_SID_MASK;
	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_LOAD_PURGE);
}
/* Access @port's default FID, which is split across two registers.
 *
 * If @new is non-NULL the FID is rewritten; if @old is non-NULL it
 * receives the previous value. Caller must hold the SMI mutex.
 * Returns 0 or a negative error.
 */
static int _mv88e6xxx_port_fid(struct dsa_switch *ds, int port, u16 *new,
			       u16 *old)
{
	u16 fid;
	int ret;

	/* Port's default FID bits 3:0 are located in reg 0x06, offset 12 */
	ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_BASE_VLAN);
	if (ret < 0)
		return ret;

	fid = (ret & PORT_BASE_VLAN_FID_3_0_MASK) >> 12;

	if (new) {
		ret &= ~PORT_BASE_VLAN_FID_3_0_MASK;
		ret |= (*new << 12) & PORT_BASE_VLAN_FID_3_0_MASK;

		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN,
					   ret);
		if (ret < 0)
			return ret;
	}

	/* Port's default FID bits 11:4 are located in reg 0x05, offset 0 */
	ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL_1);
	if (ret < 0)
		return ret;

	fid |= (ret & PORT_CONTROL_1_FID_11_4_MASK) << 4;

	if (new) {
		ret &= ~PORT_CONTROL_1_FID_11_4_MASK;
		ret |= (*new >> 4) & PORT_CONTROL_1_FID_11_4_MASK;

		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1,
					   ret);
		if (ret < 0)
			return ret;

		netdev_dbg(ds->ports[port], "FID %d (was %d)\n", *new, fid);
	}

	if (old)
		*old = fid;

	return 0;
}
  1260. static int _mv88e6xxx_port_fid_get(struct dsa_switch *ds, int port, u16 *fid)
  1261. {
  1262. return _mv88e6xxx_port_fid(ds, port, NULL, fid);
  1263. }
  1264. static int _mv88e6xxx_port_fid_set(struct dsa_switch *ds, int port, u16 fid)
  1265. {
  1266. return _mv88e6xxx_port_fid(ds, port, &fid, NULL);
  1267. }
/* Pick an unused FID, flush its address database, and return it in
 * @fid.
 *
 * Scans both the per-port default FIDs and the FIDs referenced by
 * valid VTU entries. Returns -ENOSPC when all MV88E6XXX_N_FID values
 * are taken. Caller must hold the SMI mutex.
 */
static int _mv88e6xxx_fid_new(struct dsa_switch *ds, u16 *fid)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
	struct mv88e6xxx_vtu_stu_entry vlan;
	int i, err;

	bitmap_zero(fid_bitmap, MV88E6XXX_N_FID);

	/* Set every FID bit used by the (un)bridged ports */
	for (i = 0; i < ps->num_ports; ++i) {
		err = _mv88e6xxx_port_fid_get(ds, i, fid);
		if (err)
			return err;

		set_bit(*fid, fid_bitmap);
	}

	/* Set every FID bit used by the VLAN entries */
	err = _mv88e6xxx_vtu_vid_write(ds, GLOBAL_VTU_VID_MASK);
	if (err)
		return err;

	do {
		err = _mv88e6xxx_vtu_getnext(ds, &vlan);
		if (err)
			return err;

		if (!vlan.valid)
			break;

		set_bit(vlan.fid, fid_bitmap);
	} while (vlan.vid < GLOBAL_VTU_VID_MASK);

	/* The reset value 0x000 is used to indicate that multiple address
	 * databases are not needed. Return the next positive available.
	 */
	*fid = find_next_zero_bit(fid_bitmap, MV88E6XXX_N_FID, 1);
	if (unlikely(*fid == MV88E6XXX_N_FID))
		return -ENOSPC;

	/* Clear the database */
	return _mv88e6xxx_atu_flush(ds, *fid, true);
}
/* Initialize in @entry a fresh VTU entry for @vid (not yet loaded into
 * the hardware).
 *
 * A new FID with a flushed database is allocated, all user ports are
 * excluded, and on chip families with an STU the shared SID 0 entry is
 * validated if needed. Caller must hold the SMI mutex.
 */
static int _mv88e6xxx_vtu_new(struct dsa_switch *ds, u16 vid,
			      struct mv88e6xxx_vtu_stu_entry *entry)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct mv88e6xxx_vtu_stu_entry vlan = {
		.valid = true,
		.vid = vid,
	};
	int i, err;

	err = _mv88e6xxx_fid_new(ds, &vlan.fid);
	if (err)
		return err;

	/* exclude all ports except the CPU and DSA ports */
	for (i = 0; i < ps->num_ports; ++i)
		vlan.data[i] = dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)
			? GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED
			: GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;

	if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
	    mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
		struct mv88e6xxx_vtu_stu_entry vstp;

		/* Adding a VTU entry requires a valid STU entry. As VSTP is not
		 * implemented, only one STU entry is needed to cover all VTU
		 * entries. Thus, validate the SID 0.
		 */
		vlan.sid = 0;
		err = _mv88e6xxx_stu_getnext(ds, GLOBAL_VTU_SID_MASK, &vstp);
		if (err)
			return err;

		if (vstp.sid != vlan.sid || !vstp.valid) {
			memset(&vstp, 0, sizeof(vstp));
			vstp.valid = true;
			vstp.sid = vlan.sid;

			err = _mv88e6xxx_stu_loadpurge(ds, &vstp);
			if (err)
				return err;
		}
	}

	*entry = vlan;
	return 0;
}
/* Look up the VTU entry for @vid into @entry; when it does not exist
 * and @creat is set, initialize a fresh one (without loading it).
 * Caller must hold the SMI mutex.
 */
static int _mv88e6xxx_vtu_get(struct dsa_switch *ds, u16 vid,
			      struct mv88e6xxx_vtu_stu_entry *entry, bool creat)
{
	int err;

	if (!vid)
		return -EINVAL;

	/* GetNext returns the entry after the programmed VID, so start
	 * the walk one below the VID we are looking for.
	 */
	err = _mv88e6xxx_vtu_vid_write(ds, vid - 1);
	if (err)
		return err;

	err = _mv88e6xxx_vtu_getnext(ds, entry);
	if (err)
		return err;

	if (entry->vid != vid || !entry->valid) {
		if (!creat)
			/* -ENOENT would've been more appropriate, but switchdev expects
			 * -EOPNOTSUPP to inform bridge about an eventual software VLAN.
			 */
			return -EOPNOTSUPP;

		err = _mv88e6xxx_vtu_new(ds, vid, entry);
	}

	return err;
}
/* Check that no hardware VLAN in [@vid_begin, @vid_end] is already used
 * by a port that belongs to a different bridge than @port.
 *
 * Returns -EOPNOTSUPP in that case (and for VID 0, which cannot be
 * queried) so the bridge layer falls back to a software VLAN;
 * 0 otherwise, or a negative error on register access failure.
 */
static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
					u16 vid_begin, u16 vid_end)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct mv88e6xxx_vtu_stu_entry vlan;
	int i, err;

	if (!vid_begin)
		return -EOPNOTSUPP;

	mutex_lock(&ps->smi_mutex);

	err = _mv88e6xxx_vtu_vid_write(ds, vid_begin - 1);
	if (err)
		goto unlock;

	do {
		err = _mv88e6xxx_vtu_getnext(ds, &vlan);
		if (err)
			goto unlock;

		if (!vlan.valid)
			break;

		if (vlan.vid > vid_end)
			break;

		for (i = 0; i < ps->num_ports; ++i) {
			if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
				continue;

			if (vlan.data[i] ==
			    GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
				continue;

			if (ps->ports[i].bridge_dev ==
			    ps->ports[port].bridge_dev)
				break; /* same bridge, check next VLAN */

			netdev_warn(ds->ports[port],
				    "hardware VLAN %d already used by %s\n",
				    vlan.vid,
				    netdev_name(ps->ports[i].bridge_dev));
			err = -EOPNOTSUPP;
			goto unlock;
		}
	} while (vlan.vid < vid_end);

unlock:
	mutex_unlock(&ps->smi_mutex);

	return err;
}
/* Human-readable names for the 802.1Q mode field of PORT_CONTROL_2,
 * indexed by PORT_CONTROL_2_8021Q_* value; used in debug messages.
 */
static const char * const mv88e6xxx_port_8021q_mode_names[] = {
	[PORT_CONTROL_2_8021Q_DISABLED] = "Disabled",
	[PORT_CONTROL_2_8021Q_FALLBACK] = "Fallback",
	[PORT_CONTROL_2_8021Q_CHECK] = "Check",
	[PORT_CONTROL_2_8021Q_SECURE] = "Secure",
};
/* switchdev VLAN filtering toggle for @port: map @vlan_filtering onto
 * the 802.1Q mode of PORT_CONTROL_2 (Secure when filtering, Disabled
 * otherwise), rewriting the register only on an actual change.
 * Returns 0 or a negative error code.
 */
int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
				  bool vlan_filtering)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u16 old, new = vlan_filtering ? PORT_CONTROL_2_8021Q_SECURE :
		PORT_CONTROL_2_8021Q_DISABLED;
	int ret;

	mutex_lock(&ps->smi_mutex);

	ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL_2);
	if (ret < 0)
		goto unlock;

	old = ret & PORT_CONTROL_2_8021Q_MASK;

	if (new != old) {
		ret &= ~PORT_CONTROL_2_8021Q_MASK;
		ret |= new & PORT_CONTROL_2_8021Q_MASK;

		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_2,
					   ret);
		if (ret < 0)
			goto unlock;

		netdev_dbg(ds->ports[port], "802.1Q Mode %s (was %s)\n",
			   mv88e6xxx_port_8021q_mode_names[new],
			   mv88e6xxx_port_8021q_mode_names[old]);
	}

	ret = 0;
unlock:
	mutex_unlock(&ps->smi_mutex);

	return ret;
}
  1440. int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
  1441. const struct switchdev_obj_port_vlan *vlan,
  1442. struct switchdev_trans *trans)
  1443. {
  1444. int err;
  1445. /* If the requested port doesn't belong to the same bridge as the VLAN
  1446. * members, do not support it (yet) and fallback to software VLAN.
  1447. */
  1448. err = mv88e6xxx_port_check_hw_vlan(ds, port, vlan->vid_begin,
  1449. vlan->vid_end);
  1450. if (err)
  1451. return err;
  1452. /* We don't need any dynamic resource from the kernel (yet),
  1453. * so skip the prepare phase.
  1454. */
  1455. return 0;
  1456. }
  1457. static int _mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid,
  1458. bool untagged)
  1459. {
  1460. struct mv88e6xxx_vtu_stu_entry vlan;
  1461. int err;
  1462. err = _mv88e6xxx_vtu_get(ds, vid, &vlan, true);
  1463. if (err)
  1464. return err;
  1465. vlan.data[port] = untagged ?
  1466. GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED :
  1467. GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED;
  1468. return _mv88e6xxx_vtu_loadpurge(ds, &vlan);
  1469. }
/* switchdev commit phase for a VLAN add: join every VID in the range
 * with the requested tagging, then set the PVID (to vid_end) if asked.
 * Returns 0 or the first error encountered.
 */
int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
			    const struct switchdev_obj_port_vlan *vlan,
			    struct switchdev_trans *trans)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	u16 vid;
	int err = 0;

	mutex_lock(&ps->smi_mutex);

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
		err = _mv88e6xxx_port_vlan_add(ds, port, vid, untagged);
		if (err)
			goto unlock;
	}

	/* no PVID with ranges, otherwise it's a bug */
	if (pvid)
		err = _mv88e6xxx_port_pvid_set(ds, port, vlan->vid_end);
unlock:
	mutex_unlock(&ps->smi_mutex);

	return err;
}
/* Remove @port from VLAN @vid.
 *
 * The VTU entry is invalidated once no user port remains a member;
 * afterwards @port's entries are purged from the VLAN's address
 * database. Caller must hold the SMI mutex.
 */
static int _mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct mv88e6xxx_vtu_stu_entry vlan;
	int i, err;

	err = _mv88e6xxx_vtu_get(ds, vid, &vlan, false);
	if (err)
		return err;

	/* Tell switchdev if this VLAN is handled in software */
	if (vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
		return -EOPNOTSUPP;

	vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;

	/* keep the VLAN unless all ports are excluded */
	vlan.valid = false;
	for (i = 0; i < ps->num_ports; ++i) {
		if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
			continue;

		if (vlan.data[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
			vlan.valid = true;
			break;
		}
	}

	err = _mv88e6xxx_vtu_loadpurge(ds, &vlan);
	if (err)
		return err;

	return _mv88e6xxx_atu_remove(ds, vlan.fid, port, false);
}
  1519. int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
  1520. const struct switchdev_obj_port_vlan *vlan)
  1521. {
  1522. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  1523. u16 pvid, vid;
  1524. int err = 0;
  1525. mutex_lock(&ps->smi_mutex);
  1526. err = _mv88e6xxx_port_pvid_get(ds, port, &pvid);
  1527. if (err)
  1528. goto unlock;
  1529. for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
  1530. err = _mv88e6xxx_port_vlan_del(ds, port, vid);
  1531. if (err)
  1532. goto unlock;
  1533. if (vid == pvid) {
  1534. err = _mv88e6xxx_port_pvid_set(ds, port, 0);
  1535. if (err)
  1536. goto unlock;
  1537. }
  1538. }
  1539. unlock:
  1540. mutex_unlock(&ps->smi_mutex);
  1541. return err;
  1542. }
  1543. static int _mv88e6xxx_atu_mac_write(struct dsa_switch *ds,
  1544. const unsigned char *addr)
  1545. {
  1546. int i, ret;
  1547. for (i = 0; i < 3; i++) {
  1548. ret = _mv88e6xxx_reg_write(
  1549. ds, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
  1550. (addr[i * 2] << 8) | addr[i * 2 + 1]);
  1551. if (ret < 0)
  1552. return ret;
  1553. }
  1554. return 0;
  1555. }
  1556. static int _mv88e6xxx_atu_mac_read(struct dsa_switch *ds, unsigned char *addr)
  1557. {
  1558. int i, ret;
  1559. for (i = 0; i < 3; i++) {
  1560. ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
  1561. GLOBAL_ATU_MAC_01 + i);
  1562. if (ret < 0)
  1563. return ret;
  1564. addr[i * 2] = ret >> 8;
  1565. addr[i * 2 + 1] = ret & 0xff;
  1566. }
  1567. return 0;
  1568. }
  1569. static int _mv88e6xxx_atu_load(struct dsa_switch *ds,
  1570. struct mv88e6xxx_atu_entry *entry)
  1571. {
  1572. int ret;
  1573. ret = _mv88e6xxx_atu_wait(ds);
  1574. if (ret < 0)
  1575. return ret;
  1576. ret = _mv88e6xxx_atu_mac_write(ds, entry->mac);
  1577. if (ret < 0)
  1578. return ret;
  1579. ret = _mv88e6xxx_atu_data_write(ds, entry);
  1580. if (ret < 0)
  1581. return ret;
  1582. ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, entry->fid);
  1583. if (ret < 0)
  1584. return ret;
  1585. return _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_LOAD_DB);
  1586. }
  1587. static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port,
  1588. const unsigned char *addr, u16 vid,
  1589. u8 state)
  1590. {
  1591. struct mv88e6xxx_atu_entry entry = { 0 };
  1592. struct mv88e6xxx_vtu_stu_entry vlan;
  1593. int err;
  1594. /* Null VLAN ID corresponds to the port private database */
  1595. if (vid == 0)
  1596. err = _mv88e6xxx_port_fid_get(ds, port, &vlan.fid);
  1597. else
  1598. err = _mv88e6xxx_vtu_get(ds, vid, &vlan, false);
  1599. if (err)
  1600. return err;
  1601. entry.fid = vlan.fid;
  1602. entry.state = state;
  1603. ether_addr_copy(entry.mac, addr);
  1604. if (state != GLOBAL_ATU_DATA_STATE_UNUSED) {
  1605. entry.trunk = false;
  1606. entry.portv_trunkid = BIT(port);
  1607. }
  1608. return _mv88e6xxx_atu_load(ds, &entry);
  1609. }
  1610. int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
  1611. const struct switchdev_obj_port_fdb *fdb,
  1612. struct switchdev_trans *trans)
  1613. {
  1614. /* We don't need any dynamic resource from the kernel (yet),
  1615. * so skip the prepare phase.
  1616. */
  1617. return 0;
  1618. }
  1619. int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
  1620. const struct switchdev_obj_port_fdb *fdb,
  1621. struct switchdev_trans *trans)
  1622. {
  1623. int state = is_multicast_ether_addr(fdb->addr) ?
  1624. GLOBAL_ATU_DATA_STATE_MC_STATIC :
  1625. GLOBAL_ATU_DATA_STATE_UC_STATIC;
  1626. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  1627. int ret;
  1628. mutex_lock(&ps->smi_mutex);
  1629. ret = _mv88e6xxx_port_fdb_load(ds, port, fdb->addr, fdb->vid, state);
  1630. mutex_unlock(&ps->smi_mutex);
  1631. return ret;
  1632. }
  1633. int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
  1634. const struct switchdev_obj_port_fdb *fdb)
  1635. {
  1636. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  1637. int ret;
  1638. mutex_lock(&ps->smi_mutex);
  1639. ret = _mv88e6xxx_port_fdb_load(ds, port, fdb->addr, fdb->vid,
  1640. GLOBAL_ATU_DATA_STATE_UNUSED);
  1641. mutex_unlock(&ps->smi_mutex);
  1642. return ret;
  1643. }
  1644. static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
  1645. struct mv88e6xxx_atu_entry *entry)
  1646. {
  1647. struct mv88e6xxx_atu_entry next = { 0 };
  1648. int ret;
  1649. next.fid = fid;
  1650. ret = _mv88e6xxx_atu_wait(ds);
  1651. if (ret < 0)
  1652. return ret;
  1653. ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid);
  1654. if (ret < 0)
  1655. return ret;
  1656. ret = _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_GET_NEXT_DB);
  1657. if (ret < 0)
  1658. return ret;
  1659. ret = _mv88e6xxx_atu_mac_read(ds, next.mac);
  1660. if (ret < 0)
  1661. return ret;
  1662. ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
  1663. if (ret < 0)
  1664. return ret;
  1665. next.state = ret & GLOBAL_ATU_DATA_STATE_MASK;
  1666. if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) {
  1667. unsigned int mask, shift;
  1668. if (ret & GLOBAL_ATU_DATA_TRUNK) {
  1669. next.trunk = true;
  1670. mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
  1671. shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
  1672. } else {
  1673. next.trunk = false;
  1674. mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
  1675. shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
  1676. }
  1677. next.portv_trunkid = (ret & mask) >> shift;
  1678. }
  1679. *entry = next;
  1680. return 0;
  1681. }
  1682. static int _mv88e6xxx_port_fdb_dump_one(struct dsa_switch *ds, u16 fid, u16 vid,
  1683. int port,
  1684. struct switchdev_obj_port_fdb *fdb,
  1685. int (*cb)(struct switchdev_obj *obj))
  1686. {
  1687. struct mv88e6xxx_atu_entry addr = {
  1688. .mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
  1689. };
  1690. int err;
  1691. err = _mv88e6xxx_atu_mac_write(ds, addr.mac);
  1692. if (err)
  1693. return err;
  1694. do {
  1695. err = _mv88e6xxx_atu_getnext(ds, fid, &addr);
  1696. if (err)
  1697. break;
  1698. if (addr.state == GLOBAL_ATU_DATA_STATE_UNUSED)
  1699. break;
  1700. if (!addr.trunk && addr.portv_trunkid & BIT(port)) {
  1701. bool is_static = addr.state ==
  1702. (is_multicast_ether_addr(addr.mac) ?
  1703. GLOBAL_ATU_DATA_STATE_MC_STATIC :
  1704. GLOBAL_ATU_DATA_STATE_UC_STATIC);
  1705. fdb->vid = vid;
  1706. ether_addr_copy(fdb->addr, addr.mac);
  1707. fdb->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
  1708. err = cb(&fdb->obj);
  1709. if (err)
  1710. break;
  1711. }
  1712. } while (!is_broadcast_ether_addr(addr.mac));
  1713. return err;
  1714. }
  1715. int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
  1716. struct switchdev_obj_port_fdb *fdb,
  1717. int (*cb)(struct switchdev_obj *obj))
  1718. {
  1719. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  1720. struct mv88e6xxx_vtu_stu_entry vlan = {
  1721. .vid = GLOBAL_VTU_VID_MASK, /* all ones */
  1722. };
  1723. u16 fid;
  1724. int err;
  1725. mutex_lock(&ps->smi_mutex);
  1726. /* Dump port's default Filtering Information Database (VLAN ID 0) */
  1727. err = _mv88e6xxx_port_fid_get(ds, port, &fid);
  1728. if (err)
  1729. goto unlock;
  1730. err = _mv88e6xxx_port_fdb_dump_one(ds, fid, 0, port, fdb, cb);
  1731. if (err)
  1732. goto unlock;
  1733. /* Dump VLANs' Filtering Information Databases */
  1734. err = _mv88e6xxx_vtu_vid_write(ds, vlan.vid);
  1735. if (err)
  1736. goto unlock;
  1737. do {
  1738. err = _mv88e6xxx_vtu_getnext(ds, &vlan);
  1739. if (err)
  1740. break;
  1741. if (!vlan.valid)
  1742. break;
  1743. err = _mv88e6xxx_port_fdb_dump_one(ds, vlan.fid, vlan.vid, port,
  1744. fdb, cb);
  1745. if (err)
  1746. break;
  1747. } while (vlan.vid < GLOBAL_VTU_VID_MASK);
  1748. unlock:
  1749. mutex_unlock(&ps->smi_mutex);
  1750. return err;
  1751. }
  1752. int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
  1753. struct net_device *bridge)
  1754. {
  1755. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  1756. u16 fid;
  1757. int i, err;
  1758. mutex_lock(&ps->smi_mutex);
  1759. /* Get or create the bridge FID and assign it to the port */
  1760. for (i = 0; i < ps->num_ports; ++i)
  1761. if (ps->ports[i].bridge_dev == bridge)
  1762. break;
  1763. if (i < ps->num_ports)
  1764. err = _mv88e6xxx_port_fid_get(ds, i, &fid);
  1765. else
  1766. err = _mv88e6xxx_fid_new(ds, &fid);
  1767. if (err)
  1768. goto unlock;
  1769. err = _mv88e6xxx_port_fid_set(ds, port, fid);
  1770. if (err)
  1771. goto unlock;
  1772. /* Assign the bridge and remap each port's VLANTable */
  1773. ps->ports[port].bridge_dev = bridge;
  1774. for (i = 0; i < ps->num_ports; ++i) {
  1775. if (ps->ports[i].bridge_dev == bridge) {
  1776. err = _mv88e6xxx_port_based_vlan_map(ds, i);
  1777. if (err)
  1778. break;
  1779. }
  1780. }
  1781. unlock:
  1782. mutex_unlock(&ps->smi_mutex);
  1783. return err;
  1784. }
  1785. void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
  1786. {
  1787. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  1788. struct net_device *bridge = ps->ports[port].bridge_dev;
  1789. u16 fid;
  1790. int i;
  1791. mutex_lock(&ps->smi_mutex);
  1792. /* Give the port a fresh Filtering Information Database */
  1793. if (_mv88e6xxx_fid_new(ds, &fid) ||
  1794. _mv88e6xxx_port_fid_set(ds, port, fid))
  1795. netdev_warn(ds->ports[port], "failed to assign a new FID\n");
  1796. /* Unassign the bridge and remap each port's VLANTable */
  1797. ps->ports[port].bridge_dev = NULL;
  1798. for (i = 0; i < ps->num_ports; ++i)
  1799. if (i == port || ps->ports[i].bridge_dev == bridge)
  1800. if (_mv88e6xxx_port_based_vlan_map(ds, i))
  1801. netdev_warn(ds->ports[i], "failed to remap\n");
  1802. mutex_unlock(&ps->smi_mutex);
  1803. }
/* Deferred worker: push every pending STP state change recorded in
 * port_state_update_mask down to the hardware, warning on failure.
 */
static void mv88e6xxx_bridge_work(struct work_struct *work)
{
	struct mv88e6xxx_priv_state *ps;
	struct dsa_switch *ds;
	int port;

	ps = container_of(work, struct mv88e6xxx_priv_state, bridge_work);
	/* NOTE(review): recovers the dsa_switch by stepping back one
	 * struct from the priv area; this assumes the priv state is
	 * allocated immediately after the dsa_switch by the DSA core —
	 * confirm against the allocation site.
	 */
	ds = ((struct dsa_switch *)ps) - 1;

	mutex_lock(&ps->smi_mutex);

	/* Apply and clear each pending per-port state update */
	for (port = 0; port < ps->num_ports; ++port)
		if (test_and_clear_bit(port, ps->port_state_update_mask) &&
		    _mv88e6xxx_port_state(ds, port, ps->ports[port].state))
			netdev_warn(ds->ports[port], "failed to update state to %s\n",
				    mv88e6xxx_port_state_names[ps->ports[port].state]);

	mutex_unlock(&ps->smi_mutex);
}
  1819. static int _mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
  1820. int reg, int val)
  1821. {
  1822. int ret;
  1823. ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
  1824. if (ret < 0)
  1825. goto restore_page_0;
  1826. ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
  1827. restore_page_0:
  1828. _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
  1829. return ret;
  1830. }
  1831. static int _mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page,
  1832. int reg)
  1833. {
  1834. int ret;
  1835. ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
  1836. if (ret < 0)
  1837. goto restore_page_0;
  1838. ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
  1839. restore_page_0:
  1840. _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
  1841. return ret;
  1842. }
  1843. static int mv88e6xxx_power_on_serdes(struct dsa_switch *ds)
  1844. {
  1845. int ret;
  1846. ret = _mv88e6xxx_phy_page_read(ds, REG_FIBER_SERDES, PAGE_FIBER_SERDES,
  1847. MII_BMCR);
  1848. if (ret < 0)
  1849. return ret;
  1850. if (ret & BMCR_PDOWN) {
  1851. ret &= ~BMCR_PDOWN;
  1852. ret = _mv88e6xxx_phy_page_write(ds, REG_FIBER_SERDES,
  1853. PAGE_FIBER_SERDES, MII_BMCR,
  1854. ret);
  1855. }
  1856. return ret;
  1857. }
  1858. static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
  1859. {
  1860. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  1861. int ret;
  1862. u16 reg;
  1863. mutex_lock(&ps->smi_mutex);
  1864. if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
  1865. mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
  1866. mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
  1867. mv88e6xxx_6065_family(ds) || mv88e6xxx_6320_family(ds)) {
  1868. /* MAC Forcing register: don't force link, speed,
  1869. * duplex or flow control state to any particular
  1870. * values on physical ports, but force the CPU port
  1871. * and all DSA ports to their maximum bandwidth and
  1872. * full duplex.
  1873. */
  1874. reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
  1875. if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
  1876. reg &= ~PORT_PCS_CTRL_UNFORCED;
  1877. reg |= PORT_PCS_CTRL_FORCE_LINK |
  1878. PORT_PCS_CTRL_LINK_UP |
  1879. PORT_PCS_CTRL_DUPLEX_FULL |
  1880. PORT_PCS_CTRL_FORCE_DUPLEX;
  1881. if (mv88e6xxx_6065_family(ds))
  1882. reg |= PORT_PCS_CTRL_100;
  1883. else
  1884. reg |= PORT_PCS_CTRL_1000;
  1885. } else {
  1886. reg |= PORT_PCS_CTRL_UNFORCED;
  1887. }
  1888. ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
  1889. PORT_PCS_CTRL, reg);
  1890. if (ret)
  1891. goto abort;
  1892. }
  1893. /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
  1894. * disable Header mode, enable IGMP/MLD snooping, disable VLAN
  1895. * tunneling, determine priority by looking at 802.1p and IP
  1896. * priority fields (IP prio has precedence), and set STP state
  1897. * to Forwarding.
  1898. *
  1899. * If this is the CPU link, use DSA or EDSA tagging depending
  1900. * on which tagging mode was configured.
  1901. *
  1902. * If this is a link to another switch, use DSA tagging mode.
  1903. *
  1904. * If this is the upstream port for this switch, enable
  1905. * forwarding of unknown unicasts and multicasts.
  1906. */
  1907. reg = 0;
  1908. if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
  1909. mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
  1910. mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
  1911. mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds))
  1912. reg = PORT_CONTROL_IGMP_MLD_SNOOP |
  1913. PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
  1914. PORT_CONTROL_STATE_FORWARDING;
  1915. if (dsa_is_cpu_port(ds, port)) {
  1916. if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
  1917. reg |= PORT_CONTROL_DSA_TAG;
  1918. if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
  1919. mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
  1920. mv88e6xxx_6320_family(ds)) {
  1921. if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
  1922. reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
  1923. else
  1924. reg |= PORT_CONTROL_FRAME_MODE_DSA;
  1925. reg |= PORT_CONTROL_FORWARD_UNKNOWN |
  1926. PORT_CONTROL_FORWARD_UNKNOWN_MC;
  1927. }
  1928. if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
  1929. mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
  1930. mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
  1931. mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds)) {
  1932. if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
  1933. reg |= PORT_CONTROL_EGRESS_ADD_TAG;
  1934. }
  1935. }
  1936. if (dsa_is_dsa_port(ds, port)) {
  1937. if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
  1938. reg |= PORT_CONTROL_DSA_TAG;
  1939. if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
  1940. mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
  1941. mv88e6xxx_6320_family(ds)) {
  1942. reg |= PORT_CONTROL_FRAME_MODE_DSA;
  1943. }
  1944. if (port == dsa_upstream_port(ds))
  1945. reg |= PORT_CONTROL_FORWARD_UNKNOWN |
  1946. PORT_CONTROL_FORWARD_UNKNOWN_MC;
  1947. }
  1948. if (reg) {
  1949. ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
  1950. PORT_CONTROL, reg);
  1951. if (ret)
  1952. goto abort;
  1953. }
  1954. /* If this port is connected to a SerDes, make sure the SerDes is not
  1955. * powered down.
  1956. */
  1957. if (mv88e6xxx_6352_family(ds)) {
  1958. ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
  1959. if (ret < 0)
  1960. goto abort;
  1961. ret &= PORT_STATUS_CMODE_MASK;
  1962. if ((ret == PORT_STATUS_CMODE_100BASE_X) ||
  1963. (ret == PORT_STATUS_CMODE_1000BASE_X) ||
  1964. (ret == PORT_STATUS_CMODE_SGMII)) {
  1965. ret = mv88e6xxx_power_on_serdes(ds);
  1966. if (ret < 0)
  1967. goto abort;
  1968. }
  1969. }
  1970. /* Port Control 2: don't force a good FCS, set the maximum frame size to
  1971. * 10240 bytes, disable 802.1q tags checking, don't discard tagged or
  1972. * untagged frames on this port, do a destination address lookup on all
  1973. * received packets as usual, disable ARP mirroring and don't send a
  1974. * copy of all transmitted/received frames on this port to the CPU.
  1975. */
  1976. reg = 0;
  1977. if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
  1978. mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
  1979. mv88e6xxx_6095_family(ds) || mv88e6xxx_6320_family(ds))
  1980. reg = PORT_CONTROL_2_MAP_DA;
  1981. if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
  1982. mv88e6xxx_6165_family(ds) || mv88e6xxx_6320_family(ds))
  1983. reg |= PORT_CONTROL_2_JUMBO_10240;
  1984. if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) {
  1985. /* Set the upstream port this port should use */
  1986. reg |= dsa_upstream_port(ds);
  1987. /* enable forwarding of unknown multicast addresses to
  1988. * the upstream port
  1989. */
  1990. if (port == dsa_upstream_port(ds))
  1991. reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
  1992. }
  1993. reg |= PORT_CONTROL_2_8021Q_DISABLED;
  1994. if (reg) {
  1995. ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
  1996. PORT_CONTROL_2, reg);
  1997. if (ret)
  1998. goto abort;
  1999. }
  2000. /* Port Association Vector: when learning source addresses
  2001. * of packets, add the address to the address database using
  2002. * a port bitmap that has only the bit for this port set and
  2003. * the other bits clear.
  2004. */
  2005. reg = 1 << port;
  2006. /* Disable learning for DSA and CPU ports */
  2007. if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
  2008. reg = PORT_ASSOC_VECTOR_LOCKED_PORT;
  2009. ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR, reg);
  2010. if (ret)
  2011. goto abort;
  2012. /* Egress rate control 2: disable egress rate control. */
  2013. ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_RATE_CONTROL_2,
  2014. 0x0000);
  2015. if (ret)
  2016. goto abort;
  2017. if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
  2018. mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
  2019. mv88e6xxx_6320_family(ds)) {
  2020. /* Do not limit the period of time that this port can
  2021. * be paused for by the remote end or the period of
  2022. * time that this port can pause the remote end.
  2023. */
  2024. ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
  2025. PORT_PAUSE_CTRL, 0x0000);
  2026. if (ret)
  2027. goto abort;
  2028. /* Port ATU control: disable limiting the number of
  2029. * address database entries that this port is allowed
  2030. * to use.
  2031. */
  2032. ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
  2033. PORT_ATU_CONTROL, 0x0000);
  2034. /* Priority Override: disable DA, SA and VTU priority
  2035. * override.
  2036. */
  2037. ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
  2038. PORT_PRI_OVERRIDE, 0x0000);
  2039. if (ret)
  2040. goto abort;
  2041. /* Port Ethertype: use the Ethertype DSA Ethertype
  2042. * value.
  2043. */
  2044. ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
  2045. PORT_ETH_TYPE, ETH_P_EDSA);
  2046. if (ret)
  2047. goto abort;
  2048. /* Tag Remap: use an identity 802.1p prio -> switch
  2049. * prio mapping.
  2050. */
  2051. ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
  2052. PORT_TAG_REGMAP_0123, 0x3210);
  2053. if (ret)
  2054. goto abort;
  2055. /* Tag Remap 2: use an identity 802.1p prio -> switch
  2056. * prio mapping.
  2057. */
  2058. ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
  2059. PORT_TAG_REGMAP_4567, 0x7654);
  2060. if (ret)
  2061. goto abort;
  2062. }
  2063. if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
  2064. mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
  2065. mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
  2066. mv88e6xxx_6320_family(ds)) {
  2067. /* Rate Control: disable ingress rate limiting. */
  2068. ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
  2069. PORT_RATE_CONTROL, 0x0001);
  2070. if (ret)
  2071. goto abort;
  2072. }
  2073. /* Port Control 1: disable trunking, disable sending
  2074. * learning messages to this port.
  2075. */
  2076. ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1, 0x0000);
  2077. if (ret)
  2078. goto abort;
  2079. /* Port based VLAN map: give each port its own address
  2080. * database, and allow bidirectional communication between the
  2081. * CPU and DSA port(s), and the other ports.
  2082. */
  2083. ret = _mv88e6xxx_port_fid_set(ds, port, port + 1);
  2084. if (ret)
  2085. goto abort;
  2086. ret = _mv88e6xxx_port_based_vlan_map(ds, port);
  2087. if (ret)
  2088. goto abort;
  2089. /* Default VLAN ID and priority: don't set a default VLAN
  2090. * ID, and set the default packet priority to zero.
  2091. */
  2092. ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
  2093. 0x0000);
  2094. abort:
  2095. mutex_unlock(&ps->smi_mutex);
  2096. return ret;
  2097. }
  2098. int mv88e6xxx_setup_ports(struct dsa_switch *ds)
  2099. {
  2100. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  2101. int ret;
  2102. int i;
  2103. for (i = 0; i < ps->num_ports; i++) {
  2104. ret = mv88e6xxx_setup_port(ds, i);
  2105. if (ret < 0)
  2106. return ret;
  2107. }
  2108. return 0;
  2109. }
/* Common one-time setup shared by all mv88e6xxx variants: initialize
 * the SMI mutex, cache the switch ID and set up the deferred bridge
 * worker.
 *
 * NOTE(review): REG_READ is a macro defined elsewhere in this file and
 * appears to bail out of this function on SMI error — confirm before
 * reordering the statements below.
 */
int mv88e6xxx_setup_common(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	mutex_init(&ps->smi_mutex);

	/* Mask off the low nibble — presumably the revision field of the
	 * switch ID register; confirm against PORT_SWITCH_ID definitions.
	 */
	ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0;

	INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);

	return 0;
}
/* One-time switch-wide configuration: address aging and learning,
 * priority mapping, management frame trapping, the cross-chip DSA
 * routing table, trunking, the cross-chip port VLAN table, ingress
 * rate limiting, and a flush of the statistics/ATU/VTU tables.
 *
 * NOTE(review): REG_WRITE is a macro defined elsewhere in this file;
 * it appears to return from this function on SMI error — confirm
 * before reordering any of the calls below.
 */
int mv88e6xxx_setup_global(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;
	int i;

	/* Set the default address aging time to 5 minutes, and
	 * enable address learn messages to be sent to all message
	 * ports.
	 */
	REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
		  0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL);

	/* Configure the IP ToS mapping registers. */
	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
	REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);

	/* Configure the IEEE 802.1p priority mapping register. */
	REG_WRITE(REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);

	/* Send all frames with destination addresses matching
	 * 01:80:c2:00:00:0x to the CPU port.
	 */
	REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff);

	/* Ignore removed tag data on doubly tagged packets, disable
	 * flow control messages, force flow control priority to the
	 * highest, and send all special multicast frames to the CPU
	 * port at the highest priority.
	 */
	REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
		  0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 |
		  GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI);

	/* Program the DSA routing table. */
	for (i = 0; i < 32; i++) {
		int nexthop = 0x1f;	/* 0x1f presumably means "no route"
					 * — confirm against the datasheet */

		if (ds->pd->rtable &&
		    i != ds->index && i < ds->dst->pd->nr_chips)
			nexthop = ds->pd->rtable[i] & 0x1f;

		REG_WRITE(REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
			  GLOBAL2_DEVICE_MAPPING_UPDATE |
			  (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) |
			  nexthop);
	}

	/* Clear all trunk masks. */
	for (i = 0; i < 8; i++)
		REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MASK,
			  0x8000 | (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
			  ((1 << ps->num_ports) - 1));

	/* Clear all trunk mappings. */
	for (i = 0; i < 16; i++)
		REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MAPPING,
			  GLOBAL2_TRUNK_MAPPING_UPDATE |
			  (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));

	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
	    mv88e6xxx_6320_family(ds)) {
		/* Send all frames with destination addresses matching
		 * 01:80:c2:00:00:2x to the CPU port.
		 */
		REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_2X, 0xffff);

		/* Initialise cross-chip port VLAN table to reset
		 * defaults.
		 */
		REG_WRITE(REG_GLOBAL2, GLOBAL2_PVT_ADDR, 0x9000);

		/* Clear the priority override table. */
		for (i = 0; i < 16; i++)
			REG_WRITE(REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE,
				  0x8000 | (i << 8));
	}

	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
	    mv88e6xxx_6320_family(ds)) {
		/* Disable ingress rate limiting by resetting all
		 * ingress rate limit registers to their initial
		 * state.
		 */
		for (i = 0; i < ps->num_ports; i++)
			REG_WRITE(REG_GLOBAL2, GLOBAL2_INGRESS_OP,
				  0x9000 | (i << 8));
	}

	/* Clear the statistics counters for all ports */
	REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP, GLOBAL_STATS_OP_FLUSH_ALL);

	/* Wait for the flush to complete. */
	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_stats_wait(ds);
	if (ret < 0)
		goto unlock;

	/* Clear all ATU entries */
	ret = _mv88e6xxx_atu_flush(ds, 0, true);
	if (ret < 0)
		goto unlock;

	/* Clear all the VTU and STU entries */
	ret = _mv88e6xxx_vtu_stu_flush(ds);
unlock:
	mutex_unlock(&ps->smi_mutex);

	return ret;
}
/* Soft-reset the switch (optionally toggling an external reset GPIO
 * first), keeping the PPU active when requested so that indirect PHY
 * access through global registers 0x18/0x19 keeps working.  Returns 0
 * on success or -ETIMEDOUT if the reset does not complete within 1s.
 */
int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	/* Status bits expected once reset has completed; the pattern
	 * differs depending on whether the PPU stays active.
	 */
	u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
	struct gpio_desc *gpiod = ds->pd->reset;
	unsigned long timeout;
	int ret;
	int i;

	/* Set all ports to the disabled state. */
	for (i = 0; i < ps->num_ports; i++) {
		ret = REG_READ(REG_PORT(i), PORT_CONTROL);
		/* 0xfffc clears the low two bits — presumably the port
		 * STP state field; confirm against PORT_CONTROL_STATE_*.
		 */
		REG_WRITE(REG_PORT(i), PORT_CONTROL, ret & 0xfffc);
	}

	/* Wait for transmit queues to drain. */
	usleep_range(2000, 4000);

	/* If there is a gpio connected to the reset pin, toggle it */
	if (gpiod) {
		gpiod_set_value_cansleep(gpiod, 1);
		usleep_range(10000, 20000);
		gpiod_set_value_cansleep(gpiod, 0);
		usleep_range(10000, 20000);
	}

	/* Reset the switch. Keep the PPU active if requested. The PPU
	 * needs to be active to support indirect phy register access
	 * through global registers 0x18 and 0x19.
	 */
	if (ppu_active)
		REG_WRITE(REG_GLOBAL, 0x04, 0xc000);
	else
		REG_WRITE(REG_GLOBAL, 0x04, 0xc400);

	/* Wait up to one second for reset to complete. */
	timeout = jiffies + 1 * HZ;
	while (time_before(jiffies, timeout)) {
		ret = REG_READ(REG_GLOBAL, 0x00);
		if ((ret & is_reset) == is_reset)
			break;
		usleep_range(1000, 2000);
	}
	if (time_after(jiffies, timeout))
		return -ETIMEDOUT;

	return 0;
}
  2260. int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
  2261. {
  2262. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  2263. int ret;
  2264. mutex_lock(&ps->smi_mutex);
  2265. ret = _mv88e6xxx_phy_page_read(ds, port, page, reg);
  2266. mutex_unlock(&ps->smi_mutex);
  2267. return ret;
  2268. }
  2269. int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
  2270. int reg, int val)
  2271. {
  2272. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  2273. int ret;
  2274. mutex_lock(&ps->smi_mutex);
  2275. ret = _mv88e6xxx_phy_page_write(ds, port, page, reg, val);
  2276. mutex_unlock(&ps->smi_mutex);
  2277. return ret;
  2278. }
  2279. static int mv88e6xxx_port_to_phy_addr(struct dsa_switch *ds, int port)
  2280. {
  2281. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  2282. if (port >= 0 && port < ps->num_ports)
  2283. return port;
  2284. return -EINVAL;
  2285. }
  2286. int
  2287. mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
  2288. {
  2289. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  2290. int addr = mv88e6xxx_port_to_phy_addr(ds, port);
  2291. int ret;
  2292. if (addr < 0)
  2293. return addr;
  2294. mutex_lock(&ps->smi_mutex);
  2295. ret = _mv88e6xxx_phy_read(ds, addr, regnum);
  2296. mutex_unlock(&ps->smi_mutex);
  2297. return ret;
  2298. }
  2299. int
  2300. mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
  2301. {
  2302. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  2303. int addr = mv88e6xxx_port_to_phy_addr(ds, port);
  2304. int ret;
  2305. if (addr < 0)
  2306. return addr;
  2307. mutex_lock(&ps->smi_mutex);
  2308. ret = _mv88e6xxx_phy_write(ds, addr, regnum, val);
  2309. mutex_unlock(&ps->smi_mutex);
  2310. return ret;
  2311. }
  2312. int
  2313. mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum)
  2314. {
  2315. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  2316. int addr = mv88e6xxx_port_to_phy_addr(ds, port);
  2317. int ret;
  2318. if (addr < 0)
  2319. return addr;
  2320. mutex_lock(&ps->smi_mutex);
  2321. ret = _mv88e6xxx_phy_read_indirect(ds, addr, regnum);
  2322. mutex_unlock(&ps->smi_mutex);
  2323. return ret;
  2324. }
  2325. int
  2326. mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
  2327. u16 val)
  2328. {
  2329. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  2330. int addr = mv88e6xxx_port_to_phy_addr(ds, port);
  2331. int ret;
  2332. if (addr < 0)
  2333. return addr;
  2334. mutex_lock(&ps->smi_mutex);
  2335. ret = _mv88e6xxx_phy_write_indirect(ds, addr, regnum, val);
  2336. mutex_unlock(&ps->smi_mutex);
  2337. return ret;
  2338. }
  2339. #ifdef CONFIG_NET_DSA_HWMON
  2340. static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
  2341. {
  2342. struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
  2343. int ret;
  2344. int val;
  2345. *temp = 0;
  2346. mutex_lock(&ps->smi_mutex);
  2347. ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
  2348. if (ret < 0)
  2349. goto error;
  2350. /* Enable temperature sensor */
  2351. ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
  2352. if (ret < 0)
  2353. goto error;
  2354. ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
  2355. if (ret < 0)
  2356. goto error;
  2357. /* Wait for temperature to stabilize */
  2358. usleep_range(10000, 12000);
  2359. val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
  2360. if (val < 0) {
  2361. ret = val;
  2362. goto error;
  2363. }
  2364. /* Disable temperature sensor */
  2365. ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
  2366. if (ret < 0)
  2367. goto error;
  2368. *temp = ((val & 0x1f) - 5) * 5;
  2369. error:
  2370. _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
  2371. mutex_unlock(&ps->smi_mutex);
  2372. return ret;
  2373. }
  2374. static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
  2375. {
  2376. int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
  2377. int ret;
  2378. *temp = 0;
  2379. ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27);
  2380. if (ret < 0)
  2381. return ret;
  2382. *temp = (ret & 0xff) - 25;
  2383. return 0;
  2384. }
  2385. int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
  2386. {
  2387. if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
  2388. return mv88e63xx_get_temp(ds, temp);
  2389. return mv88e61xx_get_temp(ds, temp);
  2390. }
  2391. int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
  2392. {
  2393. int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
  2394. int ret;
  2395. if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
  2396. return -EOPNOTSUPP;
  2397. *temp = 0;
  2398. ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
  2399. if (ret < 0)
  2400. return ret;
  2401. *temp = (((ret >> 8) & 0x1f) * 5) - 25;
  2402. return 0;
  2403. }
  2404. int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
  2405. {
  2406. int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
  2407. int ret;
  2408. if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
  2409. return -EOPNOTSUPP;
  2410. ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
  2411. if (ret < 0)
  2412. return ret;
  2413. temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
  2414. return mv88e6xxx_phy_page_write(ds, phy, 6, 26,
  2415. (ret & 0xe0ff) | (temp << 8));
  2416. }
  2417. int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
  2418. {
  2419. int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
  2420. int ret;
  2421. if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
  2422. return -EOPNOTSUPP;
  2423. *alarm = false;
  2424. ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
  2425. if (ret < 0)
  2426. return ret;
  2427. *alarm = !!(ret & 0x40);
  2428. return 0;
  2429. }
  2430. #endif /* CONFIG_NET_DSA_HWMON */
  2431. char *mv88e6xxx_lookup_name(struct device *host_dev, int sw_addr,
  2432. const struct mv88e6xxx_switch_id *table,
  2433. unsigned int num)
  2434. {
  2435. struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev);
  2436. int i, ret;
  2437. if (!bus)
  2438. return NULL;
  2439. ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
  2440. if (ret < 0)
  2441. return NULL;
  2442. /* Look up the exact switch ID */
  2443. for (i = 0; i < num; ++i)
  2444. if (table[i].id == ret)
  2445. return table[i].name;
  2446. /* Look up only the product number */
  2447. for (i = 0; i < num; ++i) {
  2448. if (table[i].id == (ret & PORT_SWITCH_ID_PROD_NUM_MASK)) {
  2449. dev_warn(host_dev, "unknown revision %d, using base switch 0x%x\n",
  2450. ret & PORT_SWITCH_ID_REV_MASK,
  2451. ret & PORT_SWITCH_ID_PROD_NUM_MASK);
  2452. return table[i].name;
  2453. }
  2454. }
  2455. return NULL;
  2456. }
/* Module init: register each switch sub-driver that was enabled in the
 * kernel configuration.
 */
static int __init mv88e6xxx_init(void)
{
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
	register_switch_driver(&mv88e6131_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123)
	register_switch_driver(&mv88e6123_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
	register_switch_driver(&mv88e6352_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
	register_switch_driver(&mv88e6171_switch_driver);
#endif
	return 0;
}
module_init(mv88e6xxx_init);
/* Module exit: unregister the sub-drivers in the reverse order of their
 * registration in mv88e6xxx_init().
 */
static void __exit mv88e6xxx_cleanup(void)
{
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
	unregister_switch_driver(&mv88e6171_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
	unregister_switch_driver(&mv88e6352_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123)
	unregister_switch_driver(&mv88e6123_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
	unregister_switch_driver(&mv88e6131_switch_driver);
#endif
}
module_exit(mv88e6xxx_cleanup);

MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
MODULE_LICENSE("GPL");