mvpp2_prs.c 69 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467
  1. /*
  2. * Header Parser helpers for Marvell PPv2 Network Controller
  3. *
  4. * Copyright (C) 2014 Marvell
  5. *
  6. * Marcin Wojtas <mw@semihalf.com>
  7. *
  8. * This file is licensed under the terms of the GNU General Public
  9. * License version 2. This program is licensed "as is" without any
  10. * warranty of any kind, whether express or implied.
  11. */
  12. #include <linux/kernel.h>
  13. #include <linux/netdevice.h>
  14. #include <linux/etherdevice.h>
  15. #include <linux/platform_device.h>
  16. #include <uapi/linux/ppp_defs.h>
  17. #include <net/ip.h>
  18. #include <net/ipv6.h>
  19. #include "mvpp2.h"
  20. #include "mvpp2_prs.h"
  21. /* Update parser tcam and sram hw entries */
  22. static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
  23. {
  24. int i;
  25. if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
  26. return -EINVAL;
  27. /* Clear entry invalidation bit */
  28. pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
  29. /* Write tcam index - indirect access */
  30. mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
  31. for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
  32. mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
  33. /* Write sram index - indirect access */
  34. mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
  35. for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
  36. mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
  37. return 0;
  38. }
  39. /* Initialize tcam entry from hw */
  40. static int mvpp2_prs_init_from_hw(struct mvpp2 *priv,
  41. struct mvpp2_prs_entry *pe, int tid)
  42. {
  43. int i;
  44. if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
  45. return -EINVAL;
  46. memset(pe, 0, sizeof(*pe));
  47. pe->index = tid;
  48. /* Write tcam index - indirect access */
  49. mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
  50. pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
  51. MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
  52. if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
  53. return MVPP2_PRS_TCAM_ENTRY_INVALID;
  54. for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
  55. pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
  56. /* Write sram index - indirect access */
  57. mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
  58. for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
  59. pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
  60. return 0;
  61. }
  62. /* Invalidate tcam hw entry */
  63. static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
  64. {
  65. /* Write index - indirect access */
  66. mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
  67. mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
  68. MVPP2_PRS_TCAM_INV_MASK);
  69. }
  70. /* Enable shadow table entry and set its lookup ID */
  71. static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
  72. {
  73. priv->prs_shadow[index].valid = true;
  74. priv->prs_shadow[index].lu = lu;
  75. }
  76. /* Update ri fields in shadow table entry */
  77. static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
  78. unsigned int ri, unsigned int ri_mask)
  79. {
  80. priv->prs_shadow[index].ri_mask = ri_mask;
  81. priv->prs_shadow[index].ri = ri;
  82. }
  83. /* Update lookup field in tcam sw entry */
  84. static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
  85. {
  86. int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
  87. pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
  88. pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
  89. }
  90. /* Update mask for single port in tcam sw entry */
  91. static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
  92. unsigned int port, bool add)
  93. {
  94. int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
  95. if (add)
  96. pe->tcam.byte[enable_off] &= ~(1 << port);
  97. else
  98. pe->tcam.byte[enable_off] |= 1 << port;
  99. }
  100. /* Update port map in tcam sw entry */
  101. static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
  102. unsigned int ports)
  103. {
  104. unsigned char port_mask = MVPP2_PRS_PORT_MASK;
  105. int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
  106. pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
  107. pe->tcam.byte[enable_off] &= ~port_mask;
  108. pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
  109. }
  110. /* Obtain port map from tcam sw entry */
  111. static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
  112. {
  113. int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
  114. return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
  115. }
  116. /* Set byte of data and its enable bits in tcam sw entry */
  117. static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
  118. unsigned int offs, unsigned char byte,
  119. unsigned char enable)
  120. {
  121. pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
  122. pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
  123. }
  124. /* Get byte of data and its enable bits from tcam sw entry */
  125. static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
  126. unsigned int offs, unsigned char *byte,
  127. unsigned char *enable)
  128. {
  129. *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
  130. *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
  131. }
  132. /* Compare tcam data bytes with a pattern */
  133. static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
  134. u16 data)
  135. {
  136. int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
  137. u16 tcam_data;
  138. tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
  139. if (tcam_data != data)
  140. return false;
  141. return true;
  142. }
  143. /* Update ai bits in tcam sw entry */
  144. static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
  145. unsigned int bits, unsigned int enable)
  146. {
  147. int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
  148. for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
  149. if (!(enable & BIT(i)))
  150. continue;
  151. if (bits & BIT(i))
  152. pe->tcam.byte[ai_idx] |= 1 << i;
  153. else
  154. pe->tcam.byte[ai_idx] &= ~(1 << i);
  155. }
  156. pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
  157. }
  158. /* Get ai bits from tcam sw entry */
  159. static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
  160. {
  161. return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
  162. }
  163. /* Set ethertype in tcam sw entry */
  164. static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
  165. unsigned short ethertype)
  166. {
  167. mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
  168. mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
  169. }
  170. /* Set vid in tcam sw entry */
  171. static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset,
  172. unsigned short vid)
  173. {
  174. mvpp2_prs_tcam_data_byte_set(pe, offset + 0, (vid & 0xf00) >> 8, 0xf);
  175. mvpp2_prs_tcam_data_byte_set(pe, offset + 1, vid & 0xff, 0xff);
  176. }
  177. /* Set bits in sram sw entry */
  178. static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
  179. int val)
  180. {
  181. pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
  182. }
  183. /* Clear bits in sram sw entry */
  184. static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
  185. int val)
  186. {
  187. pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
  188. }
  189. /* Update ri bits in sram sw entry */
  190. static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
  191. unsigned int bits, unsigned int mask)
  192. {
  193. unsigned int i;
  194. for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
  195. int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
  196. if (!(mask & BIT(i)))
  197. continue;
  198. if (bits & BIT(i))
  199. mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
  200. else
  201. mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
  202. mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
  203. }
  204. }
  205. /* Obtain ri bits from sram sw entry */
  206. static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
  207. {
  208. return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
  209. }
  210. /* Update ai bits in sram sw entry */
  211. static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
  212. unsigned int bits, unsigned int mask)
  213. {
  214. unsigned int i;
  215. int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
  216. for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
  217. if (!(mask & BIT(i)))
  218. continue;
  219. if (bits & BIT(i))
  220. mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
  221. else
  222. mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
  223. mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
  224. }
  225. }
  226. /* Read ai bits from sram sw entry */
  227. static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
  228. {
  229. u8 bits;
  230. int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
  231. int ai_en_off = ai_off + 1;
  232. int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
  233. bits = (pe->sram.byte[ai_off] >> ai_shift) |
  234. (pe->sram.byte[ai_en_off] << (8 - ai_shift));
  235. return bits;
  236. }
  237. /* In sram sw entry set lookup ID field of the tcam key to be used in the next
  238. * lookup interation
  239. */
  240. static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
  241. unsigned int lu)
  242. {
  243. int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
  244. mvpp2_prs_sram_bits_clear(pe, sram_next_off,
  245. MVPP2_PRS_SRAM_NEXT_LU_MASK);
  246. mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
  247. }
  248. /* In the sram sw entry set sign and value of the next lookup offset
  249. * and the offset value generated to the classifier
  250. */
  251. static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
  252. unsigned int op)
  253. {
  254. /* Set sign */
  255. if (shift < 0) {
  256. mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
  257. shift = 0 - shift;
  258. } else {
  259. mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
  260. }
  261. /* Set value */
  262. pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
  263. (unsigned char)shift;
  264. /* Reset and set operation */
  265. mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
  266. MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
  267. mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
  268. /* Set base offset as current */
  269. mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
  270. }
  271. /* In the sram sw entry set sign and value of the user defined offset
  272. * generated to the classifier
  273. */
  274. static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
  275. unsigned int type, int offset,
  276. unsigned int op)
  277. {
  278. /* Set sign */
  279. if (offset < 0) {
  280. mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
  281. offset = 0 - offset;
  282. } else {
  283. mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
  284. }
  285. /* Set value */
  286. mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
  287. MVPP2_PRS_SRAM_UDF_MASK);
  288. mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
  289. pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
  290. MVPP2_PRS_SRAM_UDF_BITS)] &=
  291. ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
  292. pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
  293. MVPP2_PRS_SRAM_UDF_BITS)] |=
  294. (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
  295. /* Set offset type */
  296. mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
  297. MVPP2_PRS_SRAM_UDF_TYPE_MASK);
  298. mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
  299. /* Set offset operation */
  300. mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
  301. MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
  302. mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
  303. pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
  304. MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
  305. ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
  306. (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
  307. pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
  308. MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
  309. (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
  310. /* Set base offset as current */
  311. mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
  312. }
  313. /* Find parser flow entry */
  314. static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
  315. {
  316. struct mvpp2_prs_entry pe;
  317. int tid;
  318. /* Go through the all entires with MVPP2_PRS_LU_FLOWS */
  319. for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
  320. u8 bits;
  321. if (!priv->prs_shadow[tid].valid ||
  322. priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
  323. continue;
  324. mvpp2_prs_init_from_hw(priv, &pe, tid);
  325. bits = mvpp2_prs_sram_ai_get(&pe);
  326. /* Sram store classification lookup ID in AI bits [5:0] */
  327. if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
  328. return tid;
  329. }
  330. return -ENOENT;
  331. }
  332. /* Return first free tcam index, seeking from start to end */
  333. static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
  334. unsigned char end)
  335. {
  336. int tid;
  337. if (start > end)
  338. swap(start, end);
  339. if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
  340. end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
  341. for (tid = start; tid <= end; tid++) {
  342. if (!priv->prs_shadow[tid].valid)
  343. return tid;
  344. }
  345. return -EINVAL;
  346. }
  347. /* Enable/disable dropping all mac da's */
  348. static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
  349. {
  350. struct mvpp2_prs_entry pe;
  351. if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
  352. /* Entry exist - update port only */
  353. mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL);
  354. } else {
  355. /* Entry doesn't exist - create new */
  356. memset(&pe, 0, sizeof(pe));
  357. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
  358. pe.index = MVPP2_PE_DROP_ALL;
  359. /* Non-promiscuous mode for all ports - DROP unknown packets */
  360. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
  361. MVPP2_PRS_RI_DROP_MASK);
  362. mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
  363. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
  364. /* Update shadow table */
  365. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
  366. /* Mask all ports */
  367. mvpp2_prs_tcam_port_map_set(&pe, 0);
  368. }
  369. /* Update port mask */
  370. mvpp2_prs_tcam_port_set(&pe, port, add);
  371. mvpp2_prs_hw_write(priv, &pe);
  372. }
  373. /* Set port to unicast or multicast promiscuous mode */
  374. void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
  375. enum mvpp2_prs_l2_cast l2_cast, bool add)
  376. {
  377. struct mvpp2_prs_entry pe;
  378. unsigned char cast_match;
  379. unsigned int ri;
  380. int tid;
  381. if (l2_cast == MVPP2_PRS_L2_UNI_CAST) {
  382. cast_match = MVPP2_PRS_UCAST_VAL;
  383. tid = MVPP2_PE_MAC_UC_PROMISCUOUS;
  384. ri = MVPP2_PRS_RI_L2_UCAST;
  385. } else {
  386. cast_match = MVPP2_PRS_MCAST_VAL;
  387. tid = MVPP2_PE_MAC_MC_PROMISCUOUS;
  388. ri = MVPP2_PRS_RI_L2_MCAST;
  389. }
  390. /* promiscuous mode - Accept unknown unicast or multicast packets */
  391. if (priv->prs_shadow[tid].valid) {
  392. mvpp2_prs_init_from_hw(priv, &pe, tid);
  393. } else {
  394. memset(&pe, 0, sizeof(pe));
  395. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
  396. pe.index = tid;
  397. /* Continue - set next lookup */
  398. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
  399. /* Set result info bits */
  400. mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK);
  401. /* Match UC or MC addresses */
  402. mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match,
  403. MVPP2_PRS_CAST_MASK);
  404. /* Shift to ethertype */
  405. mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
  406. MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
  407. /* Mask all ports */
  408. mvpp2_prs_tcam_port_map_set(&pe, 0);
  409. /* Update shadow table */
  410. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
  411. }
  412. /* Update port mask */
  413. mvpp2_prs_tcam_port_set(&pe, port, add);
  414. mvpp2_prs_hw_write(priv, &pe);
  415. }
  416. /* Set entry for dsa packets */
  417. static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
  418. bool tagged, bool extend)
  419. {
  420. struct mvpp2_prs_entry pe;
  421. int tid, shift;
  422. if (extend) {
  423. tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
  424. shift = 8;
  425. } else {
  426. tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
  427. shift = 4;
  428. }
  429. if (priv->prs_shadow[tid].valid) {
  430. /* Entry exist - update port only */
  431. mvpp2_prs_init_from_hw(priv, &pe, tid);
  432. } else {
  433. /* Entry doesn't exist - create new */
  434. memset(&pe, 0, sizeof(pe));
  435. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
  436. pe.index = tid;
  437. /* Update shadow table */
  438. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
  439. if (tagged) {
  440. /* Set tagged bit in DSA tag */
  441. mvpp2_prs_tcam_data_byte_set(&pe, 0,
  442. MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
  443. MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
  444. /* Set ai bits for next iteration */
  445. if (extend)
  446. mvpp2_prs_sram_ai_update(&pe, 1,
  447. MVPP2_PRS_SRAM_AI_MASK);
  448. else
  449. mvpp2_prs_sram_ai_update(&pe, 0,
  450. MVPP2_PRS_SRAM_AI_MASK);
  451. /* Set result info bits to 'single vlan' */
  452. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
  453. MVPP2_PRS_RI_VLAN_MASK);
  454. /* If packet is tagged continue check vid filtering */
  455. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
  456. } else {
  457. /* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag*/
  458. mvpp2_prs_sram_shift_set(&pe, shift,
  459. MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
  460. /* Set result info bits to 'no vlans' */
  461. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
  462. MVPP2_PRS_RI_VLAN_MASK);
  463. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
  464. }
  465. /* Mask all ports */
  466. mvpp2_prs_tcam_port_map_set(&pe, 0);
  467. }
  468. /* Update port mask */
  469. mvpp2_prs_tcam_port_set(&pe, port, add);
  470. mvpp2_prs_hw_write(priv, &pe);
  471. }
  472. /* Set entry for dsa ethertype */
  473. static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
  474. bool add, bool tagged, bool extend)
  475. {
  476. struct mvpp2_prs_entry pe;
  477. int tid, shift, port_mask;
  478. if (extend) {
  479. tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
  480. MVPP2_PE_ETYPE_EDSA_UNTAGGED;
  481. port_mask = 0;
  482. shift = 8;
  483. } else {
  484. tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
  485. MVPP2_PE_ETYPE_DSA_UNTAGGED;
  486. port_mask = MVPP2_PRS_PORT_MASK;
  487. shift = 4;
  488. }
  489. if (priv->prs_shadow[tid].valid) {
  490. /* Entry exist - update port only */
  491. mvpp2_prs_init_from_hw(priv, &pe, tid);
  492. } else {
  493. /* Entry doesn't exist - create new */
  494. memset(&pe, 0, sizeof(pe));
  495. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
  496. pe.index = tid;
  497. /* Set ethertype */
  498. mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
  499. mvpp2_prs_match_etype(&pe, 2, 0);
  500. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
  501. MVPP2_PRS_RI_DSA_MASK);
  502. /* Shift ethertype + 2 byte reserved + tag*/
  503. mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
  504. MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
  505. /* Update shadow table */
  506. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
  507. if (tagged) {
  508. /* Set tagged bit in DSA tag */
  509. mvpp2_prs_tcam_data_byte_set(&pe,
  510. MVPP2_ETH_TYPE_LEN + 2 + 3,
  511. MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
  512. MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
  513. /* Clear all ai bits for next iteration */
  514. mvpp2_prs_sram_ai_update(&pe, 0,
  515. MVPP2_PRS_SRAM_AI_MASK);
  516. /* If packet is tagged continue check vlans */
  517. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
  518. } else {
  519. /* Set result info bits to 'no vlans' */
  520. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
  521. MVPP2_PRS_RI_VLAN_MASK);
  522. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
  523. }
  524. /* Mask/unmask all ports, depending on dsa type */
  525. mvpp2_prs_tcam_port_map_set(&pe, port_mask);
  526. }
  527. /* Update port mask */
  528. mvpp2_prs_tcam_port_set(&pe, port, add);
  529. mvpp2_prs_hw_write(priv, &pe);
  530. }
  531. /* Search for existing single/triple vlan entry */
  532. static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai)
  533. {
  534. struct mvpp2_prs_entry pe;
  535. int tid;
  536. /* Go through the all entries with MVPP2_PRS_LU_VLAN */
  537. for (tid = MVPP2_PE_FIRST_FREE_TID;
  538. tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
  539. unsigned int ri_bits, ai_bits;
  540. bool match;
  541. if (!priv->prs_shadow[tid].valid ||
  542. priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
  543. continue;
  544. mvpp2_prs_init_from_hw(priv, &pe, tid);
  545. match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid));
  546. if (!match)
  547. continue;
  548. /* Get vlan type */
  549. ri_bits = mvpp2_prs_sram_ri_get(&pe);
  550. ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
  551. /* Get current ai value from tcam */
  552. ai_bits = mvpp2_prs_tcam_ai_get(&pe);
  553. /* Clear double vlan bit */
  554. ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
  555. if (ai != ai_bits)
  556. continue;
  557. if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
  558. ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
  559. return tid;
  560. }
  561. return -ENOENT;
  562. }
/* Add/update single/triple vlan entry.
 * Reuses an existing entry matching <tpid, ai> when one exists; otherwise
 * allocates a new TID from the top of the free range so that single/triple
 * vlan entries always sit above (lower tid than) all double vlan entries.
 * In both cases the entry's port map is updated to @port_map.
 */
static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
			      unsigned int port_map)
{
	struct mvpp2_prs_entry pe;
	int tid_aux, tid;
	int ret = 0;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_vlan_find(priv, tpid, ai);
	if (tid < 0) {
		/* Create new tcam entry; note the reversed bounds: scan the
		 * free range from the top down.
		 */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		/* Get last double vlan tid */
		for (tid_aux = MVPP2_PE_LAST_FREE_TID;
		     tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(&pe);
			if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
			    MVPP2_PRS_RI_VLAN_DOUBLE)
				break;
		}

		/* New entry must be above the last double vlan entry to keep
		 * lookup priority ordering correct.
		 */
		if (tid <= tid_aux)
			return -EINVAL;

		memset(&pe, 0, sizeof(pe));
		pe.index = tid;
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		mvpp2_prs_match_etype(&pe, 0, tpid);

		/* VLAN tag detected, proceed with VID filtering */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);

		/* Clear all ai bits for next iteration */
		mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

		if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		} else {
			/* Mark as inner tag of a double vlan sequence */
			ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_TRIPLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		}
		mvpp2_prs_tcam_ai_update(&pe, ai, MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	} else {
		/* Entry exists - just refresh it from hw before updating */
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}
	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(&pe, port_map);

	mvpp2_prs_hw_write(priv, &pe);

	return ret;
}
  619. /* Get first free double vlan ai number */
  620. static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
  621. {
  622. int i;
  623. for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
  624. if (!priv->prs_double_vlans[i])
  625. return i;
  626. }
  627. return -EINVAL;
  628. }
/* Search for existing double vlan entry.
 * Returns the matching tid, or -ENOENT when no double vlan entry with the
 * <tpid1, tpid2> ethertype pair exists.
 */
static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1,
				      unsigned short tpid2)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Go through the all entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_mask;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);

		/* Outer TPID at offset 0, inner TPID at offset 4 (after the
		 * outer tag); both are stored byte-swapped in the TCAM.
		 */
		match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid1)) &&
			mvpp2_prs_tcam_data_cmp(&pe, 4, swab16(tpid2));

		if (!match)
			continue;

		ri_mask = mvpp2_prs_sram_ri_get(&pe) & MVPP2_PRS_RI_VLAN_MASK;
		if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
			return tid;
	}

	return -ENOENT;
}
/* Add or update double vlan entry.
 * Reuses an existing <tpid1, tpid2> entry when present; otherwise allocates
 * a TID from the bottom of the free range so that double vlan entries stay
 * below (higher tid than) all single/triple vlan entries, claims a free ai
 * slot, and programs the new entry. The port map is updated in both cases.
 */
static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
				     unsigned short tpid2,
				     unsigned int port_map)
{
	int tid_aux, tid, ai, ret = 0;
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
	if (tid < 0) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		/* Set ai value for new double vlan entry */
		ai = mvpp2_prs_double_vlan_ai_free_get(priv);
		if (ai < 0)
			return ai;

		/* Get first single/triple vlan tid */
		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
		     tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(&pe);
			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
				break;
		}

		/* New double vlan entry must be below the first single/triple
		 * entry to preserve lookup priority ordering.
		 */
		if (tid >= tid_aux)
			return -ERANGE;

		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		pe.index = tid;

		/* Mark the ai slot as taken */
		priv->prs_double_vlans[ai] = true;

		/* Outer tag at offset 0, inner tag at offset 4 */
		mvpp2_prs_match_etype(&pe, 0, tpid1);
		mvpp2_prs_match_etype(&pe, 4, tpid2);

		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		/* Shift 4 bytes - skip outer vlan tag */
		mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
					 MVPP2_PRS_RI_VLAN_MASK);
		mvpp2_prs_sram_ai_update(&pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
					 MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	} else {
		/* Entry exists - refresh it from hw before updating */
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}

	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(&pe, port_map);
	mvpp2_prs_hw_write(priv, &pe);

	return ret;
}
/* IPv4 header parsing for fragmentation and L4 offset.
 * Installs two entries for @proto: one matching non-fragmented packets
 * (fragment-offset/MF bytes forced to zero) and one catch-all for
 * fragmented packets. Only TCP, UDP and IGMP are accepted.
 */
static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_IGMP))
		return -EINVAL;

	/* Not fragmented packet */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

	/* Bytes 2-3 (fragment offset / flags) must be zero, byte 5 must be
	 * the L4 protocol number.
	 */
	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
				     MVPP2_PRS_TCAM_PROTO_MASK_L);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00,
				     MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Fragmented packet - reuses pe from above with a new tid, relaxed
	 * fragment-byte match and the FRAG_TRUE result bit set.
	 */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;
	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
				 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

	/* Don't care about fragment bytes - match any value */
	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* IPv4 L3 multicast or broadcast.
 * Installs an entry that classifies the destination IP as multicast
 * (224.0.0.0/4 prefix match on the first byte) or broadcast
 * (255.255.255.255, all four bytes matched). Other values of @l3_cast
 * are rejected with -EINVAL.
 */
static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int mask, tid;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	switch (l3_cast) {
	case MVPP2_PRS_L3_MULTI_CAST:
		mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
					     MVPP2_PRS_IPV4_MC_MASK);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	case  MVPP2_PRS_L3_BROAD_CAST:
		mask = MVPP2_PRS_IPV4_BC_MASK;
		mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	default:
		return -EINVAL;
	}

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Set entries for protocols over IPv6.
 * Only TCP, UDP, ICMPv6 and IPIP are accepted; the entry matches the
 * next-header byte and terminates the lookup with flow generation.
 */
static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	/* Set L4 offset relatively to our current place */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 6,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Match the next-header byte */
	mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Write HW */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* IPv6 L3 multicast entry.
 * Only MVPP2_PRS_L3_MULTI_CAST is supported (IPv6 has no broadcast).
 */
static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Stay in IPv6 lookup for the next iteration (mcast flag is set
	 * here, header parsing continues)
	 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPv6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Match the multicast address prefix (ff00::/8) */
	mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
				     MVPP2_PRS_IPV6_MC_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
  877. /* Parser per-port initialization */
  878. static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
  879. int lu_max, int offset)
  880. {
  881. u32 val;
  882. /* Set lookup ID */
  883. val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
  884. val &= ~MVPP2_PRS_PORT_LU_MASK(port);
  885. val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
  886. mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
  887. /* Set maximum number of loops for packet received from port */
  888. val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
  889. val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
  890. val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
  891. mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
  892. /* Set initial offset for packet header extraction for the first
  893. * searching loop
  894. */
  895. val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
  896. val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
  897. val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
  898. mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
  899. }
  900. /* Default flow entries initialization for all ports */
  901. static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
  902. {
  903. struct mvpp2_prs_entry pe;
  904. int port;
  905. for (port = 0; port < MVPP2_MAX_PORTS; port++) {
  906. memset(&pe, 0, sizeof(pe));
  907. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
  908. pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
  909. /* Mask all ports */
  910. mvpp2_prs_tcam_port_map_set(&pe, 0);
  911. /* Set flow ID*/
  912. mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
  913. mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
  914. /* Update shadow table and hw entry */
  915. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
  916. mvpp2_prs_hw_write(priv, &pe);
  917. }
  918. }
  919. /* Set default entry for Marvell Header field */
  920. static void mvpp2_prs_mh_init(struct mvpp2 *priv)
  921. {
  922. struct mvpp2_prs_entry pe;
  923. memset(&pe, 0, sizeof(pe));
  924. pe.index = MVPP2_PE_MH_DEFAULT;
  925. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
  926. mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
  927. MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
  928. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
  929. /* Unmask all ports */
  930. mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
  931. /* Update shadow table and hw entry */
  932. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
  933. mvpp2_prs_hw_write(priv, &pe);
  934. }
/* Set default entires (place holder) for promiscuous, non-promiscuous and
 * multicast MAC addresses
 */
static void mvpp2_prs_mac_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	/* Non-promiscuous mode for all ports - DROP unknown packets */
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Mark the packet for dropping and terminate the lookup */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	/* Create dummy entries for drop all and promiscuous modes */
	mvpp2_prs_mac_drop_all_set(priv, 0, false);
	mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
	mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
}
/* Set default entries for various types of dsa packets */
static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	/* None tagged EDSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_EDSA);

	/* Tagged EDSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* None tagged DSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_DSA);

	/* Tagged DSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* None tagged EDSA ethertype entry - place holder*/
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);

	/* Tagged EDSA ethertype entry - place holder*/
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* None tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);

	/* Tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* Set default entry, in case DSA or EDSA tag not found */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
	pe.index = MVPP2_PE_DSA_DEFAULT;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);

	/* Shift 0 bytes */
	mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* NOTE(review): shadow lu is recorded as MVPP2_PRS_LU_MAC although
	 * the entry's tcam lu is MVPP2_PRS_LU_DSA - confirm this asymmetry
	 * is intentional before changing.
	 */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

	/* Clear all sram ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Initialize parser entries for VID filtering.
 * Installs the two fall-through entries used when no per-port VID filter
 * matches: one for regular frames (skip 4-byte VLAN tag) and one for
 * extended-DSA frames (skip 8 bytes).
 */
static void mvpp2_prs_vid_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	/* Set default vid entry */
	pe.index = MVPP2_PE_VID_FLTR_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

	/* Require the EDSA ai bit to be clear (non-EDSA frames) */
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT);

	/* Skip VLAN header - Set offset to 4 bytes */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);

	/* Set default vid entry for extended DSA*/
	memset(&pe, 0, sizeof(pe));

	/* Set default vid entry */
	pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

	/* Require the EDSA ai bit to be set (EDSA-tagged frames) */
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT,
				 MVPP2_PRS_EDSA_VID_AI_BIT);

	/* Skip VLAN header - Set offset to 8 bytes */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);
}
/* Match basic ethertypes.
 * Installs L2-lookup entries for PPPoE, ARP, LBTD, IPv4 (with and without
 * options), IPv6, and a catch-all "unknown ethertype" default. Note that
 * the "IPv4 with options" entry deliberately reuses the still-populated pe
 * from the preceding "IPv4 without options" entry, patching only the
 * tcam/ri fields that differ.
 */
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);

	/* Skip the PPPoE header and continue in the PPPoE lookup */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
				 MVPP2_PRS_RI_PPPOE_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
				MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);

	/* Generate flow in the next iteration*/
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: LBTD */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);

	/* Generate flow in the next iteration*/
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				 MVPP2_PRS_RI_CPU_CODE_MASK |
				 MVPP2_PRS_RI_UDF3_MASK);

	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				MVPP2_PRS_RI_CPU_CODE_MASK |
				MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
	/* Match version 4 + IHL 5 right after the ethertype */
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 with options - derived from the previous entry,
	 * with the IHL constraint dropped; pe is intentionally NOT reset.
	 */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	/* Clear tcam data before updating */
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD,
				     MVPP2_PRS_IPV4_HEAD_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);

	/* Skip DIP of IPV6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Generate flow in the next iteration*/
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Set L3 offset even it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
  1222. /* Configure vlan entries and detect up to 2 successive VLAN tags.
  1223. * Possible options:
  1224. * 0x8100, 0x88A8
  1225. * 0x8100, 0x8100
  1226. * 0x8100
  1227. * 0x88A8
  1228. */
  1229. static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
  1230. {
  1231. struct mvpp2_prs_entry pe;
  1232. int err;
  1233. priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool),
  1234. MVPP2_PRS_DBL_VLANS_MAX,
  1235. GFP_KERNEL);
  1236. if (!priv->prs_double_vlans)
  1237. return -ENOMEM;
  1238. /* Double VLAN: 0x8100, 0x88A8 */
  1239. err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
  1240. MVPP2_PRS_PORT_MASK);
  1241. if (err)
  1242. return err;
  1243. /* Double VLAN: 0x8100, 0x8100 */
  1244. err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
  1245. MVPP2_PRS_PORT_MASK);
  1246. if (err)
  1247. return err;
  1248. /* Single VLAN: 0x88a8 */
  1249. err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
  1250. MVPP2_PRS_PORT_MASK);
  1251. if (err)
  1252. return err;
  1253. /* Single VLAN: 0x8100 */
  1254. err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
  1255. MVPP2_PRS_PORT_MASK);
  1256. if (err)
  1257. return err;
  1258. /* Set default double vlan entry */
  1259. memset(&pe, 0, sizeof(pe));
  1260. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
  1261. pe.index = MVPP2_PE_VLAN_DBL;
  1262. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
  1263. /* Clear ai for next iterations */
  1264. mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
  1265. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
  1266. MVPP2_PRS_RI_VLAN_MASK);
  1267. mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
  1268. MVPP2_PRS_DBL_VLAN_AI_BIT);
  1269. /* Unmask all ports */
  1270. mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
  1271. /* Update shadow table and hw entry */
  1272. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
  1273. mvpp2_prs_hw_write(priv, &pe);
  1274. /* Set default vlan none entry */
  1275. memset(&pe, 0, sizeof(pe));
  1276. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
  1277. pe.index = MVPP2_PE_VLAN_NONE;
  1278. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
  1279. mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
  1280. MVPP2_PRS_RI_VLAN_MASK);
  1281. /* Unmask all ports */
  1282. mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
  1283. /* Update shadow table and hw entry */
  1284. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
  1285. mvpp2_prs_hw_write(priv, &pe);
  1286. return 0;
  1287. }
/* Set entries for PPPoE ethertype
 *
 * Programs four PPPoE-stage parser entries: IPv4 with options, IPv4
 * without options, IPv6, and a catch-all for non-IP payloads.
 * Returns 0 on success or a negative errno if no free TCAM slot exists.
 */
static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* IPv4 over PPPoE with options.
	 * NOTE(review): this entry carries no IHL match while the
	 * "without options" entry below does, and it is allocated at a
	 * lower TCAM index - confirm the hardware's match priority gives
	 * the intended precedence between the two.
	 */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	/* Match the PPP protocol field for IPv4 */
	mvpp2_prs_match_etype(&pe, 0, PPP_IP);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv4 over PPPoE without options.
	 * Deliberately reuses "pe" from the entry above (no memset): the
	 * PPP_IP etype match, shift and L3 offset are kept; only the
	 * version/IHL byte match and the result info change.
	 */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	/* Additionally match the first IP header byte - presumably
	 * version 4 / IHL 5 (no options); confirm against the
	 * MVPP2_PRS_IPV4_HEAD/IHL constant definitions.
	 */
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv6 over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IPv6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* Non-IP over PPPoE: no etype match, so this catches everything
	 * the two IP entries above did not
	 */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Set L3 offset even if it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Initialize entries for IPv4
 *
 * Installs the per-protocol (TCP/UDP/IGMP) and cast (broadcast/multicast)
 * entries via helpers, then the two fixed default entries: one for unknown
 * L4 protocols and one for unicast destination addresses.
 * Returns 0 on success or the first helper's negative errno.
 */
static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	/* Set entries for TCP, UDP and IGMP over IPv4 */
	err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	/* IGMP is flagged for CPU-special handling rather than a plain
	 * L4 protocol result
	 */
	err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 Broadcast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
	if (err)
		return err;

	/* IPv4 Multicast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Default IPv4 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_PROTO_UN;

	/* Set next lu to IPv4 (second pass over the header) */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	/* Skip 12 bytes - presumably to land on the destination address
	 * for the DIP pass; TODO confirm against the PPv2 datasheet
	 */
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	/* Hand the DIP ai bit to the next pass; match only when it is
	 * not yet set
	 */
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv4 entry for unicast address: matched on the DIP pass
	 * (tcam ai bit set by the entry above)
	 */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_ADDR_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Initialize entries for IPv6
 *
 * Installs per-protocol entries (TCP/UDP/ICMPv6/IPIP), the multicast
 * entry, a hop-limit-zero drop entry, and the default entries for
 * unknown protocols, unknown extension-header protocols, and unicast
 * addresses.  Returns 0 on success or a negative errno.
 */
static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid, err;

	/* Set entries for TCP, UDP and ICMP over IPv6 */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
				  MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
				  MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	/* ICMPv6 is flagged for CPU-special handling rather than a plain
	 * L4 protocol result
	 */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 is the last header. This is similar case as 6-TCP or 17-UDP */
	/* Result Info: UDF7=1, DS lite */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
				  MVPP2_PRS_RI_UDF7_IP6_LITE,
				  MVPP2_PRS_RI_UDF7_MASK);
	if (err)
		return err;

	/* IPv6 multicast */
	err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Entry for checking hop limit */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Mark unknown L3 and drop */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
				 MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_L3_PROTO_MASK |
				 MVPP2_PRS_RI_DROP_MASK);

	/* Match hop-limit byte == 0 */
	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);

	/* Update shadow table and hw entry.
	 * NOTE(review): the shadow lookup id is recorded as
	 * MVPP2_PRS_LU_IP4 although this is an IPv6 entry - looks like a
	 * copy-paste from the IPv4 init; confirm no shadow-table scan
	 * keys on the LU value before changing it.
	 */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);
	/* Set L4 offset relatively to our current place */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	/* Update shadow table and hw entry (same LU_IP4 note as above) */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown ext protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	/* Update shadow table and hw entry (same LU_IP4 note as above) */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unicast address */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_ADDR_UN;

	/* Finished: go to IPv6 again */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPV6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Match only on the first pass (no-ext ai bit not yet set) */
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
  1556. /* Find tcam entry with matched pair <vid,port> */
  1557. static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid,
  1558. u16 mask)
  1559. {
  1560. unsigned char byte[2], enable[2];
  1561. struct mvpp2_prs_entry pe;
  1562. u16 rvid, rmask;
  1563. int tid;
  1564. /* Go through the all entries with MVPP2_PRS_LU_VID */
  1565. for (tid = MVPP2_PE_VID_FILT_RANGE_START;
  1566. tid <= MVPP2_PE_VID_FILT_RANGE_END; tid++) {
  1567. if (!priv->prs_shadow[tid].valid ||
  1568. priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
  1569. continue;
  1570. mvpp2_prs_init_from_hw(priv, &pe, tid);
  1571. mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
  1572. mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
  1573. rvid = ((byte[0] & 0xf) << 8) + byte[1];
  1574. rmask = ((enable[0] & 0xf) << 8) + enable[1];
  1575. if (rvid != vid || rmask != mask)
  1576. continue;
  1577. return tid;
  1578. }
  1579. return -ENOENT;
  1580. }
  1581. /* Write parser entry for VID filtering */
  1582. int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
  1583. {
  1584. unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START +
  1585. port->id * MVPP2_PRS_VLAN_FILT_MAX;
  1586. unsigned int mask = 0xfff, reg_val, shift;
  1587. struct mvpp2 *priv = port->priv;
  1588. struct mvpp2_prs_entry pe;
  1589. int tid;
  1590. memset(&pe, 0, sizeof(pe));
  1591. /* Scan TCAM and see if entry with this <vid,port> already exist */
  1592. tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, mask);
  1593. reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
  1594. if (reg_val & MVPP2_DSA_EXTENDED)
  1595. shift = MVPP2_VLAN_TAG_EDSA_LEN;
  1596. else
  1597. shift = MVPP2_VLAN_TAG_LEN;
  1598. /* No such entry */
  1599. if (tid < 0) {
  1600. /* Go through all entries from first to last in vlan range */
  1601. tid = mvpp2_prs_tcam_first_free(priv, vid_start,
  1602. vid_start +
  1603. MVPP2_PRS_VLAN_FILT_MAX_ENTRY);
  1604. /* There isn't room for a new VID filter */
  1605. if (tid < 0)
  1606. return tid;
  1607. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
  1608. pe.index = tid;
  1609. /* Mask all ports */
  1610. mvpp2_prs_tcam_port_map_set(&pe, 0);
  1611. } else {
  1612. mvpp2_prs_init_from_hw(priv, &pe, tid);
  1613. }
  1614. /* Enable the current port */
  1615. mvpp2_prs_tcam_port_set(&pe, port->id, true);
  1616. /* Continue - set next lookup */
  1617. mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
  1618. /* Skip VLAN header - Set offset to 4 or 8 bytes */
  1619. mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
  1620. /* Set match on VID */
  1621. mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid);
  1622. /* Clear all ai bits for next iteration */
  1623. mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
  1624. /* Update shadow table */
  1625. mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
  1626. mvpp2_prs_hw_write(priv, &pe);
  1627. return 0;
  1628. }
  1629. /* Write parser entry for VID filtering */
  1630. void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
  1631. {
  1632. struct mvpp2 *priv = port->priv;
  1633. int tid;
  1634. /* Scan TCAM and see if entry with this <vid,port> already exist */
  1635. tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, 0xfff);
  1636. /* No such entry */
  1637. if (tid < 0)
  1638. return;
  1639. mvpp2_prs_hw_inv(priv, tid);
  1640. priv->prs_shadow[tid].valid = false;
  1641. }
  1642. /* Remove all existing VID filters on this port */
  1643. void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
  1644. {
  1645. struct mvpp2 *priv = port->priv;
  1646. int tid;
  1647. for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
  1648. tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
  1649. if (priv->prs_shadow[tid].valid)
  1650. mvpp2_prs_vid_entry_remove(port, tid);
  1651. }
  1652. }
  1653. /* Remove VID filering entry for this port */
  1654. void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port)
  1655. {
  1656. unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
  1657. struct mvpp2 *priv = port->priv;
  1658. /* Invalidate the guard entry */
  1659. mvpp2_prs_hw_inv(priv, tid);
  1660. priv->prs_shadow[tid].valid = false;
  1661. }
/* Add guard entry that drops packets when no VID is matched on this port
 *
 * The guard entry sits at the port's default VID slot; it skips the tag
 * and marks the packet for drop. The per-VID entries added by
 * mvpp2_prs_vid_entry_add() take effect instead when a VID matches.
 */
void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
{
	unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
	struct mvpp2 *priv = port->priv;
	unsigned int reg_val, shift;
	struct mvpp2_prs_entry pe;

	/* The guard entry is already programmed - nothing to do */
	if (priv->prs_shadow[tid].valid)
		return;

	memset(&pe, 0, sizeof(pe));

	pe.index = tid;

	/* Tag length to skip depends on whether extended DSA is in use */
	reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
	if (reg_val & MVPP2_DSA_EXTENDED)
		shift = MVPP2_VLAN_TAG_EDSA_LEN;
	else
		shift = MVPP2_VLAN_TAG_LEN;

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

	/* Mask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, 0);

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port->id, true);

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Skip VLAN header - Set offset to 4 or 8 bytes */
	mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Drop VLAN packets that don't belong to any VIDs on this port */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Update shadow table */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);
}
/* Parser default initialization
 *
 * Enables the TCAM, wipes all TCAM/SRAM entries, allocates the shadow
 * table, resets every port to the MH lookup stage and then installs the
 * default entries for each parser stage.
 * Returns 0 on success, -ENOMEM if the shadow table allocation fails,
 * or the first stage initializer's negative errno.
 */
int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	int err, index, i;

	/* Enable tcam table */
	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);

	/* Clear all tcam and sram entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);

		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
	}

	/* Invalidate all tcam entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
		mvpp2_prs_hw_inv(priv, index);

	/* Shadow copy of the tcam state, device-managed (freed on driver
	 * detach)
	 */
	priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
					sizeof(*priv->prs_shadow),
					GFP_KERNEL);
	if (!priv->prs_shadow)
		return -ENOMEM;

	/* Always start from lookup = 0 */
	for (index = 0; index < MVPP2_MAX_PORTS; index++)
		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
				       MVPP2_PRS_PORT_LU_MAX, 0);

	/* Install the default entries for each lookup stage */
	mvpp2_prs_def_flow_init(priv);

	mvpp2_prs_mh_init(priv);

	mvpp2_prs_mac_init(priv);

	mvpp2_prs_dsa_init(priv);

	mvpp2_prs_vid_init(priv);

	err = mvpp2_prs_etype_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_vlan_init(pdev, priv);
	if (err)
		return err;

	err = mvpp2_prs_pppoe_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip6_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip4_init(priv);
	if (err)
		return err;

	return 0;
}
  1745. /* Compare MAC DA with tcam entry data */
  1746. static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
  1747. const u8 *da, unsigned char *mask)
  1748. {
  1749. unsigned char tcam_byte, tcam_mask;
  1750. int index;
  1751. for (index = 0; index < ETH_ALEN; index++) {
  1752. mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
  1753. if (tcam_mask != mask[index])
  1754. return false;
  1755. if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
  1756. return false;
  1757. }
  1758. return true;
  1759. }
  1760. /* Find tcam entry with matched pair <MAC DA, port> */
  1761. static int
  1762. mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
  1763. unsigned char *mask, int udf_type)
  1764. {
  1765. struct mvpp2_prs_entry pe;
  1766. int tid;
  1767. /* Go through the all entires with MVPP2_PRS_LU_MAC */
  1768. for (tid = MVPP2_PE_MAC_RANGE_START;
  1769. tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
  1770. unsigned int entry_pmap;
  1771. if (!priv->prs_shadow[tid].valid ||
  1772. (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
  1773. (priv->prs_shadow[tid].udf != udf_type))
  1774. continue;
  1775. mvpp2_prs_init_from_hw(priv, &pe, tid);
  1776. entry_pmap = mvpp2_prs_tcam_port_map_get(&pe);
  1777. if (mvpp2_prs_mac_range_equals(&pe, da, mask) &&
  1778. entry_pmap == pmap)
  1779. return tid;
  1780. }
  1781. return -ENOENT;
  1782. }
/* Update parser's mac da entry
 *
 * When @add is true, creates or updates the TCAM entry accepting @da and
 * enables this port on it; when false, disables the port and invalidates
 * the entry once no port uses it.
 * Returns 0 on success or a negative errno (no free TCAM slot, or an
 * unexpectedly empty port map while adding).
 */
int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add)
{
	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	struct mvpp2 *priv = port->priv;
	unsigned int pmap, len, ri;
	struct mvpp2_prs_entry pe;
	int tid;

	memset(&pe, 0, sizeof(pe));

	/* Scan TCAM and see if entry with this <MAC DA, port> already exist */
	tid = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask,
					  MVPP2_PRS_UDF_MAC_DEF);

	/* No such entry */
	if (tid < 0) {
		/* Removing an entry that was never added is a no-op */
		if (!add)
			return 0;

		/* Create new TCAM entry */
		/* Go through the all entries from first to last */
		tid = mvpp2_prs_tcam_first_free(priv,
						MVPP2_PE_MAC_RANGE_START,
						MVPP2_PE_MAC_RANGE_END);
		if (tid < 0)
			return tid;

		pe.index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	} else {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port->id, add);

	/* Invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(&pe);
	if (pmap == 0) {
		/* An empty map right after enabling the port should not
		 * happen when adding - treated as an error (defensive)
		 */
		if (add)
			return -EINVAL;

		mvpp2_prs_hw_inv(priv, pe.index);
		priv->prs_shadow[pe.index].valid = false;
		return 0;
	}

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

	/* Set match on DA */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);

	/* Set result info bits: broadcast is tested before multicast
	 * because the broadcast address is also a multicast address
	 */
	if (is_broadcast_ether_addr(da)) {
		ri = MVPP2_PRS_RI_L2_BCAST;
	} else if (is_multicast_ether_addr(da)) {
		ri = MVPP2_PRS_RI_L2_MCAST;
	} else {
		ri = MVPP2_PRS_RI_L2_UCAST;

		/* Mark frames addressed to the port's own MAC */
		if (ether_addr_equal(da, port->dev->dev_addr))
			ri |= MVPP2_PRS_RI_MAC_ME_MASK;
	}

	mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				 MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(priv, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				MVPP2_PRS_RI_MAC_ME_MASK);

	/* Shift to ethertype */
	mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Update shadow table and hw entry */
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
  1853. int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
  1854. {
  1855. struct mvpp2_port *port = netdev_priv(dev);
  1856. int err;
  1857. /* Remove old parser entry */
  1858. err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, false);
  1859. if (err)
  1860. return err;
  1861. /* Add new parser entry */
  1862. err = mvpp2_prs_mac_da_accept(port, da, true);
  1863. if (err)
  1864. return err;
  1865. /* Set addr in the device */
  1866. ether_addr_copy(dev->dev_addr, da);
  1867. return 0;
  1868. }
  1869. void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
  1870. {
  1871. struct mvpp2 *priv = port->priv;
  1872. struct mvpp2_prs_entry pe;
  1873. unsigned long pmap;
  1874. int index, tid;
  1875. for (tid = MVPP2_PE_MAC_RANGE_START;
  1876. tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
  1877. unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
  1878. if (!priv->prs_shadow[tid].valid ||
  1879. (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
  1880. (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
  1881. continue;
  1882. mvpp2_prs_init_from_hw(priv, &pe, tid);
  1883. pmap = mvpp2_prs_tcam_port_map_get(&pe);
  1884. /* We only want entries active on this port */
  1885. if (!test_bit(port->id, &pmap))
  1886. continue;
  1887. /* Read mac addr from entry */
  1888. for (index = 0; index < ETH_ALEN; index++)
  1889. mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
  1890. &da_mask[index]);
  1891. /* Special cases : Don't remove broadcast and port's own
  1892. * address
  1893. */
  1894. if (is_broadcast_ether_addr(da) ||
  1895. ether_addr_equal(da, port->dev->dev_addr))
  1896. continue;
  1897. /* Remove entry from TCAM */
  1898. mvpp2_prs_mac_da_accept(port, da, false);
  1899. }
  1900. }
/* Configure the DSA/EDSA tagging mode of a port by adding it to (or
 * removing it from) the tagged/untagged DSA and EDSA parser entries.
 * Returns 0, or -EINVAL for an out-of-range @type.
 */
int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
{
	switch (type) {
	case MVPP2_TAG_TYPE_EDSA:
		/* Add port to EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		/* Remove port from DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		break;
	case MVPP2_TAG_TYPE_DSA:
		/* Add port to DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		/* Remove port from EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;
	case MVPP2_TAG_TYPE_MH:
	case MVPP2_TAG_TYPE_NONE:
		/* Remove port form EDSA and DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;
	default:
		/* NOTE(review): only out-of-range values are rejected;
		 * any other in-range tag type falls through and returns 0
		 * without touching the parser - confirm this is intended.
		 */
		if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
			return -EINVAL;
	}

	return 0;
}
  1946. /* Set prs flow for the port */
  1947. int mvpp2_prs_def_flow(struct mvpp2_port *port)
  1948. {
  1949. struct mvpp2_prs_entry pe;
  1950. int tid;
  1951. memset(&pe, 0, sizeof(pe));
  1952. tid = mvpp2_prs_flow_find(port->priv, port->id);
  1953. /* Such entry not exist */
  1954. if (tid < 0) {
  1955. /* Go through the all entires from last to first */
  1956. tid = mvpp2_prs_tcam_first_free(port->priv,
  1957. MVPP2_PE_LAST_FREE_TID,
  1958. MVPP2_PE_FIRST_FREE_TID);
  1959. if (tid < 0)
  1960. return tid;
  1961. pe.index = tid;
  1962. /* Set flow ID*/
  1963. mvpp2_prs_sram_ai_update(&pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
  1964. mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
  1965. /* Update shadow table */
  1966. mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS);
  1967. } else {
  1968. mvpp2_prs_init_from_hw(port->priv, &pe, tid);
  1969. }
  1970. mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
  1971. mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id));
  1972. mvpp2_prs_hw_write(port->priv, &pe);
  1973. return 0;
  1974. }