// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
 * stmmac TC Handling (HW only)
 */

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include "common.h"
#include "dwmac4.h"
#include "dwmac5.h"
#include "stmmac.h"
  12. static void tc_fill_all_pass_entry(struct stmmac_tc_entry *entry)
  13. {
  14. memset(entry, 0, sizeof(*entry));
  15. entry->in_use = true;
  16. entry->is_last = true;
  17. entry->is_frag = false;
  18. entry->prio = ~0x0;
  19. entry->handle = 0;
  20. entry->val.match_data = 0x0;
  21. entry->val.match_en = 0x0;
  22. entry->val.af = 1;
  23. entry->val.dma_ch_no = 0x0;
  24. }
  25. static struct stmmac_tc_entry *tc_find_entry(struct stmmac_priv *priv,
  26. struct tc_cls_u32_offload *cls,
  27. bool free)
  28. {
  29. struct stmmac_tc_entry *entry, *first = NULL, *dup = NULL;
  30. u32 loc = cls->knode.handle;
  31. int i;
  32. for (i = 0; i < priv->tc_entries_max; i++) {
  33. entry = &priv->tc_entries[i];
  34. if (!entry->in_use && !first && free)
  35. first = entry;
  36. if (entry->handle == loc && !free)
  37. dup = entry;
  38. }
  39. if (dup)
  40. return dup;
  41. if (first) {
  42. first->handle = loc;
  43. first->in_use = true;
  44. /* Reset HW values */
  45. memset(&first->val, 0, sizeof(first->val));
  46. }
  47. return first;
  48. }
  49. static int tc_fill_actions(struct stmmac_tc_entry *entry,
  50. struct stmmac_tc_entry *frag,
  51. struct tc_cls_u32_offload *cls)
  52. {
  53. struct stmmac_tc_entry *action_entry = entry;
  54. const struct tc_action *act;
  55. struct tcf_exts *exts;
  56. int i;
  57. exts = cls->knode.exts;
  58. if (!tcf_exts_has_actions(exts))
  59. return -EINVAL;
  60. if (frag)
  61. action_entry = frag;
  62. tcf_exts_for_each_action(i, act, exts) {
  63. /* Accept */
  64. if (is_tcf_gact_ok(act)) {
  65. action_entry->val.af = 1;
  66. break;
  67. }
  68. /* Drop */
  69. if (is_tcf_gact_shot(act)) {
  70. action_entry->val.rf = 1;
  71. break;
  72. }
  73. /* Unsupported */
  74. return -EINVAL;
  75. }
  76. return 0;
  77. }
/* Translate a single-key cls_u32 match into one (or two) filter-table
 * entries.
 *
 * The HW compares a 32-bit word at a word-aligned frame offset.  When the
 * requested offset is not word aligned, the data/mask pair is split across
 * two consecutive entries: @entry matches the upper bits of the word at
 * the aligned offset, @frag matches the remaining bits in the next word.
 *
 * Returns 0 on success or a negative errno.
 */
static int tc_fill_entry(struct stmmac_priv *priv,
			 struct tc_cls_u32_offload *cls)
{
	struct stmmac_tc_entry *entry, *frag = NULL;
	struct tc_u32_sel *sel = cls->knode.sel;
	u32 off, data, mask, real_off, rem;
	u32 prio = cls->common.prio;
	int ret;

	/* Only 1 match per entry */
	if (sel->nkeys <= 0 || sel->nkeys > 1)
		return -EINVAL;

	off = sel->keys[0].off << sel->offshift;
	data = sel->keys[0].val;
	mask = sel->keys[0].mask;

	switch (ntohs(cls->common.protocol)) {
	case ETH_P_ALL:
		break;
	case ETH_P_IP:
		/* u32 offsets for IP are relative to the IP header, while
		 * the parser matches from the start of the frame - skip
		 * over the MAC header.
		 */
		off += ETH_HLEN;
		break;
	default:
		return -EINVAL;
	}

	/* The parser can only look a limited distance into the frame */
	if (off > priv->tc_off_max)
		return -EINVAL;

	real_off = off / 4;	/* word index programmed into the HW */
	rem = off % 4;		/* byte misalignment within that word */

	/* Claim a free slot for this rule's handle */
	entry = tc_find_entry(priv, cls, true);
	if (!entry)
		return -EINVAL;

	if (rem) {
		/* Misaligned: a second (fragment) entry matches the bits
		 * spilling into the following word.
		 */
		frag = tc_find_entry(priv, cls, true);
		if (!frag) {
			ret = -EINVAL;
			goto err_unuse;
		}

		entry->frag_ptr = frag;
		entry->val.match_en = (mask << (rem * 8)) &
			GENMASK(31, rem * 8);
		entry->val.match_data = (data << (rem * 8)) &
			GENMASK(31, rem * 8);
		entry->val.frame_offset = real_off;
		entry->prio = prio;

		frag->val.match_en = (mask >> (rem * 8)) &
			GENMASK(rem * 8 - 1, 0);
		frag->val.match_data = (data >> (rem * 8)) &
			GENMASK(rem * 8 - 1, 0);
		frag->val.frame_offset = real_off + 1;
		frag->prio = prio;
		frag->is_frag = true;
	} else {
		/* Aligned: single entry holds the whole data/mask pair */
		entry->frag_ptr = NULL;
		entry->val.match_en = mask;
		entry->val.match_data = data;
		entry->val.frame_offset = real_off;
		entry->prio = prio;
	}

	ret = tc_fill_actions(entry, frag, cls);
	if (ret)
		goto err_unuse;

	return 0;

err_unuse:
	/* Release any slots claimed above */
	if (frag)
		frag->in_use = false;
	entry->in_use = false;
	return ret;
}
  145. static void tc_unfill_entry(struct stmmac_priv *priv,
  146. struct tc_cls_u32_offload *cls)
  147. {
  148. struct stmmac_tc_entry *entry;
  149. entry = tc_find_entry(priv, cls, false);
  150. if (!entry)
  151. return;
  152. entry->in_use = false;
  153. if (entry->frag_ptr) {
  154. entry = entry->frag_ptr;
  155. entry->is_frag = false;
  156. entry->in_use = false;
  157. }
  158. }
  159. static int tc_config_knode(struct stmmac_priv *priv,
  160. struct tc_cls_u32_offload *cls)
  161. {
  162. int ret;
  163. ret = tc_fill_entry(priv, cls);
  164. if (ret)
  165. return ret;
  166. ret = stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries,
  167. priv->tc_entries_max);
  168. if (ret)
  169. goto err_unfill;
  170. return 0;
  171. err_unfill:
  172. tc_unfill_entry(priv, cls);
  173. return ret;
  174. }
  175. static int tc_delete_knode(struct stmmac_priv *priv,
  176. struct tc_cls_u32_offload *cls)
  177. {
  178. int ret;
  179. /* Set entry and fragments as not used */
  180. tc_unfill_entry(priv, cls);
  181. ret = stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries,
  182. priv->tc_entries_max);
  183. if (ret)
  184. return ret;
  185. return 0;
  186. }
  187. static int tc_setup_cls_u32(struct stmmac_priv *priv,
  188. struct tc_cls_u32_offload *cls)
  189. {
  190. switch (cls->command) {
  191. case TC_CLSU32_REPLACE_KNODE:
  192. tc_unfill_entry(priv, cls);
  193. /* Fall through */
  194. case TC_CLSU32_NEW_KNODE:
  195. return tc_config_knode(priv, cls);
  196. case TC_CLSU32_DELETE_KNODE:
  197. return tc_delete_knode(priv, cls);
  198. default:
  199. return -EOPNOTSUPP;
  200. }
  201. }
  202. static int tc_init(struct stmmac_priv *priv)
  203. {
  204. struct dma_features *dma_cap = &priv->dma_cap;
  205. unsigned int count;
  206. if (!dma_cap->frpsel)
  207. return -EINVAL;
  208. switch (dma_cap->frpbs) {
  209. case 0x0:
  210. priv->tc_off_max = 64;
  211. break;
  212. case 0x1:
  213. priv->tc_off_max = 128;
  214. break;
  215. case 0x2:
  216. priv->tc_off_max = 256;
  217. break;
  218. default:
  219. return -EINVAL;
  220. }
  221. switch (dma_cap->frpes) {
  222. case 0x0:
  223. count = 64;
  224. break;
  225. case 0x1:
  226. count = 128;
  227. break;
  228. case 0x2:
  229. count = 256;
  230. break;
  231. default:
  232. return -EINVAL;
  233. }
  234. /* Reserve one last filter which lets all pass */
  235. priv->tc_entries_max = count;
  236. priv->tc_entries = devm_kcalloc(priv->device,
  237. count, sizeof(*priv->tc_entries), GFP_KERNEL);
  238. if (!priv->tc_entries)
  239. return -ENOMEM;
  240. tc_fill_all_pass_entry(&priv->tc_entries[count - 1]);
  241. dev_info(priv->device, "Enabling HW TC (entries=%d, max_off=%d)\n",
  242. priv->tc_entries_max, priv->tc_off_max);
  243. return 0;
  244. }
/* Configure the Credit Based Shaper (for IEEE 802.1Qav-style traffic
 * shaping) on TX queue @qopt->queue.
 *
 * Returns 0 on success or a negative errno.
 */
static int tc_setup_cbs(struct stmmac_priv *priv,
			struct tc_cbs_qopt_offload *qopt)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 queue = qopt->queue;
	u32 ptr, speed_div;
	u32 mode_to_use;
	u64 value;
	int ret;

	/* Queue 0 is not AVB capable */
	if (queue <= 0 || queue >= tx_queues_count)
		return -EINVAL;

	/* The slope/credit scaling below is only defined for 100M/1G */
	if (priv->speed != SPEED_100 && priv->speed != SPEED_1000)
		return -EOPNOTSUPP;

	mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
	if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) {
		/* Enabling CBS on a DCB queue: switch it to AVB first */
		ret = stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_AVB);
		if (ret)
			return ret;

		priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
	} else if (!qopt->enable) {
		/* Disabling CBS: revert the queue to DCB and stop here */
		return stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_DCB);
	}

	/* Port Transmit Rate and Speed Divider */
	ptr = (priv->speed == SPEED_100) ? 4 : 8;
	speed_div = (priv->speed == SPEED_100) ? 100000 : 1000000;

	/* Final adjustments for HW: scale the qdisc parameters into the
	 * register fixed-point format, truncating to 32 bits.
	 * NOTE(review): factors presumably follow the MAC databook's CBS
	 * register encoding - confirm against the HW documentation.
	 */
	value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div);
	priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0);

	/* sendslope is negative in the qdisc; HW takes its magnitude */
	value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div);
	priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0);

	value = qopt->hicredit * 1024ll * 8;
	priv->plat->tx_queues_cfg[queue].high_credit = value & GENMASK(31, 0);

	value = qopt->locredit * 1024ll * 8;
	priv->plat->tx_queues_cfg[queue].low_credit = value & GENMASK(31, 0);

	ret = stmmac_config_cbs(priv, priv->hw,
				priv->plat->tx_queues_cfg[queue].send_slope,
				priv->plat->tx_queues_cfg[queue].idle_slope,
				priv->plat->tx_queues_cfg[queue].high_credit,
				priv->plat->tx_queues_cfg[queue].low_credit,
				queue);
	if (ret)
		return ret;

	dev_info(priv->device, "CBS queue %d: send %d, idle %d, hi %d, lo %d\n",
		 queue, qopt->sendslope, qopt->idleslope,
		 qopt->hicredit, qopt->locredit);
	return 0;
}
/* TC offload callbacks exported for DW MAC cores (dwmac510 variant) */
const struct stmmac_tc_ops dwmac510_tc_ops = {
	.init = tc_init,
	.setup_cls_u32 = tc_setup_cls_u32,
	.setup_cbs = tc_setup_cbs,
};