/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.h
 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _MLXSW_SPECTRUM_H
#define _MLXSW_SPECTRUM_H

#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/list.h>
#include <linux/dcbnl.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <net/psample.h>
#include <net/pkt_cls.h>

#include "port.h"
#include "core.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"

#define MLXSW_SP_VFID_BASE VLAN_N_VID
#define MLXSW_SP_VFID_MAX 1024	/* Bridged VLAN interfaces */

#define MLXSW_SP_DUMMY_FID 15359
#define MLXSW_SP_RFID_BASE 15360

#define MLXSW_SP_MID_MAX 7000

#define MLXSW_SP_PORTS_PER_CLUSTER_MAX 4

#define MLXSW_SP_PORT_BASE_SPEED 25000	/* Mb/s */

#define MLXSW_SP_KVD_LINEAR_SIZE 65536 /* entries */
#define MLXSW_SP_KVD_GRANULARITY 128

struct mlxsw_sp_port;
struct mlxsw_sp_rif;

struct mlxsw_sp_upper {
	struct net_device *dev;
	unsigned int ref_count;
};

struct mlxsw_sp_fid {
	void (*leave)(struct mlxsw_sp_port *mlxsw_sp_vport);
	struct list_head list;
	unsigned int ref_count;
	struct net_device *dev;
	struct mlxsw_sp_rif *rif;
	u16 fid;
};

struct mlxsw_sp_mid {
	struct list_head list;
	unsigned char addr[ETH_ALEN];
	u16 fid;
	u16 mid;
	unsigned int ref_count;
};

static inline u16 mlxsw_sp_vfid_to_fid(u16 vfid)
{
	return MLXSW_SP_VFID_BASE + vfid;
}

static inline u16 mlxsw_sp_fid_to_vfid(u16 fid)
{
	return fid - MLXSW_SP_VFID_BASE;
}

static inline bool mlxsw_sp_fid_is_vfid(u16 fid)
{
	return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_DUMMY_FID;
}
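
/* Illustrative example (not part of the original header): a vFID round-trips
 * through the conversion helpers above. MLXSW_SP_VFID_BASE is VLAN_N_VID
 * (4096), so vFID 5 maps to FID 4101; the vFID number used here is an
 * arbitrary value chosen for the example.
 *
 *	u16 fid = mlxsw_sp_vfid_to_fid(5);	// 4096 + 5 = 4101
 *	WARN_ON(!mlxsw_sp_fid_is_vfid(fid));	// 4101 lies in the vFID range
 *	WARN_ON(mlxsw_sp_fid_to_vfid(fid) != 5);
 */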

struct mlxsw_sp_sb_pr {
	enum mlxsw_reg_sbpr_mode mode;
	u32 size;
};

struct mlxsw_cp_sb_occ {
	u32 cur;
	u32 max;
};

struct mlxsw_sp_sb_cm {
	u32 min_buff;
	u32 max_buff;
	u8 pool;
	struct mlxsw_cp_sb_occ occ;
};

struct mlxsw_sp_sb_pm {
	u32 min_buff;
	u32 max_buff;
	struct mlxsw_cp_sb_occ occ;
};

#define MLXSW_SP_SB_POOL_COUNT 4
#define MLXSW_SP_SB_TC_COUNT 8

struct mlxsw_sp_sb_port {
	struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT];
	struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT];
};

struct mlxsw_sp_sb {
	struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT];
	struct mlxsw_sp_sb_port *ports;
	u32 cell_size;
};

#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};
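
/* Illustrative example (not part of the original header): marking a prefix
 * length as used in a prefix-usage bitmap with the standard bitmap helpers.
 * The /24 value is an arbitrary choice for the example; the router code may
 * wrap this differently.
 *
 *	struct mlxsw_sp_prefix_usage prefix_usage = {};
 *
 *	__set_bit(24, prefix_usage.b);
 *	if (test_bit(24, prefix_usage.b))
 *		pr_debug("prefix length 24 in use\n");
 */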

enum mlxsw_sp_l3proto {
	MLXSW_SP_L3_PROTO_IPV4,
	MLXSW_SP_L3_PROTO_IPV6,
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib;

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
};

enum mlxsw_sp_span_type {
	MLXSW_SP_SPAN_EGRESS,
	MLXSW_SP_SPAN_INGRESS
};

struct mlxsw_sp_span_inspected_port {
	struct list_head list;
	enum mlxsw_sp_span_type type;
	u8 local_port;
};

struct mlxsw_sp_span_entry {
	u8 local_port;
	bool used;
	struct list_head bound_ports_list;
	int ref_count;
	int id;
};

enum mlxsw_sp_port_mall_action_type {
	MLXSW_SP_PORT_MALL_MIRROR,
	MLXSW_SP_PORT_MALL_SAMPLE,
};

struct mlxsw_sp_port_mall_mirror_tc_entry {
	u8 to_local_port;
	bool ingress;
};

struct mlxsw_sp_port_mall_tc_entry {
	struct list_head list;
	unsigned long cookie;
	enum mlxsw_sp_port_mall_action_type type;
	union {
		struct mlxsw_sp_port_mall_mirror_tc_entry mirror;
	};
};

struct mlxsw_sp_router {
	struct mlxsw_sp_vr *vrs;
	struct rhashtable neigh_ht;
	struct rhashtable nexthop_group_ht;
	struct rhashtable nexthop_ht;
	struct {
		struct mlxsw_sp_lpm_tree *trees;
		unsigned int tree_count;
	} lpm;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
	} neighs_update;
	struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
	struct list_head nexthop_neighs_list;
	bool aborted;
};

struct mlxsw_sp_acl;
struct mlxsw_sp_counter_pool;

struct mlxsw_sp {
	struct {
		struct list_head list;
		DECLARE_BITMAP(mapped, MLXSW_SP_VFID_MAX);
	} vfids;
	struct {
		struct list_head list;
		DECLARE_BITMAP(mapped, MLXSW_SP_MID_MAX);
	} br_mids;
	struct list_head fids;	/* VLAN-aware bridge FIDs */
	struct mlxsw_sp_rif **rifs;
	struct mlxsw_sp_port **ports;
	struct mlxsw_core *core;
	const struct mlxsw_bus_info *bus_info;
	unsigned char base_mac[ETH_ALEN];
	struct {
		struct delayed_work dw;
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
		unsigned int interval; /* ms */
	} fdb_notify;
#define MLXSW_SP_MIN_AGEING_TIME 10
#define MLXSW_SP_MAX_AGEING_TIME 1000000
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
	u32 ageing_time;
	struct mlxsw_sp_upper master_bridge;
	struct mlxsw_sp_upper *lags;
	u8 *port_to_module;
	struct mlxsw_sp_sb sb;
	struct mlxsw_sp_router router;
	struct mlxsw_sp_acl *acl;
	struct {
		DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE);
	} kvdl;
	struct mlxsw_sp_counter_pool *counter_pool;
	struct {
		struct mlxsw_sp_span_entry *entries;
		int entries_count;
	} span;
	struct notifier_block fib_nb;
};

static inline struct mlxsw_sp_upper *
mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	return &mlxsw_sp->lags[lag_id];
}

static inline u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp,
				       u32 cells)
{
	return mlxsw_sp->sb.cell_size * cells;
}

static inline u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp,
				       u32 bytes)
{
	return DIV_ROUND_UP(bytes, mlxsw_sp->sb.cell_size);
}
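
/* Illustrative example (not part of the original header): shared-buffer
 * thresholds are programmed in cells, so a byte value coming from devlink
 * would first be converted up front, and an occupancy value read back in
 * cells converted to bytes. "mlxsw_sp" and "threshold_bytes" are assumed to
 * exist in the caller.
 *
 *	u32 cells = mlxsw_sp_bytes_cells(mlxsw_sp, threshold_bytes);
 *	u32 bytes = mlxsw_sp_cells_bytes(mlxsw_sp, cells);
 *
 * Note that bytes >= threshold_bytes, since the byte-to-cell conversion
 * rounds up to a whole number of cells.
 */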

struct mlxsw_sp_port_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
	u32 tx_dropped;
};
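
/* Illustrative example (not part of the original header): the per-CPU RX
 * counters above are updated under the u64_stats seqcount in the usual
 * kernel pattern. "mlxsw_sp_port" and "skb" are assumed locals in the
 * receive path.
 *
 *	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
 *
 *	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
 *	u64_stats_update_begin(&pcpu_stats->syncp);
 *	pcpu_stats->rx_packets++;
 *	pcpu_stats->rx_bytes += skb->len;
 *	u64_stats_update_end(&pcpu_stats->syncp);
 */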

struct mlxsw_sp_port_sample {
	struct psample_group __rcu *psample_group;
	u32 trunc_size;
	u32 rate;
	bool truncate;
};

struct mlxsw_sp_port {
	struct net_device *dev;
	struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
	struct mlxsw_sp *mlxsw_sp;
	u8 local_port;
	u8 stp_state;
	u16 learning:1,
	    learning_sync:1,
	    uc_flood:1,
	    mc_flood:1,
	    mc_router:1,
	    mc_disabled:1,
	    bridged:1,
	    lagged:1,
	    split:1;
	u16 pvid;
	u16 lag_id;
	struct {
		struct list_head list;
		struct mlxsw_sp_fid *f;
		u16 vid;
	} vport;
	struct {
		u8 tx_pause:1,
		   rx_pause:1,
		   autoneg:1;
	} link;
	struct {
		struct ieee_ets *ets;
		struct ieee_maxrate *maxrate;
		struct ieee_pfc *pfc;
	} dcb;
	struct {
		u8 module;
		u8 width;
		u8 lane;
	} mapping;
	/* 802.1Q bridge VLANs */
	unsigned long *active_vlans;
	unsigned long *untagged_vlans;
	/* VLAN interfaces */
	struct list_head vports_list;
	/* TC handles */
	struct list_head mall_tc_list;
	struct {
#define MLXSW_HW_STATS_UPDATE_TIME HZ
		/* Periodic hw stats update handling */
		struct rtnl_link_stats64 *cache;
		struct delayed_work update_dw;
	} hw_stats;
	struct mlxsw_sp_port_sample *sample;
};

bool mlxsw_sp_port_dev_check(const struct net_device *dev);
struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);

static inline bool
mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	return mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause;
}

static inline struct mlxsw_sp_port *
mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 local_port;

	local_port = mlxsw_core_lag_mapping_get(mlxsw_sp->core,
						lag_id, port_index);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	return mlxsw_sp_port && mlxsw_sp_port->lagged ? mlxsw_sp_port : NULL;
}
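
/* Illustrative example (not part of the original header): resolving the
 * mlxsw_sp_port behind a given LAG member index, e.g. when demultiplexing an
 * event received on a LAG. "mlxsw_sp", "lag_id" and "port_index" are assumed
 * to come from the caller.
 *
 *	struct mlxsw_sp_port *mlxsw_sp_port;
 *
 *	mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, port_index);
 *	if (!mlxsw_sp_port)
 *		return;	// stale mapping or the port is no longer lagged
 */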

static inline u16
mlxsw_sp_vport_vid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
	return mlxsw_sp_vport->vport.vid;
}

static inline bool
mlxsw_sp_port_is_vport(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

	return vid != 0;
}

static inline void mlxsw_sp_vport_fid_set(struct mlxsw_sp_port *mlxsw_sp_vport,
					  struct mlxsw_sp_fid *f)
{
	mlxsw_sp_vport->vport.f = f;
}

static inline struct mlxsw_sp_fid *
mlxsw_sp_vport_fid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
	return mlxsw_sp_vport->vport.f;
}

static inline struct net_device *
mlxsw_sp_vport_dev_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	return f ? f->dev : NULL;
}

static inline struct mlxsw_sp_port *
mlxsw_sp_port_vport_find(const struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		if (mlxsw_sp_vport_vid_get(mlxsw_sp_vport) == vid)
			return mlxsw_sp_vport;
	}

	return NULL;
}
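
/* Illustrative example (not part of the original header): looking up the
 * vPort (VLAN interface) of a port for a given VID before acting on it.
 * "mlxsw_sp_port" and "vid" are assumed to come from the caller.
 *
 *	struct mlxsw_sp_port *mlxsw_sp_vport;
 *
 *	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
 *	if (!mlxsw_sp_vport)
 *		return -ENOENT;	// no VLAN interface for this VID
 */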

static inline struct mlxsw_sp_port *
mlxsw_sp_port_vport_find_by_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				u16 fid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

		if (f && f->fid == fid)
			return mlxsw_sp_vport;
	}

	return NULL;
}

static inline struct mlxsw_sp_fid *mlxsw_sp_fid_find(struct mlxsw_sp *mlxsw_sp,
						     u16 fid)
{
	struct mlxsw_sp_fid *f;

	list_for_each_entry(f, &mlxsw_sp->fids, list)
		if (f->fid == fid)
			return f;

	return NULL;
}

static inline struct mlxsw_sp_fid *
mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp,
		   const struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f;

	list_for_each_entry(f, &mlxsw_sp->vfids.list, list)
		if (f->dev == br_dev)
			return f;

	return NULL;
}

enum mlxsw_sp_flood_table {
	MLXSW_SP_FLOOD_TABLE_UC,
	MLXSW_SP_FLOOD_TABLE_BC,
	MLXSW_SP_FLOOD_TABLE_MC,
};

int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index,
			 struct devlink_sb_pool_info *pool_info);
int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index, u32 size,
			 enum devlink_sb_threshold_type threshold_type);
int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 *p_threshold);
int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 threshold);
int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 *p_pool_index, u32 *p_threshold);
int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 pool_index, u32 threshold);
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
			     unsigned int sb_index);
int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
			      unsigned int sb_index);
int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
				  unsigned int sb_index, u16 pool_index,
				  u32 *p_cur, u32 *p_max);
int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u32 *p_cur, u32 *p_max);

int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid);
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged);
int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
			     bool set);
void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid);
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
			bool adding);
struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid);
void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f);
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight);
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass);
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc);
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate);
int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool learn_enable);

#ifdef CONFIG_MLXSW_SPECTRUM_DCB

int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port);
void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port);

#else

static inline int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	return 0;
}

static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{}

#endif

int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
				   unsigned long event, void *ptr);
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev);
int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
			    unsigned long event, void *ptr);
void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_rif *rif);
int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
				 struct netdev_notifier_changeupper_info *info);

int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count,
			u32 *p_entry_index);
void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
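
/* Illustrative example (not part of the original header): allocating a run
 * of KVD linear entries and releasing it again. "mlxsw_sp" is assumed to be
 * a valid driver instance and the three-entry count is arbitrary.
 *
 *	u32 kvdl_index;
 *	int err;
 *
 *	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 3, &kvdl_index);
 *	if (err)
 *		return err;
 *	// ... program hardware objects at kvdl_index ...
 *	mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
 */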

struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);

struct mlxsw_sp_acl_rule_info {
	unsigned int priority;
	struct mlxsw_afk_element_values values;
	struct mlxsw_afa_block *act_block;
	unsigned int counter_index;
	bool counter_valid;
};

enum mlxsw_sp_acl_profile {
	MLXSW_SP_ACL_PROFILE_FLOWER,
};

struct mlxsw_sp_acl_profile_ops {
	size_t ruleset_priv_size;
	int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp,
			   void *priv, void *ruleset_priv);
	void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
	int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
			    struct net_device *dev, bool ingress);
	void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
	size_t rule_priv_size;
	int (*rule_add)(struct mlxsw_sp *mlxsw_sp,
			void *ruleset_priv, void *rule_priv,
			struct mlxsw_sp_acl_rule_info *rulei);
	void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv);
	int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv,
				 bool *activity);
};

struct mlxsw_sp_acl_ops {
	size_t priv_size;
	int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
	void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
	const struct mlxsw_sp_acl_profile_ops *
			(*profile_ops)(struct mlxsw_sp *mlxsw_sp,
				       enum mlxsw_sp_acl_profile profile);
};

struct mlxsw_sp_acl_ruleset;

struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
			 struct net_device *dev, bool ingress,
			 enum mlxsw_sp_acl_profile profile);
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_ruleset *ruleset);

struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl);
void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei);
void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
				 unsigned int priority);
void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    u32 key_value, u32 mask_value);
void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    const char *key_value,
				    const char *mask_value, unsigned int len);
void mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei);
void mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
				 u16 group_id);
int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule_info *rulei,
			       struct net_device *out_dev);
int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule_info *rulei,
				u32 action, u16 vid, u16 proto, u8 prio);
int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_rule_info *rulei,
				   u16 fid);

struct mlxsw_sp_acl_rule;

struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie);
void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule *rule);
int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_rule *rule);
void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_rule *rule);
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie);
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule);
int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule *rule,
				u64 *packets, u64 *bytes, u64 *last_use);

int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
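
/* Illustrative example (not part of the original header): a rough sketch of
 * how a consumer such as the flower offload code could chain the ACL API
 * above for an ingress drop rule. Error handling is omitted and "mlxsw_sp",
 * "dev" and "cookie" are assumed to come from the caller.
 *
 *	struct mlxsw_sp_acl_ruleset *ruleset;
 *	struct mlxsw_sp_acl_rule *rule;
 *	struct mlxsw_sp_acl_rule_info *rulei;
 *
 *	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, dev, true,
 *					   MLXSW_SP_ACL_PROFILE_FLOWER);
 *	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, cookie);
 *	rulei = mlxsw_sp_acl_rule_rulei(rule);
 *	mlxsw_sp_acl_rulei_priority(rulei, 1);
 *	mlxsw_sp_acl_rulei_act_drop(rulei);
 *	mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
 *	...
 *	mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
 *	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
 *	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
 */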

extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops;

int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
			    __be16 protocol, struct tc_cls_flower_offload *f);
void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
			     struct tc_cls_flower_offload *f);
int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
			  struct tc_cls_flower_offload *f);

int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes);
int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index);
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index);
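
/* Illustrative example (not part of the original header): allocating a flow
 * counter, reading its packet/byte values and releasing it. "mlxsw_sp" is
 * assumed to be a valid driver instance.
 *
 *	unsigned int counter_index;
 *	u64 packets, bytes;
 *	int err;
 *
 *	err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &counter_index);
 *	if (err)
 *		return err;
 *	err = mlxsw_sp_flow_counter_get(mlxsw_sp, counter_index,
 *					&packets, &bytes);
 *	mlxsw_sp_flow_counter_free(mlxsw_sp, counter_index);
 */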

#endif