/*
 * drivers/net/ethernet/mellanox/mlxsw/core.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/if_link.h>
#include <linux/netdevice.h>
#include <linux/completion.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <asm/byteorder.h>
#include <net/devlink.h>
#include <trace/events/devlink.h>

#include "core.h"
#include "item.h"
#include "cmd.h"
#include "port.h"
#include "trap.h"
#include "emad.h"
#include "reg.h"
#include "resources.h"

static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);

static const char mlxsw_core_driver_name[] = "mlxsw_core";

static struct workqueue_struct *mlxsw_wq;
static struct workqueue_struct *mlxsw_owq;

struct mlxsw_core_port {
	struct devlink_port devlink_port;
	void *port_driver_priv;
	u8 local_port;
};

void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
{
	return mlxsw_core_port->port_driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_port_driver_priv);

static bool mlxsw_core_port_check(struct mlxsw_core_port *mlxsw_core_port)
{
	return mlxsw_core_port->port_driver_priv != NULL;
}

struct mlxsw_core {
	struct mlxsw_driver *driver;
	const struct mlxsw_bus *bus;
	void *bus_priv;
	const struct mlxsw_bus_info *bus_info;
	struct list_head rx_listener_list;
	struct list_head event_listener_list;
	struct {
		atomic64_t tid;
		struct list_head trans_list;
		spinlock_t trans_list_lock; /* protects trans_list writes */
		bool use_emad;
	} emad;
	struct {
		u8 *mapping; /* lag_id+port_index to local_port mapping */
	} lag;
	struct mlxsw_res res;
	struct mlxsw_hwmon *hwmon;
	struct mlxsw_thermal *thermal;
	struct mlxsw_core_port *ports;
	unsigned int max_ports;
	unsigned long driver_priv[0];
	/* driver_priv has to be always the last item */
};

#define MLXSW_PORT_MAX_PORTS_DEFAULT	0x40

static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core)
{
	/* Switch ports are numbered from 1 to queried value */
	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT))
		mlxsw_core->max_ports = MLXSW_CORE_RES_GET(mlxsw_core,
							   MAX_SYSTEM_PORT) + 1;
	else
		mlxsw_core->max_ports = MLXSW_PORT_MAX_PORTS_DEFAULT + 1;

	mlxsw_core->ports = kcalloc(mlxsw_core->max_ports,
				    sizeof(struct mlxsw_core_port), GFP_KERNEL);
	if (!mlxsw_core->ports)
		return -ENOMEM;

	return 0;
}

static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core)
{
	kfree(mlxsw_core->ports);
}

unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->max_ports;
}
EXPORT_SYMBOL(mlxsw_core_max_ports);

void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_driver_priv);

struct mlxsw_rx_listener_item {
	struct list_head list;
	struct mlxsw_rx_listener rxl;
	void *priv;
};

struct mlxsw_event_listener_item {
	struct list_head list;
	struct mlxsw_event_listener el;
	void *priv;
};

/******************
 * EMAD processing
 ******************/
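
/* An EMAD is an Ethernet frame addressed to the device and trapped back
 * to the CPU. As constructed below, its layout is:
 *
 *   Ethernet header | operation TLV | register TLV (with payload) | end TLV
 *
 * The item definitions that follow describe the individual header and
 * TLV fields.
 */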
/* emad_eth_hdr_dmac
 * Destination MAC in EMAD's Ethernet header.
 * Must be set to 01:02:c9:00:00:01
 */
MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);

/* emad_eth_hdr_smac
 * Source MAC in EMAD's Ethernet header.
 * Must be set to 00:02:c9:01:02:03
 */
MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);

/* emad_eth_hdr_ethertype
 * Ethertype in EMAD's Ethernet header.
 * Must be set to 0x8932
 */
MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);

/* emad_eth_hdr_mlx_proto
 * Mellanox protocol.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);

/* emad_eth_hdr_ver
 * Mellanox protocol version.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);

/* emad_op_tlv_type
 * Type of the TLV.
 * Must be set to 0x1 (operation TLV).
 */
MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);

/* emad_op_tlv_len
 * Length of the operation TLV in u32.
 * Must be set to 0x4.
 */
MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);

/* emad_op_tlv_dr
 * Direct route bit. Setting to 1 indicates the EMAD is a direct route
 * EMAD. DR TLV must follow.
 *
 * Note: Currently not supported and must not be set.
 */
MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);

/* emad_op_tlv_status
 * Returned status in case of EMAD response. Must be set to 0 in case
 * of EMAD request.
 * 0x0 - success
 * 0x1 - device is busy. Requester should retry
 * 0x2 - Mellanox protocol version not supported
 * 0x3 - unknown TLV
 * 0x4 - register not supported
 * 0x5 - operation class not supported
 * 0x6 - EMAD method not supported
 * 0x7 - bad parameter (e.g. port out of range)
 * 0x8 - resource not available
 * 0x9 - message receipt acknowledgment. Requester should retry
 * 0x70 - internal error
 */
MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);

/* emad_op_tlv_register_id
 * Register ID of register within register TLV.
 */
MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);

/* emad_op_tlv_r
 * Response bit. Setting to 1 indicates Response, otherwise request.
 */
MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);

/* emad_op_tlv_method
 * EMAD method type.
 * 0x1 - query
 * 0x2 - write
 * 0x3 - send (currently not supported)
 * 0x4 - event
 */
MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);

/* emad_op_tlv_class
 * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
 */
MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);

/* emad_op_tlv_tid
 * EMAD transaction ID. Used for pairing request and response EMADs.
 */
MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);

/* emad_reg_tlv_type
 * Type of the TLV.
 * Must be set to 0x3 (register TLV).
 */
MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);

/* emad_reg_tlv_len
 * Length of the register TLV in u32.
 */
MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);

/* emad_end_tlv_type
 * Type of the TLV.
 * Must be set to 0x0 (end TLV).
 */
MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);

/* emad_end_tlv_len
 * Length of the end TLV in u32.
 * Must be set to 1.
 */
MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);

enum mlxsw_core_reg_access_type {
	MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
	MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
};

static inline const char *
mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
{
	switch (type) {
	case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
		return "query";
	case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
		return "write";
	}
	BUG();
}

static void mlxsw_emad_pack_end_tlv(char *end_tlv)
{
	mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
	mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
}

static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
				    const struct mlxsw_reg_info *reg,
				    char *payload)
{
	mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
	mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
	memcpy(reg_tlv + sizeof(u32), payload, reg->len);
}

static void mlxsw_emad_pack_op_tlv(char *op_tlv,
				   const struct mlxsw_reg_info *reg,
				   enum mlxsw_core_reg_access_type type,
				   u64 tid)
{
	mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
	mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
	mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
	mlxsw_emad_op_tlv_status_set(op_tlv, 0);
	mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
	mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
	if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_QUERY);
	else
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_WRITE);
	mlxsw_emad_op_tlv_class_set(op_tlv,
				    MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
	mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
}

static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
{
	char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);

	mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
	mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
	mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
	mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
	mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);
	skb_reset_mac_header(skb);

	return 0;
}
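
/* The EMAD frame is built back to front: the end TLV is pushed first,
 * then the register TLV carrying the payload, then the operation TLV,
 * and finally the Ethernet header, so each skb_push() prepends the
 * next-outer layer.
 */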
static void mlxsw_emad_construct(struct sk_buff *skb,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 u64 tid)
{
	char *buf;

	buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_end_tlv(buf);

	buf = skb_push(skb, reg->len + sizeof(u32));
	mlxsw_emad_pack_reg_tlv(buf, reg, payload);

	buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_op_tlv(buf, reg, type, tid);

	mlxsw_emad_construct_eth_hdr(skb);
}

static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
}

static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
			  MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
}

static char *mlxsw_emad_reg_payload(const char *op_tlv)
{
	return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
}

static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return mlxsw_emad_op_tlv_tid_get(op_tlv);
}

static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
}

static int mlxsw_emad_process_status(char *op_tlv,
				     enum mlxsw_emad_op_tlv_status *p_status)
{
	*p_status = mlxsw_emad_op_tlv_status_get(op_tlv);

	switch (*p_status) {
	case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
		return 0;
	case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
	case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
		return -EAGAIN;
	case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
	case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
	case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
	case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
	default:
		return -EIO;
	}
}

static int
mlxsw_emad_process_status_skb(struct sk_buff *skb,
			      enum mlxsw_emad_op_tlv_status *p_status)
{
	return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
}

struct mlxsw_reg_trans {
	struct list_head list;
	struct list_head bulk_list;
	struct mlxsw_core *core;
	struct sk_buff *tx_skb;
	struct mlxsw_tx_info tx_info;
	struct delayed_work timeout_dw;
	unsigned int retries;
	u64 tid;
	struct completion completion;
	atomic_t active;
	mlxsw_reg_trans_cb_t *cb;
	unsigned long cb_priv;
	const struct mlxsw_reg_info *reg;
	enum mlxsw_core_reg_access_type type;
	int err;
	enum mlxsw_emad_op_tlv_status emad_status;
	struct rcu_head rcu;
};
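
/* Transaction lifecycle: an in-flight transaction is linked on
 * emad.trans_list so the RX listener can match responses by TID, and
 * the issuer later sleeps on trans->completion. Either the response
 * handler or the timeout work finishes the transaction; trans->active
 * arbitrates between them.
 */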
#define MLXSW_EMAD_TIMEOUT_MS	200

static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
{
	unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);

	mlxsw_core_schedule_dw(&trans->timeout_dw, timeout);
}

static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
			       struct mlxsw_reg_trans *trans)
{
	struct sk_buff *skb;
	int err;

	skb = skb_copy(trans->tx_skb, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
			    skb->data + mlxsw_core->driver->txhdr_len,
			    skb->len - mlxsw_core->driver->txhdr_len);

	atomic_set(&trans->active, 1);
	err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
	if (err) {
		dev_kfree_skb(skb);
		return err;
	}
	mlxsw_emad_trans_timeout_schedule(trans);
	return 0;
}

static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
{
	struct mlxsw_core *mlxsw_core = trans->core;

	dev_kfree_skb(trans->tx_skb);
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	trans->err = err;
	complete(&trans->completion);
}

static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
				      struct mlxsw_reg_trans *trans)
{
	int err;

	if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
		trans->retries++;
		err = mlxsw_emad_transmit(trans->core, trans);
		if (err == 0)
			return;
	} else {
		err = -EIO;
	}
	mlxsw_emad_trans_finish(trans, err);
}
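
/* The timeout work and the response handler both use
 * atomic_dec_and_test() on trans->active, which is set to 1 per
 * transmission attempt, so exactly one of the two paths acts on any
 * given attempt.
 */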
static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
{
	struct mlxsw_reg_trans *trans = container_of(work,
						     struct mlxsw_reg_trans,
						     timeout_dw.work);

	if (!atomic_dec_and_test(&trans->active))
		return;

	mlxsw_emad_transmit_retry(trans->core, trans);
}

static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
					struct mlxsw_reg_trans *trans,
					struct sk_buff *skb)
{
	int err;

	if (!atomic_dec_and_test(&trans->active))
		return;

	err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
	if (err == -EAGAIN) {
		mlxsw_emad_transmit_retry(mlxsw_core, trans);
	} else {
		if (err == 0) {
			char *op_tlv = mlxsw_emad_op_tlv(skb);

			if (trans->cb)
				trans->cb(mlxsw_core,
					  mlxsw_emad_reg_payload(op_tlv),
					  trans->reg->len, trans->cb_priv);
		}
		mlxsw_emad_trans_finish(trans, err);
	}
}

/* called with rcu read lock held */
static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
					void *priv)
{
	struct mlxsw_core *mlxsw_core = priv;
	struct mlxsw_reg_trans *trans;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
			    skb->data, skb->len);

	if (!mlxsw_emad_is_resp(skb))
		goto free_skb;

	list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
		if (mlxsw_emad_get_tid(skb) == trans->tid) {
			mlxsw_emad_process_response(mlxsw_core, trans, skb);
			break;
		}
	}

free_skb:
	dev_kfree_skb(skb);
}

static const struct mlxsw_listener mlxsw_emad_rx_listener =
	MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
		  EMAD, DISCARD);

static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
	u64 tid;
	int err;

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return 0;

	/* Set the upper 32 bits of the transaction ID field to a random
	 * number. This allows us to discard EMADs addressed to other
	 * devices.
	 */
	get_random_bytes(&tid, 4);
	tid <<= 32;
	atomic64_set(&mlxsw_core->emad.tid, tid);

	INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
	spin_lock_init(&mlxsw_core->emad.trans_list_lock);

	err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener,
				       mlxsw_core);
	if (err)
		return err;

	err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core);
	if (err)
		goto err_emad_trap_set;
	mlxsw_core->emad.use_emad = true;

	return 0;

err_emad_trap_set:
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
				   mlxsw_core);
	return err;
}

static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
{
	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return;

	mlxsw_core->emad.use_emad = false;
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
				   mlxsw_core);
}
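
/* Allocate an skb big enough for the transmit header, Ethernet header,
 * TLVs and register payload. The whole length is reserved as headroom
 * so the EMAD layers can later be added with skb_push().
 */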
static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
					u16 reg_len)
{
	struct sk_buff *skb;
	u16 emad_len;

	emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
		    (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
		    sizeof(u32) + mlxsw_core->driver->txhdr_len);
	if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
		return NULL;

	skb = netdev_alloc_skb(NULL, emad_len);
	if (!skb)
		return NULL;
	memset(skb->data, 0, emad_len);
	skb_reserve(skb, emad_len);

	return skb;
}

static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 struct mlxsw_reg_trans *trans,
				 struct list_head *bulk_list,
				 mlxsw_reg_trans_cb_t *cb,
				 unsigned long cb_priv, u64 tid)
{
	struct sk_buff *skb;
	int err;

	/* Print the tid argument; trans->tid is not assigned yet at this
	 * point.
	 */
	dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
		tid, reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
	if (!skb)
		return -ENOMEM;

	list_add_tail(&trans->bulk_list, bulk_list);
	trans->core = mlxsw_core;
	trans->tx_skb = skb;
	trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
	trans->tx_info.is_emad = true;
	INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
	trans->tid = tid;
	init_completion(&trans->completion);
	trans->cb = cb;
	trans->cb_priv = cb_priv;
	trans->reg = reg;
	trans->type = type;

	mlxsw_emad_construct(skb, reg, payload, type, trans->tid);
	mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);

	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	err = mlxsw_emad_transmit(mlxsw_core, trans);
	if (err)
		goto err_out;
	return 0;

err_out:
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del(&trans->bulk_list);
	dev_kfree_skb(trans->tx_skb);
	return err;
}

/*****************
 * Core functions
 *****************/

int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_driver_register);

void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_del(&mlxsw_driver->list);
	spin_unlock(&mlxsw_core_driver_list_lock);
}
EXPORT_SYMBOL(mlxsw_core_driver_unregister);

static struct mlxsw_driver *__driver_find(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
		if (strcmp(mlxsw_driver->kind, kind) == 0)
			return mlxsw_driver;
	}
	return NULL;
}

static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return mlxsw_driver;
}

static void mlxsw_core_driver_put(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	spin_unlock(&mlxsw_core_driver_list_lock);
}
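
/* Note: mlxsw_core_driver_put() currently only re-validates the
 * driver's presence under the lock; there appears to be no reference
 * count to drop here, it simply mirrors mlxsw_core_driver_get().
 */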

static int mlxsw_devlink_port_split(struct devlink *devlink,
				    unsigned int port_index,
				    unsigned int count)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (port_index >= mlxsw_core->max_ports)
		return -EINVAL;
	if (!mlxsw_core->driver->port_split)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_split(mlxsw_core, port_index, count);
}

static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
				      unsigned int port_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (port_index >= mlxsw_core->max_ports)
		return -EINVAL;
	if (!mlxsw_core->driver->port_unsplit)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index);
}

static int
mlxsw_devlink_sb_pool_get(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index,
			  struct devlink_sb_pool_info *pool_info)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_get)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
					 pool_index, pool_info);
}

static int
mlxsw_devlink_sb_pool_set(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index, u32 size,
			  enum devlink_sb_threshold_type threshold_type)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_set)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
					 pool_index, size, threshold_type);
}

static void *__dl_port(struct devlink_port *devlink_port)
{
	return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
}
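
/* The devlink port callbacks receive a devlink_port; __dl_port() uses
 * container_of() to recover the owning mlxsw_core_port so the per-port
 * driver operations below can be dispatched.
 */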
static int mlxsw_devlink_port_type_set(struct devlink_port *devlink_port,
				       enum devlink_port_type port_type)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->port_type_set)
		return -EOPNOTSUPP;
	return mlxsw_driver->port_type_set(mlxsw_core,
					   mlxsw_core_port->local_port,
					   port_type);
}

static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
					      pool_index, p_threshold);
}

static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_set ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
					      pool_index, threshold);
}

static int
mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 p_pool_index, p_threshold);
}

static int
mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 pool_index, u32 threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_set ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 pool_index, threshold);
}

static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
					 unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_snapshot)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
}

static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
					  unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_max_clear)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
}

static int
mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
				   unsigned int sb_index, u16 pool_index,
				   u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_port_pool_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
						  pool_index, p_cur, p_max);
}

static int
mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
				      unsigned int sb_index, u16 tc_index,
				      enum devlink_sb_pool_type pool_type,
				      u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_tc_port_bind_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
						     sb_index, tc_index,
						     pool_type, p_cur, p_max);
}

static const struct devlink_ops mlxsw_devlink_ops = {
	.port_type_set			= mlxsw_devlink_port_type_set,
	.port_split			= mlxsw_devlink_port_split,
	.port_unsplit			= mlxsw_devlink_port_unsplit,
	.sb_pool_get			= mlxsw_devlink_sb_pool_get,
	.sb_pool_set			= mlxsw_devlink_sb_pool_set,
	.sb_port_pool_get		= mlxsw_devlink_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_devlink_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_devlink_sb_occ_tc_port_bind_get,
};

int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				   const struct mlxsw_bus *mlxsw_bus,
				   void *bus_priv)
{
	const char *device_kind = mlxsw_bus_info->device_kind;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_driver *mlxsw_driver;
	struct devlink *devlink;
	size_t alloc_size;
	int err;

	mlxsw_driver = mlxsw_core_driver_get(device_kind);
	if (!mlxsw_driver)
		return -EINVAL;
	alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
	devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
	if (!devlink) {
		err = -ENOMEM;
		goto err_devlink_alloc;
	}

	mlxsw_core = devlink_priv(devlink);
	INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
	INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
	mlxsw_core->driver = mlxsw_driver;
	mlxsw_core->bus = mlxsw_bus;
	mlxsw_core->bus_priv = bus_priv;
	mlxsw_core->bus_info = mlxsw_bus_info;

	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
			      &mlxsw_core->res);
	if (err)
		goto err_bus_init;

	err = mlxsw_ports_init(mlxsw_core);
	if (err)
		goto err_ports_init;

	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) &&
	    MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
		alloc_size = sizeof(u8) *
			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG) *
			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS);
		mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
		if (!mlxsw_core->lag.mapping) {
			err = -ENOMEM;
			goto err_alloc_lag_mapping;
		}
	}

	err = mlxsw_emad_init(mlxsw_core);
	if (err)
		goto err_emad_init;

	err = devlink_register(devlink, mlxsw_bus_info->dev);
	if (err)
		goto err_devlink_register;

	err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
	if (err)
		goto err_hwmon_init;

	err = mlxsw_thermal_init(mlxsw_core, mlxsw_bus_info,
				 &mlxsw_core->thermal);
	if (err)
		goto err_thermal_init;

	if (mlxsw_driver->init) {
		err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
		if (err)
			goto err_driver_init;
	}

	return 0;

err_driver_init:
	mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
err_hwmon_init:
	devlink_unregister(devlink);
err_devlink_register:
	mlxsw_emad_fini(mlxsw_core);
err_emad_init:
	kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
	mlxsw_ports_fini(mlxsw_core);
err_ports_init:
	mlxsw_bus->fini(bus_priv);
err_bus_init:
	devlink_free(devlink);
err_devlink_alloc:
	mlxsw_core_driver_put(device_kind);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);
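
/* Tear down in the reverse order of mlxsw_core_bus_device_register(),
 * mirroring its error unwind path.
 */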
void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
{
	const char *device_kind = mlxsw_core->bus_info->device_kind;
	struct devlink *devlink = priv_to_devlink(mlxsw_core);

	if (mlxsw_core->driver->fini)
		mlxsw_core->driver->fini(mlxsw_core);
	mlxsw_thermal_fini(mlxsw_core->thermal);
	devlink_unregister(devlink);
	mlxsw_emad_fini(mlxsw_core);
	kfree(mlxsw_core->lag.mapping);
	mlxsw_ports_fini(mlxsw_core);
	mlxsw_core->bus->fini(mlxsw_core->bus_priv);
	devlink_free(devlink);
	mlxsw_core_driver_put(device_kind);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);

bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_tx_info *tx_info)
{
	return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
						  tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);

int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    const struct mlxsw_tx_info *tx_info)
{
	return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
					     tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit);

static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
				   const struct mlxsw_rx_listener *rxl_b)
{
	return (rxl_a->func == rxl_b->func &&
		rxl_a->local_port == rxl_b->local_port &&
		rxl_a->trap_id == rxl_b->trap_id);
}

static struct mlxsw_rx_listener_item *
__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
			const struct mlxsw_rx_listener *rxl,
			void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
		if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
		    rxl_item->priv == priv)
			return rxl_item;
	}
	return NULL;
}

int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
				    const struct mlxsw_rx_listener *rxl,
				    void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (rxl_item)
		return -EEXIST;
	rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
	if (!rxl_item)
		return -ENOMEM;
	rxl_item->rxl = *rxl;
	rxl_item->priv = priv;

	list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_register);

void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_rx_listener *rxl,
				       void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (!rxl_item)
		return;
	list_del_rcu(&rxl_item->list);
	synchronize_rcu();
	kfree(rxl_item);
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);

static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	struct mlxsw_event_listener_item *event_listener_item = priv;
	struct mlxsw_reg_info reg;
	char *payload;
	char *op_tlv = mlxsw_emad_op_tlv(skb);
	char *reg_tlv = mlxsw_emad_reg_tlv(skb);

	reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
	reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
	payload = mlxsw_emad_reg_payload(op_tlv);
	event_listener_item->el.func(&reg, payload, event_listener_item->priv);
	dev_kfree_skb(skb);
}

static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
				      const struct mlxsw_event_listener *el_b)
{
	return (el_a->func == el_b->func &&
		el_a->trap_id == el_b->trap_id);
}

static struct mlxsw_event_listener_item *
__find_event_listener_item(struct mlxsw_core *mlxsw_core,
			   const struct mlxsw_event_listener *el,
			   void *priv)
{
	struct mlxsw_event_listener_item *el_item;

	list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
		if (__is_event_listener_equal(&el_item->el, el) &&
		    el_item->priv == priv)
			return el_item;
	}
	return NULL;
}

int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_event_listener *el,
				       void *priv)
{
	int err;
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (el_item)
		return -EEXIST;
	el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
	if (!el_item)
		return -ENOMEM;
	el_item->el = *el;
	el_item->priv = priv;

	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
	if (err)
		goto err_rx_listener_register;

	/* No reason to save item if we did not manage to register an RX
	 * listener for it.
	 */
	list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);

	return 0;

err_rx_listener_register:
	kfree(el_item);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_event_listener_register);

void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
					  const struct mlxsw_event_listener *el,
					  void *priv)
{
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (!el_item)
		return;
	mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
	list_del(&el_item->list);
	kfree(el_item);
}
EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
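
/* A generic mlxsw_listener wraps either an RX listener or an event
 * listener; the helpers below dispatch on listener->is_event.
 */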
static int mlxsw_core_listener_register(struct mlxsw_core *mlxsw_core,
					const struct mlxsw_listener *listener,
					void *priv)
{
	if (listener->is_event)
		return mlxsw_core_event_listener_register(mlxsw_core,
						&listener->u.event_listener,
						priv);
	else
		return mlxsw_core_rx_listener_register(mlxsw_core,
						&listener->u.rx_listener,
						priv);
}

static void mlxsw_core_listener_unregister(struct mlxsw_core *mlxsw_core,
				      const struct mlxsw_listener *listener,
				      void *priv)
{
	if (listener->is_event)
		mlxsw_core_event_listener_unregister(mlxsw_core,
						&listener->u.event_listener,
						priv);
	else
		mlxsw_core_rx_listener_unregister(mlxsw_core,
						&listener->u.rx_listener,
						priv);
}

int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
			     const struct mlxsw_listener *listener, void *priv)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	err = mlxsw_core_listener_register(mlxsw_core, listener, priv);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, listener->action, listener->trap_id,
			    listener->trap_group, listener->is_ctrl);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	if (err)
		goto err_trap_set;

	return 0;

err_trap_set:
	mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_trap_register);

void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
				const struct mlxsw_listener *listener,
				void *priv)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];

	if (!listener->is_event) {
		mlxsw_reg_hpkt_pack(hpkt_pl, listener->unreg_action,
				    listener->trap_id, listener->trap_group,
				    listener->is_ctrl);
		mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	}

	mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
}
EXPORT_SYMBOL(mlxsw_core_trap_unregister);

static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
{
	return atomic64_inc_return(&mlxsw_core->emad.tid);
}
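
/* Transaction IDs combine the random upper 32 bits chosen in
 * mlxsw_emad_init() with a monotonically increasing lower part.
 */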
static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
				      const struct mlxsw_reg_info *reg,
				      char *payload,
				      enum mlxsw_core_reg_access_type type,
				      struct list_head *bulk_list,
				      mlxsw_reg_trans_cb_t *cb,
				      unsigned long cb_priv)
{
	u64 tid = mlxsw_core_tid_get(mlxsw_core);
	struct mlxsw_reg_trans *trans;
	int err;

	trans = kzalloc(sizeof(*trans), GFP_KERNEL);
	if (!trans)
		return -ENOMEM;

	err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
				    bulk_list, cb, cb_priv, tid);
	if (err) {
		kfree(trans);
		return err;
	}
	return 0;
}

int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_query);

int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_write);

static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
{
	struct mlxsw_core *mlxsw_core = trans->core;
	int err;

	wait_for_completion(&trans->completion);
	cancel_delayed_work_sync(&trans->timeout_dw);
	err = trans->err;

	if (trans->retries)
		dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
			 trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
			trans->tid, trans->reg->id,
			mlxsw_reg_id_str(trans->reg->id),
			mlxsw_core_reg_access_type_str(trans->type),
			trans->emad_status,
			mlxsw_emad_op_tlv_status_str(trans->emad_status));

	list_del(&trans->bulk_list);
	kfree_rcu(trans, rcu);
	return err;
}

int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
{
	struct mlxsw_reg_trans *trans;
	struct mlxsw_reg_trans *tmp;
	int sum_err = 0;
	int err;

	list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
		err = mlxsw_reg_trans_wait(trans);
		if (err && sum_err == 0)
			sum_err = err; /* first error to be returned */
	}
	return sum_err;
}
EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);
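
/* Before the EMAD traps are set up, registers are accessed through the
 * command interface instead: the same op/reg TLVs are packed into the
 * in-mailbox and the result is read back from the out-mailbox.
 */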
static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
				     const struct mlxsw_reg_info *reg,
				     char *payload,
				     enum mlxsw_core_reg_access_type type)
{
	enum mlxsw_emad_op_tlv_status status;
	int err, n_retry;
	char *in_mbox, *out_mbox, *tmp;

	dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
		reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	in_mbox = mlxsw_cmd_mbox_alloc();
	if (!in_mbox)
		return -ENOMEM;

	out_mbox = mlxsw_cmd_mbox_alloc();
	if (!out_mbox) {
		err = -ENOMEM;
		goto free_in_mbox;
	}

	mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
			       mlxsw_core_tid_get(mlxsw_core));
	tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
	mlxsw_emad_pack_reg_tlv(tmp, reg, payload);

	n_retry = 0;
retry:
	err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
	if (!err) {
		err = mlxsw_emad_process_status(out_mbox, &status);
		if (err) {
			if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
				goto retry;
			dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
				status, mlxsw_emad_op_tlv_status_str(status));
		}
	}

	if (!err)
		memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
		       reg->len);

	mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
	mlxsw_cmd_mbox_free(in_mbox);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
			reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));
	return err;
}

static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
				     char *payload, size_t payload_len,
				     unsigned long cb_priv)
{
	char *orig_payload = (char *) cb_priv;

	memcpy(orig_payload, payload, payload_len);
}

static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type)
{
	LIST_HEAD(bulk_list);
	int err;

	/* During initialization EMAD interface is not available to us,
	 * so we default to command interface. We switch to EMAD interface
	 * after setting the appropriate traps.
	 */
	if (!mlxsw_core->emad.use_emad)
		return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
						 payload, type);

	err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
					 payload, type, &bulk_list,
					 mlxsw_core_reg_access_cb,
					 (unsigned long) payload);
	if (err)
		return err;
	return mlxsw_reg_trans_bulk_wait(&bulk_list);
}

int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
}
EXPORT_SYMBOL(mlxsw_reg_query);

int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
}
EXPORT_SYMBOL(mlxsw_reg_write);

void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    struct mlxsw_rx_info *rx_info)
{
	struct mlxsw_rx_listener_item *rxl_item;
	const struct mlxsw_rx_listener *rxl;
	u8 local_port;
	bool found = false;

	if (rx_info->is_lag) {
		/* Print the LAG port index, matching the format string */
		dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
				    __func__, rx_info->u.lag_id,
				    rx_info->lag_port_index);
		/* Upper layer does not care if the skb came from LAG or not,
		 * so just get the local_port for the lag port and push it up.
		 */
		local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
							rx_info->u.lag_id,
							rx_info->lag_port_index);
	} else {
		local_port = rx_info->u.sys_port;
	}

	dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
			    __func__, local_port, rx_info->trap_id);

	if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
	    (local_port >= mlxsw_core->max_ports))
		goto drop;

	rcu_read_lock();
	list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
		rxl = &rxl_item->rxl;
		if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
		     rxl->local_port == local_port) &&
		    rxl->trap_id == rx_info->trap_id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	if (!found)
		goto drop;

	rxl->func(skb, local_port, rxl_item->priv);
	return;

drop:
	dev_kfree_skb(skb);
}
EXPORT_SYMBOL(mlxsw_core_skb_receive);
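
/* The LAG mapping is kept as a flat array indexed by
 * lag_id * MAX_LAG_MEMBERS + port_index, i.e. MAX_LAG_MEMBERS consecutive
 * slots per LAG.
 */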
static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
					u16 lag_id, u8 port_index)
{
	return MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS) * lag_id +
	       port_index;
}

void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
				u16 lag_id, u8 port_index, u8 local_port)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	mlxsw_core->lag.mapping[index] = local_port;
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);

u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
			      u16 lag_id, u8 port_index)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	return mlxsw_core->lag.mapping[index];
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);
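
/* Remove a local port from every member slot of the given LAG; a mapping
 * value of 0 denotes an unused slot.
 */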
void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
				  u16 lag_id, u8 local_port)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS); i++) {
		int index = mlxsw_core_lag_mapping_index(mlxsw_core,
							 lag_id, i);

		if (mlxsw_core->lag.mapping[index] == local_port)
			mlxsw_core->lag.mapping[index] = 0;
	}
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);
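
/* Resource accessors: thin wrappers exposing the per-ASIC resource table
 * (populated during core init) to the driver layer above.
 */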
bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
			  enum mlxsw_res_id res_id)
{
	return mlxsw_res_valid(&mlxsw_core->res, res_id);
}
EXPORT_SYMBOL(mlxsw_core_res_valid);

u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
		       enum mlxsw_res_id res_id)
{
	return mlxsw_res_get(&mlxsw_core->res, res_id);
}
EXPORT_SYMBOL(mlxsw_core_res_get);
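
/* Register a local port with devlink. On error the core port structure is
 * cleared back to its initial state.
 */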
int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
	int err;

	mlxsw_core_port->local_port = local_port;
	err = devlink_port_register(devlink, devlink_port, local_port);
	if (err)
		memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
	return err;
}
EXPORT_SYMBOL(mlxsw_core_port_init);

void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	devlink_port_unregister(devlink_port);
	memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
}
EXPORT_SYMBOL(mlxsw_core_port_fini);
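
/* The following setters publish a port's flavour to devlink once the driver
 * has finished setting it up: Ethernet (optionally as a split port),
 * InfiniBand, or no type at all while the port is being torn down or
 * repurposed.
 */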
void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
			     void *port_driver_priv, struct net_device *dev,
			     bool split, u32 split_group)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	if (split)
		devlink_port_split_set(devlink_port, split_group);
	devlink_port_type_eth_set(devlink_port, dev);
}
EXPORT_SYMBOL(mlxsw_core_port_eth_set);

void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
			    void *port_driver_priv)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_type_ib_set(devlink_port, NULL);
}
EXPORT_SYMBOL(mlxsw_core_port_ib_set);

void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
			   void *port_driver_priv)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_type_clear(devlink_port);
}
EXPORT_SYMBOL(mlxsw_core_port_clear);

enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
						u8 local_port)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	return devlink_port->type;
}
EXPORT_SYMBOL(mlxsw_core_port_type_get);
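
/* Hex-dump a mailbox buffer to the debug log, 16 bytes (four u32 words) per
 * line, trimming trailing all-zero words but always printing at least one
 * line. The leading field is the byte offset within the buffer.
 */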
static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
				    const char *buf, size_t size)
{
	__be32 *m = (__be32 *) buf;
	int i;
	int count = size / sizeof(__be32);

	for (i = count - 1; i >= 0; i--)
		if (m[i])
			break;
	i++;
	count = i ? i : 1;
	for (i = 0; i < count; i += 4)
		dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
			i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
			be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
}
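
/* Execute a command through the underlying bus driver, with debug dumps of
 * both mailboxes. A firmware status failure is reported by the bus as -EIO
 * together with the status code, which is decoded here for the error log;
 * a timeout is reported as -ETIMEDOUT.
 */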
int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
		   u32 in_mod, bool out_mbox_direct,
		   char *in_mbox, size_t in_mbox_size,
		   char *out_mbox, size_t out_mbox_size)
{
	u8 status;
	int err;

	BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
	if (!mlxsw_core->bus->cmd_exec)
		return -EOPNOTSUPP;

	dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
		opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
	if (in_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
	}

	err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
					opcode_mod, in_mod, out_mbox_direct,
					in_mbox, in_mbox_size,
					out_mbox, out_mbox_size, &status);

	if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod, status, mlxsw_cmd_status_str(status));
	} else if (err == -ETIMEDOUT) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod);
	}

	if (!err && out_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
	}
	return err;
}
EXPORT_SYMBOL(mlxsw_cmd_exec);
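
/* Work scheduling helpers: regular and delayed work goes on the driver's
 * workqueue, while work that must execute in submission order (and can be
 * flushed independently via mlxsw_core_flush_owq()) goes on the ordered
 * workqueue.
 */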
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work(mlxsw_wq, dwork, delay);
}
EXPORT_SYMBOL(mlxsw_core_schedule_dw);

bool mlxsw_core_schedule_work(struct work_struct *work)
{
	return queue_work(mlxsw_owq, work);
}
EXPORT_SYMBOL(mlxsw_core_schedule_work);

void mlxsw_core_flush_owq(void)
{
	flush_workqueue(mlxsw_owq);
}
EXPORT_SYMBOL(mlxsw_core_flush_owq);
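
/* Module init creates the two workqueues used above; both are allocated
 * with WQ_MEM_RECLAIM so their work items can make forward progress even
 * under memory pressure.
 */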
static int __init mlxsw_core_module_init(void)
{
	int err;

	mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0);
	if (!mlxsw_wq)
		return -ENOMEM;
	mlxsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
					    mlxsw_core_driver_name);
	if (!mlxsw_owq) {
		err = -ENOMEM;
		goto err_alloc_ordered_workqueue;
	}
	return 0;

err_alloc_ordered_workqueue:
	destroy_workqueue(mlxsw_wq);
	return err;
}

static void __exit mlxsw_core_module_exit(void)
{
	destroy_workqueue(mlxsw_owq);
	destroy_workqueue(mlxsw_wq);
}

module_init(mlxsw_core_module_init);
module_exit(mlxsw_core_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch device core driver");