/* hsr_prp_forward.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright 2011-2014 Autronica Fire and Security AS
  3. *
  4. * Author(s):
  5. * 2011-2014 Arvid Brodin, arvid.brodin@alten.se
  6. */
  7. #include <linux/types.h>
  8. #include <linux/skbuff.h>
  9. #include <linux/etherdevice.h>
  10. #include <linux/if_vlan.h>
  11. #include "hsr_prp_forward.h"
  12. #include "hsr_prp_main.h"
  13. #include "hsr_prp_framereg.h"
/* Node table entry; defined in hsr_prp_framereg.c — only a pointer is
 * needed here.
 */
struct hsr_prp_node;

/* Per-frame working state built once by fill_frame_info() and consumed by
 * the forwarding path. Exactly one of skb_std/skb_hsr/skb_prp points at the
 * received skb (the others are NULL); the remaining views are created
 * lazily as ports need them.
 */
struct hsr_prp_frame_info {
	struct sk_buff *skb_std;	/* untagged (stripped) view */
	struct sk_buff *skb_hsr;	/* HSR-tagged view */
	struct sk_buff *skb_prp;	/* PRP view (RCT trailer present) */
	struct hsr_prp_port *port_rcv;	/* port the frame arrived on */
	struct hsr_prp_node *node_src;	/* sender's node-table entry (NULL when rx offloaded) */
	u16 sequence_nr;		/* HSR tag / PRP RCT sequence number */
	bool is_supervision;		/* HSR/PRP supervision frame? */
	bool is_vlan;			/* 802.1Q tag present? */
	bool is_local_dest;		/* deliver a copy to the master (host)? */
	bool is_local_exclusive;	/* unicast addressed to us only? */
	bool is_from_san;		/* from a Singly Attached Node (PRP) */
	struct skb_redundant_info *sred;	/* NOTE(review): set nowhere in this file — confirm use */
};
  29. static inline int is_hsr_l2ptp(struct sk_buff *skb,
  30. struct hsr_prp_frame_info *frame)
  31. {
  32. struct hsr_ethhdr *hsr_ethhdr;
  33. unsigned char *pc;
  34. pc = skb_mac_header(skb);
  35. if (frame->is_vlan)
  36. hsr_ethhdr = (struct hsr_ethhdr *)(pc + VLAN_HLEN);
  37. else
  38. hsr_ethhdr = (struct hsr_ethhdr *)pc;
  39. return (hsr_ethhdr->ethhdr.h_proto == htons(ETH_P_HSR) &&
  40. hsr_ethhdr->hsr_tag.encap_proto == htons(ETH_P_1588));
  41. }
  42. static inline int is_hsr_l2ptp_evt(struct sk_buff *skb,
  43. struct hsr_prp_frame_info *frame)
  44. {
  45. unsigned char *p;
  46. if (!skb->data)
  47. return 0;
  48. p = skb->data;
  49. if (frame->is_vlan)
  50. p += VLAN_HLEN;
  51. /* FIXME: should use macros to access header fields */
  52. return (*(p + 12) == 0x89 && *(p + 13) == 0x2f && /* HSR */
  53. *(p + 18) == 0x88 && *(p + 19) == 0xf7 && /* PTP */
  54. (*(p + 20) == 0x00 ||
  55. *(p + 20) == 0x02 ||
  56. *(p + 20) == 0x03)); /* EVT */
  57. }
/* The uses I can see for these HSR supervision frames are:
 * 1) Use the frames that are sent after node initialization ("HSR_TLV.Type =
 * 22") to reset any sequence_nr counters belonging to that node. Useful if
 * the other node's counter has been reset for some reason.
 * --
 * Or not - resetting the counter and bridging the frame would create a
 * loop, unfortunately.
 *
 * 2) Use the LifeCheck frames to detect ring breaks. I.e. if no LifeCheck
 * frame is received from a particular node, we know something is wrong.
 * We just register these (as with normal frames) and throw them away.
 *
 * 3) Allow different MAC addresses for the two slave interfaces, using the
 * MacAddressA field.
 */
/* Return true when @skb is an HSR/PRP supervision frame: destination is the
 * configured supervision multicast address, ethertype is ETH_P_HSR or
 * ETH_P_PRP (possibly behind a VLAN tag), and the supervision TLV has a
 * recognized type and length.
 */
static bool is_supervision_frame(struct hsr_prp_priv *priv, struct sk_buff *skb)
{
	struct hsrv1_ethhdr_vlan_sp *hsr_v1_vlan_hdr;
	struct hsr_prp_sup_tag *hsr_sup_tag;
	struct hsrv1_ethhdr_sp *hsr_v1_hdr;
	struct ethhdr *eth_hdr;
	bool vlan = false;
	u16 proto;

	WARN_ON_ONCE(!skb_mac_header_was_set(skb));
	eth_hdr = (struct ethhdr *)skb_mac_header(skb);

	/* Correct addr? */
	if (!ether_addr_equal(eth_hdr->h_dest,
			      priv->sup_multicast_addr))
		return false;

	if (skb_vlan_tagged(skb)) {
		proto = vlan_get_protocol(skb);
		vlan = true;
	} else {
		proto = eth_hdr->h_proto;
	}

	/* Correct ether type?. */
	if (!(proto == htons(ETH_P_PRP) || proto == htons(ETH_P_HSR)))
		return false;

	/* Get the supervision header from correct location. */
	if (proto == htons(ETH_P_HSR)) { /* Okay HSRv1. */
		/* HSRv1: supervision payload follows the HSR tag; the
		 * encapsulated protocol inside the tag must be ETH_P_PRP.
		 */
		if (!vlan) {
			hsr_v1_hdr = (struct hsrv1_ethhdr_sp *)eth_hdr;
			if (hsr_v1_hdr->hsr.encap_proto != htons(ETH_P_PRP))
				return false;
			hsr_sup_tag = &hsr_v1_hdr->hsr_sup;
		} else {
			hsr_v1_vlan_hdr =
				(struct hsrv1_ethhdr_vlan_sp *)eth_hdr;
			if (hsr_v1_vlan_hdr->hsr.encap_proto !=
			    htons(ETH_P_PRP))
				return false;
			hsr_sup_tag = &hsr_v1_vlan_hdr->hsr_sup;
		}
	} else {
		/* HSRv0 / PRP: supervision payload directly follows the
		 * (optionally VLAN-tagged) Ethernet header.
		 */
		if (!vlan)
			hsr_sup_tag =
				&((struct hsrv0_ethhdr_sp *)eth_hdr)->hsr_sup;
		else
			hsr_sup_tag =
				&((struct hsrv0_ethhdr_vlan_sp *)eth_hdr)->hsr_sup;
	}

	/* Only announce / life-check TLV types qualify. */
	if (hsr_sup_tag->HSR_TLV_type != HSR_TLV_ANNOUNCE &&
	    hsr_sup_tag->HSR_TLV_type != HSR_TLV_LIFE_CHECK &&
	    hsr_sup_tag->HSR_TLV_type != PRP_TLV_LIFE_CHECK_DD &&
	    hsr_sup_tag->HSR_TLV_type != PRP_TLV_LIFE_CHECK_DA)
		return false;
	/* 12 is presumably a legacy/alternate payload length — TODO confirm
	 * against the IEC 62439-3 supervision frame layout.
	 */
	if (hsr_sup_tag->HSR_TLV_length != 12 &&
	    hsr_sup_tag->HSR_TLV_length != sizeof(struct hsr_prp_sup_payload))
		return false;

	return true;
}
/* Create a copy of @skb_in with the HSR_PRP_HLEN tag removed, suitable for
 * delivery to the master (standard Ethernet) interface. @skb_in itself is
 * left unchanged. Returns NULL on allocation failure.
 */
static struct sk_buff *create_stripped_skb_hsr(struct sk_buff *skb_in,
					       struct hsr_prp_frame_info *frame)
{
	struct sk_buff *skb;
	int copylen;
	unsigned char *dst, *src;

	/* Temporarily pull the tag so __pskb_copy() copies only the
	 * untagged portion, then push it back to restore skb_in.
	 */
	skb_pull(skb_in, HSR_PRP_HLEN);
	skb = __pskb_copy(skb_in,
			  skb_headroom(skb_in) - HSR_PRP_HLEN, GFP_ATOMIC);
	skb_push(skb_in, HSR_PRP_HLEN);
	if (!skb)
		return NULL;

	skb_reset_mac_header(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start -= HSR_PRP_HLEN;

	/* Rewrite dest+src MAC (and the VLAN tag, if any) in front of the
	 * payload — the copy started mid-header because of the pull above.
	 */
	copylen = 2 * ETH_ALEN;
	if (frame->is_vlan)
		copylen += VLAN_HLEN;
	src = skb_mac_header(skb_in);
	dst = skb_mac_header(skb);
	memcpy(dst, src, copylen);

	skb->protocol = eth_hdr(skb)->h_proto;
	return skb;
}
/* Return a clone of the untagged view of the frame, creating and caching it
 * in frame->skb_std on first use. For HSR input the tag is stripped; for
 * PRP input the RCT trailer is optionally trimmed (per prp_tr config) and
 * the skb copied as-is. Returns NULL on failure or unexpected frame state.
 */
static struct sk_buff *frame_get_stripped_skb(struct hsr_prp_frame_info *frame,
					      struct hsr_prp_port *port)
{
	struct hsr_prp_priv *priv = port->priv;

	if (!frame->skb_std) {
		if (frame->skb_hsr) {
			frame->skb_std =
				create_stripped_skb_hsr(frame->skb_hsr, frame);
		} else if (frame->skb_prp) {
			/* trim the skb by len - HSR_PRP_HLEN to exclude
			 * RCT if configured to remove RCT
			 */
			if (!priv->rx_offloaded &&
			    priv->prp_tr == IEC62439_3_TR_REMOVE_RCT)
				skb_trim(frame->skb_prp,
					 frame->skb_prp->len - HSR_PRP_HLEN);
			frame->skb_std =
				__pskb_copy(frame->skb_prp,
					    skb_headroom(frame->skb_prp),
					    GFP_ATOMIC);
		} else {
			/* Unexpected */
			WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n",
				  __FILE__, __LINE__, port->dev->name);
			return NULL;
		}
	}
	/* NOTE: skb_clone(NULL, ...) path relied upon to return NULL when
	 * creation above failed — TODO confirm skb_std is checked by callers.
	 */
	return skb_clone(frame->skb_std, GFP_ATOMIC);
}
  182. /* only prp skb should be passed in */
  183. static void prp_check_lan_id(struct sk_buff *skb, struct hsr_prp_port *port)
  184. {
  185. int lan_id;
  186. struct prp_rct *trailor = skb_get_PRP_rct(skb);
  187. if (!trailor) {
  188. INC_CNT_RX_ERROR_AB(port->type, port->priv);
  189. return;
  190. }
  191. lan_id = get_prp_lan_id(trailor);
  192. if (port->type == HSR_PRP_PT_SLAVE_A) {
  193. if (lan_id & 1)
  194. INC_CNT_RX_WRONG_LAN_AB(port->type, port->priv);
  195. } else {
  196. if (!(lan_id & 1))
  197. INC_CNT_RX_WRONG_LAN_AB(port->type, port->priv);
  198. }
  199. }
  200. static void prp_set_lan_id(struct prp_rct *trailor,
  201. struct hsr_prp_port *port)
  202. {
  203. int lane_id;
  204. if (port->type == HSR_PRP_PT_SLAVE_A)
  205. lane_id = 0;
  206. else
  207. lane_id = 1;
  208. /* Add net_id in the upper 3 bits of lane_id */
  209. lane_id |= port->priv->net_id;
  210. set_prp_lan_id(trailor, lane_id);
  211. }
  212. /* Tailroom for PRP rct should have been created before calling this */
  213. static void prp_fill_rct(struct sk_buff *skb,
  214. struct hsr_prp_frame_info *frame,
  215. struct hsr_prp_port *port)
  216. {
  217. struct prp_rct *trailor;
  218. int lsdu_size;
  219. if (!skb)
  220. return;
  221. if (frame->is_vlan)
  222. skb_put_padto(skb, VLAN_ETH_ZLEN);
  223. else
  224. skb_put_padto(skb, ETH_ZLEN);
  225. trailor = (struct prp_rct *)skb_put(skb, HSR_PRP_HLEN);
  226. lsdu_size = skb->len - 14;
  227. if (frame->is_vlan)
  228. lsdu_size -= 4;
  229. prp_set_lan_id(trailor, port);
  230. set_prp_LSDU_size(trailor, lsdu_size);
  231. trailor->sequence_nr = htons(frame->sequence_nr);
  232. trailor->PRP_suffix = htons(ETH_P_PRP);
  233. }
  234. static void hsr_set_path_id(struct hsr_ethhdr *hsr_ethhdr,
  235. struct hsr_prp_port *port)
  236. {
  237. int path_id;
  238. if (port->type == HSR_PRP_PT_SLAVE_A)
  239. path_id = 0;
  240. else
  241. path_id = 1;
  242. set_hsr_tag_path(&hsr_ethhdr->hsr_tag, path_id);
  243. }
  244. static void hsr_fill_tag(struct sk_buff *skb, struct hsr_prp_frame_info *frame,
  245. struct hsr_prp_port *port, u8 proto_version)
  246. {
  247. struct hsr_ethhdr *hsr_ethhdr;
  248. unsigned char *pc;
  249. int lsdu_size;
  250. /* pad to minimum packet size which is 60 + 6 (HSR tag) */
  251. skb_put_padto(skb, ETH_ZLEN + HSR_PRP_HLEN);
  252. lsdu_size = skb->len - 14;
  253. if (frame->is_vlan)
  254. lsdu_size -= 4;
  255. pc = skb_mac_header(skb);
  256. if (frame->is_vlan)
  257. /* This 4-byte shift (size of a vlan tag) does not
  258. * mean that the ethhdr starts there. But rather it
  259. * provides the proper environment for accessing
  260. * the fields, such as hsr_tag etc., just like
  261. * when the vlan tag is not there. This is because
  262. * the hsr tag is after the vlan tag.
  263. */
  264. hsr_ethhdr = (struct hsr_ethhdr *)(pc + VLAN_HLEN);
  265. else
  266. hsr_ethhdr = (struct hsr_ethhdr *)pc;
  267. if (REDINFO_T(skb) == DIRECTED_TX)
  268. set_hsr_tag_path(&hsr_ethhdr->hsr_tag, REDINFO_PATHID(skb));
  269. else
  270. hsr_set_path_id(hsr_ethhdr, port);
  271. set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, lsdu_size);
  272. hsr_ethhdr->hsr_tag.sequence_nr = htons(frame->sequence_nr);
  273. hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto;
  274. hsr_ethhdr->ethhdr.h_proto = htons(proto_version ?
  275. ETH_P_HSR : ETH_P_PRP);
  276. }
/* Produce the skb to transmit on @port from an untagged frame @skb_o:
 * - PRP (prot_version > HSR_V1): copy with tailroom and append the RCT;
 * - HSR v1 mode T (transparent): plain clone, no tag;
 * - otherwise: copy with extra headroom, shift the Ethernet header down
 *   and fill in the HSR tag. For L2 PTP frames, populate the redundant
 *   info used by the timestamping path. Returns NULL on alloc failure.
 */
static struct sk_buff *create_tagged_skb(struct sk_buff *skb_o,
					 struct hsr_prp_frame_info *frame,
					 struct hsr_prp_port *port)
{
	int movelen;
	unsigned char *dst, *src, *pc;
	struct sk_buff *skb;
	struct skb_redundant_info *sred;
	struct hsr_ethhdr *hsr_ethhdr;
	u16 s;

	if (port->priv->prot_version > HSR_V1) {
		/* PRP: RCT goes at the tail, so expand tailroom.
		 * prp_fill_rct() tolerates a NULL skb.
		 */
		skb = skb_copy_expand(skb_o, skb_headroom(skb_o),
				      skb_tailroom(skb_o) + HSR_PRP_HLEN,
				      GFP_ATOMIC);
		prp_fill_rct(skb, frame, port);
		return skb;
	} else if ((port->priv->prot_version == HSR_V1) &&
		   (port->priv->hsr_mode == IEC62439_3_HSR_MODE_T)) {
		/* HSR mode T sends frames untagged. */
		return skb_clone(skb_o, GFP_ATOMIC);
	}

	/* Create the new skb with enough headroom to fit the HSR tag */
	skb = __pskb_copy(skb_o,
			  skb_headroom(skb_o) + HSR_PRP_HLEN, GFP_ATOMIC);
	if (!skb)
		return NULL;
	skb_reset_mac_header(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += HSR_PRP_HLEN;

	/* Shift the Ethernet header (plus VLAN tag, if any) down by
	 * HSR_PRP_HLEN, opening a gap where the HSR tag will live.
	 */
	movelen = ETH_HLEN;
	if (frame->is_vlan)
		movelen += VLAN_HLEN;

	src = skb_mac_header(skb);
	dst = skb_push(skb, HSR_PRP_HLEN);
	memmove(dst, src, movelen);
	skb_reset_mac_header(skb);

	hsr_fill_tag(skb, frame, port, port->priv->prot_version);

	if (REDINFO_T(skb) == DIRECTED_TX)
		return skb;

	/* Carry over tx timestamping state from the original skb. */
	skb_shinfo(skb)->tx_flags = skb_shinfo(skb_o)->tx_flags;
	skb->sk = skb_o->sk;

	/* TODO: should check socket option instead? */
	if (is_hsr_l2ptp(skb, frame)) {
		sred = skb_redinfo(skb);
		pc = skb_mac_header(skb);
		if (frame->is_vlan)
			hsr_ethhdr = (struct hsr_ethhdr *)(pc + VLAN_HLEN);
		else
			hsr_ethhdr = (struct hsr_ethhdr *)pc;
		/* Record egress port and HSR-tag fields for PTP handling. */
		sred->io_port = (PTP_EVT_OUT | BIT(port->type - 1));
		sred->ethertype = ntohs(hsr_ethhdr->ethhdr.h_proto);
		s = ntohs(hsr_ethhdr->hsr_tag.path_and_LSDU_size);
		sred->lsdu_size = s & 0xfff;
		sred->pathid = (s >> 12) & 0xf;
		sred->seqnr = hsr_get_skb_sequence_nr(skb);
	}

	return skb;
}
/* If the original frame was an HSR tagged frame, just clone it to be sent
 * unchanged. Otherwise, create a private frame especially tagged for 'port'.
 */
static struct sk_buff *frame_get_tagged_skb(struct hsr_prp_frame_info *frame,
					    struct hsr_prp_port *port)
{
	if (frame->skb_hsr) {
		u8 *pc;
		struct hsr_ethhdr *hsr_ethhdr =
			(struct hsr_ethhdr *)skb_mac_header(frame->skb_hsr);

		/* This case is for SV frame created by this device */
		pc = (u8 *)hsr_ethhdr;
		if (frame->is_vlan)
			/* This 4-byte shift (size of a vlan tag) does not
			 * mean that the ethhdr starts there. But rather it
			 * provides the proper environment for accessing
			 * the fields, such as hsr_tag etc., just like
			 * when the vlan tag is not there. This is because
			 * the hsr tag is after the vlan tag.
			 */
			hsr_ethhdr = (struct hsr_ethhdr *)(pc + VLAN_HLEN);
		else
			hsr_ethhdr = (struct hsr_ethhdr *)pc;
		/* set the lane id properly */
		hsr_set_path_id(hsr_ethhdr, port);
		return skb_clone(frame->skb_hsr, GFP_ATOMIC);
	}

	if (frame->skb_prp) {
		/* Rewrite the RCT lan_id for the outgoing slave, then clone. */
		struct prp_rct *trailor = skb_get_PRP_rct(frame->skb_prp);

		if (trailor) {
			prp_set_lan_id(trailor, port);
		} else {
			WARN_ONCE(!trailor, "errored PRP skb");
			return NULL;
		}
		return skb_clone(frame->skb_prp, GFP_ATOMIC);
	}

	/* Tagging only makes sense towards a ring (slave) port. */
	if (port->type != HSR_PRP_PT_SLAVE_A &&
	    port->type != HSR_PRP_PT_SLAVE_B) {
		WARN_ONCE(1, "HSR: Bug: trying to create a tagged frame for a non-ring port");
		return NULL;
	}

	return create_tagged_skb(frame->skb_std, frame, port);
}
  378. static void deliver_master(struct sk_buff *skb, struct hsr_prp_node *node_src,
  379. struct hsr_prp_port *port)
  380. {
  381. struct hsr_prp_priv *priv = port->priv;
  382. struct net_device *dev = port->dev;
  383. bool was_multicast_frame;
  384. int res;
  385. was_multicast_frame = (skb->pkt_type == PACKET_MULTICAST);
  386. /* For LRE offloaded case, assume same MAC address is on both
  387. * interfaces of the remote node and hence no need to substitute
  388. * the source MAC address.
  389. */
  390. if (!port->priv->rx_offloaded)
  391. hsr_prp_addr_subst_source(node_src, skb);
  392. skb_pull(skb, ETH_HLEN);
  393. res = netif_rx(skb);
  394. if (res == NET_RX_DROP) {
  395. dev->stats.rx_dropped++;
  396. } else {
  397. dev->stats.rx_packets++;
  398. dev->stats.rx_bytes += skb->len;
  399. if (was_multicast_frame)
  400. dev->stats.multicast++;
  401. INC_CNT_TX_C(priv);
  402. }
  403. }
  404. static int slave_xmit(struct sk_buff *skb, struct hsr_prp_port *port,
  405. struct hsr_prp_frame_info *frame)
  406. {
  407. if (!port->priv->rx_offloaded &&
  408. frame->port_rcv->type == HSR_PRP_PT_MASTER) {
  409. hsr_prp_addr_subst_dest(frame->node_src, skb, port);
  410. /* Address substitution (IEC62439-3 pp 26, 50): replace mac
  411. * address of outgoing frame with that of the outgoing slave's.
  412. */
  413. ether_addr_copy(eth_hdr(skb)->h_source, port->dev->dev_addr);
  414. }
  415. INC_CNT_TX_AB(port->type, port->priv);
  416. return dev_queue_xmit(skb);
  417. }
/* Propagate timestamps and redundant (PTP) metadata from the original
 * HSR-tagged skb to its stripped copy before delivery to the master.
 * Only applies to HSR (prot_version <= HSR_V1) frames that arrived tagged.
 */
static void stripped_skb_get_shared_info(struct sk_buff *skb_stripped,
					 struct hsr_prp_frame_info *frame)
{
	struct hsr_prp_port *port_rcv = frame->port_rcv;
	struct sk_buff *skb_hsr, *skb;
	struct skb_redundant_info *sred;
	struct hsr_ethhdr *hsr_ethhdr;
	unsigned char *pc;
	u16 s;

	if (port_rcv->priv->prot_version > HSR_V1)
		return;

	if (!frame->skb_hsr)
		return;

	skb_hsr = frame->skb_hsr;
	skb = skb_stripped;

	if (is_hsr_l2ptp_evt(skb_hsr, frame)) {
		/* Rx timestamp */
		skb_hwtstamps(skb)->hwtstamp = skb_hwtstamps(skb_hsr)->hwtstamp;
		/* Cut-through tx timestamp */
		skb_redinfo_hwtstamps(skb)->hwtstamp =
			skb_redinfo_hwtstamps(skb_hsr)->hwtstamp;
	}

	if (is_hsr_l2ptp(skb_hsr, frame)) {
		sred = skb_redinfo(skb);
		/* NOTE(review): header fields are read from the *stripped*
		 * skb here (skb), while the checks above ran on skb_hsr —
		 * confirm this is intentional.
		 */
		pc = skb_mac_header(skb);
		if (frame->is_vlan)
			hsr_ethhdr = (struct hsr_ethhdr *)(pc + VLAN_HLEN);
		else
			hsr_ethhdr = (struct hsr_ethhdr *)pc;
		/* Record ingress port and HSR-tag fields for PTP handling. */
		sred->io_port = (PTP_MSG_IN | BIT(port_rcv->type - 1));
		sred->ethertype = ntohs(hsr_ethhdr->ethhdr.h_proto);
		s = ntohs(hsr_ethhdr->hsr_tag.path_and_LSDU_size);
		sred->lsdu_size = s & 0xfff;
		sred->pathid = (s >> 12) & 0xf;
		sred->seqnr = frame->sequence_nr;
	}
}
  455. static unsigned int
  456. hsr_prp_directed_tx_ports(struct hsr_prp_frame_info *frame)
  457. {
  458. struct sk_buff *skb;
  459. if (frame->skb_std)
  460. skb = frame->skb_std;
  461. else
  462. return 0;
  463. if (REDINFO_T(skb) == DIRECTED_TX)
  464. return REDINFO_PORTS(skb);
  465. return 0;
  466. }
/* Forward the frame through all devices except:
 * - Back through the receiving device
 * - If it's a HSR frame: through a device where it has passed before
 * - if it's a PRP frame: through another PRP slave device (no bridge)
 * - To the local HSR master only if the frame is directly addressed to it, or
 * a non-supervision multicast or broadcast frame.
 *
 * HSR slave devices should insert a HSR tag into the frame, or forward the
 * frame unchanged if it's already tagged. Interlink devices should strip HSR
 * tags if they're of the non-HSR type (but only after duplicate discard). The
 * master device always strips HSR tags.
 */
static void hsr_prp_forward_do(struct hsr_prp_frame_info *frame)
{
	struct hsr_prp_port *port;
	struct sk_buff *skb = NULL;
	unsigned int dir_ports = 0;

	hsr_prp_for_each_port(frame->port_rcv->priv, port) {
		/* Don't send frame back the way it came */
		if (port == frame->port_rcv)
			continue;

		/* Don't deliver locally unless we should */
		if (port->type == HSR_PRP_PT_MASTER && !frame->is_local_dest)
			continue;

		/* Deliver frames directly addressed to us to master only */
		if (port->type != HSR_PRP_PT_MASTER &&
		    frame->is_local_exclusive)
			continue;

		/* Don't send frame over port where it has been sent before.
		 * Also if rx LRE is offloaded, hardware does duplication
		 * detection and discard and send only one copy to the upper
		 * device and thus discard duplicate detection. For PRP, frame
		 * could be from a SAN for which bypass duplicate discard here.
		 */
		if (!port->priv->rx_offloaded && !frame->is_from_san &&
		    hsr_prp_register_frame_out(port, frame->node_src,
					       frame->sequence_nr))
			continue;

		/* In LRE offloaded case, don't expect supervision frames from
		 * slave ports for host as they get processed at the h/w or
		 * firmware.
		 */
		if (frame->is_supervision &&
		    port->type == HSR_PRP_PT_MASTER &&
		    !port->priv->rx_offloaded) {
			/* Supervision frames update the node table but are
			 * not delivered to the stack. Note: the original skb
			 * (not a clone) is handed to the handler here.
			 */
			if (frame->skb_hsr)
				skb = frame->skb_hsr;
			else if (frame->skb_prp)
				skb = frame->skb_prp;
			if (skb)
				hsr_prp_handle_sup_frame(skb,
							 frame->node_src,
							 frame->port_rcv);
			continue;
		}

		/* if L2 forward is offloaded, or protocol is PRP,
		 * don't forward frame across slaves.
		 */
		if ((port->priv->l2_fwd_offloaded ||
		     port->priv->prot_version == PRP_V1) &&
		    ((frame->port_rcv->type == HSR_PRP_PT_SLAVE_A &&
		      port->type == HSR_PRP_PT_SLAVE_B) ||
		     (frame->port_rcv->type == HSR_PRP_PT_SLAVE_B &&
		      port->type == HSR_PRP_PT_SLAVE_A)))
			continue;

		/* Directed tx restricts egress to the requested ports. */
		dir_ports = hsr_prp_directed_tx_ports(frame);
		if (dir_ports && !(dir_ports & BIT(port->type - 1)))
			continue;

		if (port->type != HSR_PRP_PT_MASTER) {
			skb = frame_get_tagged_skb(frame, port);
		} else {
			skb = frame_get_stripped_skb(frame, port);
			/* tolerates NULL skb (returns early inside) */
			stripped_skb_get_shared_info(skb, frame);
		}
		if (!skb) {
			/* Allocation/format failure: account it against the
			 * receiving slave, or the master's rx_dropped.
			 */
			if (frame->port_rcv->type == HSR_PRP_PT_SLAVE_A ||
			    frame->port_rcv->type == HSR_PRP_PT_SLAVE_B)
				INC_CNT_RX_ERROR_AB(frame->port_rcv->type,
						    port->priv);
			else {
				struct net_device *master_dev =
					hsr_prp_get_port(port->priv,
							 HSR_PRP_PT_MASTER)->dev;

				master_dev->stats.rx_dropped++;
			}
			continue;
		}

		skb->dev = port->dev;
		if (port->type == HSR_PRP_PT_MASTER)
			deliver_master(skb, frame->node_src, port);
		else
			slave_xmit(skb, port, frame);
	}
}
  561. static void check_local_dest(struct hsr_prp_priv *priv, struct sk_buff *skb,
  562. struct hsr_prp_frame_info *frame)
  563. {
  564. if (hsr_prp_addr_is_self(priv, eth_hdr(skb)->h_dest)) {
  565. frame->is_local_exclusive = true;
  566. skb->pkt_type = PACKET_HOST;
  567. } else {
  568. frame->is_local_exclusive = false;
  569. }
  570. if (skb->pkt_type == PACKET_HOST ||
  571. skb->pkt_type == PACKET_MULTICAST ||
  572. skb->pkt_type == PACKET_BROADCAST) {
  573. frame->is_local_dest = true;
  574. } else {
  575. frame->is_local_dest = false;
  576. }
  577. }
/* Classify the received @skb and populate @frame: supervision status, node
 * table entry, VLAN presence, which view (skb_hsr / skb_prp / skb_std) the
 * skb represents, its sequence number, and destination flags.
 * Returns 0 on success, -1 when the frame should be dropped.
 */
static int fill_frame_info(struct hsr_prp_frame_info *frame,
			   struct sk_buff *skb, struct hsr_prp_port *port)
{
	struct hsr_prp_priv *priv = port->priv;
	struct ethhdr *ethhdr;
	struct hsr_vlan_ethhdr *vlan_hdr;
	unsigned long irqflags;
	u16 proto;

	memset(frame, 0, sizeof(*frame));
	frame->is_supervision = is_supervision_frame(port->priv, skb);

	/* When offloaded, don't expect Supervision frame which
	 * is terminated at h/w or f/w that offload the LRE
	 */
	if (frame->is_supervision && priv->rx_offloaded &&
	    port->type != HSR_PRP_PT_MASTER)
		return -1;

	if (frame->is_supervision) {
		if (port->type == HSR_PRP_PT_SLAVE_A)
			INC_CNT_RX_SUP_A(priv);
		else if (port->type == HSR_PRP_PT_SLAVE_B)
			INC_CNT_RX_SUP_B(priv);
	}

	/* For Offloaded case, there is no need for node list since
	 * firmware/hardware implements LRE function.
	 */
	if (!priv->rx_offloaded) {
		frame->node_src = hsr_prp_get_node(&priv->node_db, skb,
						   frame->is_supervision,
						   port->type);
		/* Unknown node and !is_supervision, or no mem */
		if (!frame->node_src)
			return -1;
	}

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	frame->is_vlan = false;
	proto = ethhdr->h_proto;
	if (proto == htons(ETH_P_8021Q))
		frame->is_vlan = true;

	if (frame->is_vlan) {
		/* Look past the VLAN tag for the encapsulated protocol. */
		vlan_hdr = (struct hsr_vlan_ethhdr *)ethhdr;
		proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto;
	}

	frame->is_from_san = false;
	if (proto == htons(ETH_P_PRP) || proto == htons(ETH_P_HSR)) {
		/* Tagged frame: a valid RCT means PRP, otherwise HSR. */
		struct prp_rct *rct = skb_get_PRP_rct(skb);

		if (rct &&
		    prp_check_lsdu_size(skb, rct, frame->is_supervision)) {
			frame->skb_hsr = NULL;
			frame->skb_std = NULL;
			frame->skb_prp = skb;
			frame->sequence_nr = prp_get_skb_sequence_nr(rct);
		} else {
			frame->skb_std = NULL;
			frame->skb_prp = NULL;
			frame->skb_hsr = skb;
			frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
		}
	} else {
		/* Untagged ethertype: may still be PRP (RCT is a trailer and
		 * does not change the ethertype), otherwise a standard frame.
		 */
		struct prp_rct *rct = skb_get_PRP_rct(skb);

		if (rct &&
		    prp_check_lsdu_size(skb, rct, frame->is_supervision) &&
		    port->priv->prot_version == PRP_V1) {
			frame->skb_hsr = NULL;
			frame->skb_std = NULL;
			frame->skb_prp = skb;
			frame->sequence_nr = prp_get_skb_sequence_nr(rct);
			frame->is_from_san = false;
		} else {
			frame->skb_hsr = NULL;
			frame->skb_prp = NULL;
			frame->skb_std = skb;
			if (port->type != HSR_PRP_PT_MASTER) {
				/* Untagged frame from a slave port: sender is
				 * a Singly Attached Node.
				 */
				frame->is_from_san = true;
			} else {
				if ((REDINFO_T(skb) == DIRECTED_TX) &&
				    (REDINFO_LSDU_SIZE(skb))) {
					/* Directed tx supplies its own
					 * sequence number.
					 */
					frame->sequence_nr = REDINFO_SEQNR(skb);
				} else if (((priv->prot_version == HSR_V1) &&
					    (priv->hsr_mode
					     != IEC62439_3_HSR_MODE_T)) ||
					   (priv->prot_version == PRP_V1) ||
					   (priv->prot_version == HSR_V0)) {
					/* Sequence nr for the master node */
					spin_lock_irqsave(&priv->seqnr_lock,
							  irqflags);
					frame->sequence_nr = priv->sequence_nr;
					priv->sequence_nr++;
					spin_unlock_irqrestore(&priv->seqnr_lock,
							       irqflags);
				}
			}
		}
	}

	frame->port_rcv = port;
	check_local_dest(priv, skb, frame);

	return 0;
}
/* Must be called holding rcu read lock (because of the port parameter) */
/* Entry point for every frame, both ingress on a slave and egress from the
 * master. Builds the frame_info, enforces protocol/LAN checks, registers
 * the sender, forwards to all eligible ports, updates stats and finally
 * frees the original skb (whichever view holds it).
 */
void hsr_prp_forward_skb(struct sk_buff *skb, struct hsr_prp_port *port)
{
	struct hsr_prp_frame_info frame;

	if (skb_mac_header(skb) != skb->data) {
		WARN_ONCE(1, "%s:%d: Malformed frame (port_src %s)\n",
			  __FILE__, __LINE__, port->dev->name);
		goto out_drop;
	}

	if (fill_frame_info(&frame, skb, port) < 0)
		goto out_drop;

	/* Only accept packets for the protocol we have been configured */
	if ((frame.skb_hsr && port->priv->prot_version == PRP_V1) ||
	    (frame.skb_prp && port->priv->prot_version <= HSR_V1))
		goto out_drop;

	/* Check for LAN_ID only for PRP */
	if (frame.skb_prp) {
		if (port->type == HSR_PRP_PT_SLAVE_A ||
		    port->type == HSR_PRP_PT_SLAVE_B)
			prp_check_lan_id(frame.skb_prp, port);
	}

	/* No need to register frame when rx offload is supported */
	if (!port->priv->rx_offloaded)
		hsr_prp_register_frame_in(frame.node_src, port,
					  frame.sequence_nr);

	hsr_prp_forward_do(&frame);

	/* Gets called for ingress frames as well as egress from master port.
	 * So check and increment stats for master port only here.
	 * (skb is still valid: frame.* holds the original until the
	 * kfree_skb() calls below.)
	 */
	if (port->type == HSR_PRP_PT_MASTER) {
		port->dev->stats.tx_packets++;
		port->dev->stats.tx_bytes += skb->len;
	}

	/* Free whichever view owns the original skb. */
	if (frame.skb_hsr)
		kfree_skb(frame.skb_hsr);
	if (frame.skb_prp)
		kfree_skb(frame.skb_prp);
	if (frame.skb_std)
		kfree_skb(frame.skb_std);
	return;

out_drop:
	/* NOTE(review): tx_dropped is bumped even for rx-path drops —
	 * confirm this is the intended accounting.
	 */
	INC_CNT_RX_ERROR_AB(port->type, port->priv);
	port->dev->stats.tx_dropped++;
	kfree_skb(skb);
}