/* dvb_net.c */
  1. /*
  2. * dvb_net.c
  3. *
  4. * Copyright (C) 2001 Convergence integrated media GmbH
  5. * Ralph Metzler <ralph@convergence.de>
  6. * Copyright (C) 2002 Ralph Metzler <rjkm@metzlerbros.de>
  7. *
  8. * ULE Decapsulation code:
  9. * Copyright (C) 2003, 2004 gcs - Global Communication & Services GmbH.
  10. * and Department of Scientific Computing
  11. * Paris Lodron University of Salzburg.
  12. * Hilmar Linder <hlinder@cosy.sbg.ac.at>
  13. * and Wolfram Stering <wstering@cosy.sbg.ac.at>
  14. *
  15. * ULE Decaps according to RFC 4326.
  16. *
  17. * This program is free software; you can redistribute it and/or
  18. * modify it under the terms of the GNU General Public License
  19. * as published by the Free Software Foundation; either version 2
  20. * of the License, or (at your option) any later version.
  21. *
  22. * This program is distributed in the hope that it will be useful,
  23. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  24. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  25. * GNU General Public License for more details.
  26. * To obtain the license, point your browser to
  27. * http://www.gnu.org/copyleft/gpl.html
  28. */
  29. /*
  30. * ULE ChangeLog:
  31. * Feb 2004: hl/ws v1: Implementing draft-fair-ipdvb-ule-01.txt
  32. *
  33. * Dec 2004: hl/ws v2: Implementing draft-ietf-ipdvb-ule-03.txt:
  34. * ULE Extension header handling.
  35. * Bugreports by Moritz Vieth and Hanno Tersteegen,
  36. * Fraunhofer Institute for Open Communication Systems
  37. * Competence Center for Advanced Satellite Communications.
  38. * Bugfixes and robustness improvements.
  39. * Filtering on dest MAC addresses, if present (D-Bit = 0)
  40. * ULE_DEBUG compile-time option.
  41. * Apr 2006: cp v3: Bugfixes and compliency with RFC 4326 (ULE) by
  42. * Christian Praehauser <cpraehaus@cosy.sbg.ac.at>,
  43. * Paris Lodron University of Salzburg.
  44. */
  45. /*
  46. * FIXME / TODO (dvb_net.c):
  47. *
  48. * Unloading does not work for 2.6.9 kernels: a refcount doesn't go to zero.
  49. *
  50. */
  51. #define pr_fmt(fmt) "dvb_net: " fmt
  52. #include <linux/module.h>
  53. #include <linux/kernel.h>
  54. #include <linux/netdevice.h>
  55. #include <linux/etherdevice.h>
  56. #include <linux/dvb/net.h>
  57. #include <linux/uio.h>
  58. #include <linux/uaccess.h>
  59. #include <linux/crc32.h>
  60. #include <linux/mutex.h>
  61. #include <linux/sched.h>
  62. #include "dvb_demux.h"
  63. #include "dvb_net.h"
  64. static inline __u32 iov_crc32( __u32 c, struct kvec *iov, unsigned int cnt )
  65. {
  66. unsigned int j;
  67. for (j = 0; j < cnt; j++)
  68. c = crc32_be( c, iov[j].iov_base, iov[j].iov_len );
  69. return c;
  70. }
/* Max nr. of multicast MAC filters per interface (see multi_macs[] below). */
#define DVB_NET_MULTICAST_MAX 10

#undef ULE_DEBUG

#ifdef ULE_DEBUG
/* Debugging aid: dump @len bytes of @buf to the kernel debug log. */
static void hexdump(const unsigned char *buf, unsigned short len)
{
	print_hex_dump_debug("", DUMP_PREFIX_OFFSET, 16, 1, buf, len, true);
}
#endif
/* Per-netdevice private state: demux feed handles, RX filtering mode and
 * the ULE decapsulation state machine. */
struct dvb_net_priv {
	int in_use;
	u16 pid;			/* PID of the TS stream carried by this feed. */
	struct net_device *net;
	struct dvb_net *host;
	struct dmx_demux *demux;
	struct dmx_section_feed *secfeed;
	struct dmx_section_filter *secfilter;
	struct dmx_ts_feed *tsfeed;
	int multi_num;			/* Nr. of valid entries in multi_macs[]. */
	struct dmx_section_filter *multi_secfilter[DVB_NET_MULTICAST_MAX];
	unsigned char multi_macs[DVB_NET_MULTICAST_MAX][6];
	int rx_mode;			/* One of the RX_MODE_* values below. */
#define RX_MODE_UNI 0
#define RX_MODE_MULTI 1
#define RX_MODE_ALL_MULTI 2
#define RX_MODE_PROMISC 3
	struct work_struct set_multicast_list_wq;
	struct work_struct restart_net_feed_wq;
	unsigned char feedtype;		/* Either FEED_TYPE_MPE or FEED_TYPE_ULE. */
	int need_pusi;			/* Set to 1, if synchronization on PUSI required. */
	unsigned char tscc;		/* TS continuity counter after sync on PUSI. */
	struct sk_buff *ule_skb;	/* ULE SNDU decodes into this buffer. */
	unsigned char *ule_next_hdr;	/* Pointer into skb to next ULE extension header. */
	unsigned short ule_sndu_len;	/* ULE SNDU length in bytes, w/o D-Bit. */
	unsigned short ule_sndu_type;	/* ULE SNDU type field, complete. */
	unsigned char ule_sndu_type_1;	/* ULE SNDU type field, if split across 2 TS cells. */
	unsigned char ule_dbit;		/* Whether the DestMAC address present
					 * or not (bit is set). */
	unsigned char ule_bridged;	/* Whether the ULE_BRIDGED extension header was found. */
	int ule_sndu_remain;		/* Nr. of bytes still required for current ULE SNDU. */
	unsigned long ts_count;		/* Current ts cell counter. */
	struct mutex mutex;
};
/**
 * Determine the packet's protocol ID. The rule here is that we
 * assume 802.3 if the type field is short enough to be a length.
 * This is normal practice and works for any 'now in use' protocol.
 *
 * stolen from eth.c out of the linux kernel, hacked for dvb-device
 * by Michael Holzt <kju@debian.org>
 */
static __be16 dvb_net_eth_type_trans(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct ethhdr *eth;
	unsigned char *rawp;

	skb_reset_mac_header(skb);
	skb_pull(skb,dev->hard_header_len);
	eth = eth_hdr(skb);

	/* Group bit of the destination MAC: broadcast vs. multicast. */
	if (*eth->h_dest & 1) {
		if(ether_addr_equal(eth->h_dest,dev->broadcast))
			skb->pkt_type=PACKET_BROADCAST;
		else
			skb->pkt_type=PACKET_MULTICAST;
	}

	/* Values >= ETH_P_802_3_MIN are real EtherType protocol numbers. */
	if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
		return eth->h_proto;

	rawp = skb->data;

	/**
	 * This is a magic hack to spot IPX packets. Older Novell breaks
	 * the protocol design and runs IPX over 802.3 without an 802.2 LLC
	 * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
	 * won't work for fault tolerant netware but does for the rest.
	 */
	if (*(unsigned short *)rawp == 0xFFFF)
		return htons(ETH_P_802_3);

	/**
	 * Real 802.2 LLC
	 */
	return htons(ETH_P_802_2);
}
/* MPEG-2 TS cell constants. Byte positions below refer to the 4-byte
 * TS cell header as used by the decoder further down. */
#define TS_SZ 188	/* Size of one TS cell in bytes. */
#define TS_SYNC 0x47	/* Sync byte value, first byte of every cell. */
#define TS_TEI 0x80	/* Transport error indicator mask (header byte 1). */
#define TS_SC 0xC0	/* Scrambling control mask (header byte 3). */
#define TS_PUSI 0x40	/* Payload unit start indicator mask (header byte 1). */
#define TS_AF_A 0x20	/* Adaptation field flag (not used in this code). */
#define TS_AF_D 0x10	/* Adaptation field flag (not used in this code). */

/* ULE Extension Header handlers. */
#define ULE_TEST 0		/* Mandatory ext. header: Test SNDU. */
#define ULE_BRIDGED 1		/* Mandatory ext. header: bridged frame. */
#define ULE_OPTEXTHDR_PADDING 0	/* Optional ext. header: padding. */
/*
 * Mandatory extension header type 0: Test SNDU.
 * Test SNDUs are never delivered up the stack; returning -1 tells the
 * caller to discard the SNDU.
 */
static int ule_test_sndu( struct dvb_net_priv *p )
{
	return -1;
}
/*
 * Mandatory extension header type 1: bridged frame.
 * The remaining SNDU payload carries a complete ethernet frame. If the
 * embedded type field is an 802.3 length (< ETH_P_802_3_MIN), validate
 * it against the actual remaining frame length.
 * Returns 0 on success, -1 to discard the SNDU.
 */
static int ule_bridged_sndu( struct dvb_net_priv *p )
{
	struct ethhdr *hdr = (struct ethhdr*) p->ule_next_hdr;
	if(ntohs(hdr->h_proto) < ETH_P_802_3_MIN) {
		/* Bytes left in the SNDU after the embedded ethernet header. */
		int framelen = p->ule_sndu_len - ((p->ule_next_hdr+sizeof(struct ethhdr)) - p->ule_skb->data);
		/* A frame Type < ETH_P_802_3_MIN for a bridged frame, introduces a LLC Length field. */
		if(framelen != ntohs(hdr->h_proto)) {
			return -1;
		}
	}
	/* Note:
	 * From RFC4326:
	 * "A bridged SNDU is a Mandatory Extension Header of Type 1.
	 * It must be the final (or only) extension header specified in the header chain of a SNDU."
	 * The 'ule_bridged' flag will cause the extension header processing loop to terminate.
	 */
	p->ule_bridged = 1;
	return 0;
}
  185. static int ule_exthdr_padding(struct dvb_net_priv *p)
  186. {
  187. return 0;
  188. }
  189. /** Handle ULE extension headers.
  190. * Function is called after a successful CRC32 verification of an ULE SNDU to complete its decoding.
  191. * Returns: >= 0: nr. of bytes consumed by next extension header
  192. * -1: Mandatory extension header that is not recognized or TEST SNDU; discard.
  193. */
  194. static int handle_one_ule_extension( struct dvb_net_priv *p )
  195. {
  196. /* Table of mandatory extension header handlers. The header type is the index. */
  197. static int (*ule_mandatory_ext_handlers[255])( struct dvb_net_priv *p ) =
  198. { [0] = ule_test_sndu, [1] = ule_bridged_sndu, [2] = NULL, };
  199. /* Table of optional extension header handlers. The header type is the index. */
  200. static int (*ule_optional_ext_handlers[255])( struct dvb_net_priv *p ) =
  201. { [0] = ule_exthdr_padding, [1] = NULL, };
  202. int ext_len = 0;
  203. unsigned char hlen = (p->ule_sndu_type & 0x0700) >> 8;
  204. unsigned char htype = p->ule_sndu_type & 0x00FF;
  205. /* Discriminate mandatory and optional extension headers. */
  206. if (hlen == 0) {
  207. /* Mandatory extension header */
  208. if (ule_mandatory_ext_handlers[htype]) {
  209. ext_len = ule_mandatory_ext_handlers[htype]( p );
  210. if(ext_len >= 0) {
  211. p->ule_next_hdr += ext_len;
  212. if (!p->ule_bridged) {
  213. p->ule_sndu_type = ntohs(*(__be16 *)p->ule_next_hdr);
  214. p->ule_next_hdr += 2;
  215. } else {
  216. p->ule_sndu_type = ntohs(*(__be16 *)(p->ule_next_hdr + ((p->ule_dbit ? 2 : 3) * ETH_ALEN)));
  217. /* This assures the extension handling loop will terminate. */
  218. }
  219. }
  220. // else: extension handler failed or SNDU should be discarded
  221. } else
  222. ext_len = -1; /* SNDU has to be discarded. */
  223. } else {
  224. /* Optional extension header. Calculate the length. */
  225. ext_len = hlen << 1;
  226. /* Process the optional extension header according to its type. */
  227. if (ule_optional_ext_handlers[htype])
  228. (void)ule_optional_ext_handlers[htype]( p );
  229. p->ule_next_hdr += ext_len;
  230. p->ule_sndu_type = ntohs( *(__be16 *)(p->ule_next_hdr-2) );
  231. /*
  232. * note: the length of the next header type is included in the
  233. * length of THIS optional extension header
  234. */
  235. }
  236. return ext_len;
  237. }
/*
 * Walk the chain of ULE extension headers at the start of the SNDU
 * payload until ule_sndu_type holds a real protocol number
 * (>= ETH_P_802_3_MIN).
 * Returns the total nr. of bytes consumed by extension headers, or a
 * negative value if the SNDU must be discarded.
 */
static int handle_ule_extensions( struct dvb_net_priv *p )
{
	int total_ext_len = 0, l;

	p->ule_next_hdr = p->ule_skb->data;
	do {
		l = handle_one_ule_extension( p );
		if (l < 0)
			return l; /* Stop extension header processing and discard SNDU. */
		total_ext_len += l;
#ifdef ULE_DEBUG
		pr_debug("ule_next_hdr=%p, ule_sndu_type=%i, l=%i, total_ext_len=%i\n",
			 p->ule_next_hdr, (int)p->ule_sndu_type,
			 l, total_ext_len);
#endif
	} while (p->ule_sndu_type < ETH_P_802_3_MIN);
	return total_ext_len;
}
  255. /** Prepare for a new ULE SNDU: reset the decoder state. */
  256. static inline void reset_ule( struct dvb_net_priv *p )
  257. {
  258. p->ule_skb = NULL;
  259. p->ule_next_hdr = NULL;
  260. p->ule_sndu_len = 0;
  261. p->ule_sndu_type = 0;
  262. p->ule_sndu_type_1 = 0;
  263. p->ule_sndu_remain = 0;
  264. p->ule_dbit = 0xFF;
  265. p->ule_bridged = 0;
  266. }
/**
 * Decode ULE SNDUs according to draft-ietf-ipdvb-ule-03.txt from a sequence of
 * TS cells of a single PID.
 */

/* Per-call decoder state passed between the dvb_net_ule_* helpers. */
struct dvb_net_ule_handle {
	struct net_device *dev;
	struct dvb_net_priv *priv;
	struct ethhdr *ethh;
	const u8 *buf;			/* Input buffer of TS cells. */
	size_t buf_len;
	unsigned long skipped;		/* Nr. of cells skipped while hunting for PUSI. */
	const u8 *ts, *ts_end, *from_where;
	u8 ts_remain, how_much, new_ts;
	bool error;
#ifdef ULE_DEBUG
	/*
	 * The code inside ULE_DEBUG keeps a history of the
	 * last 100 TS cells processed.
	 *
	 * NOTE(review): 'static' is not a valid storage class for struct
	 * members in C, so this block cannot compile with ULE_DEBUG
	 * defined; the history buffers would have to move to file scope.
	 * TODO confirm and fix before enabling ULE_DEBUG.
	 */
	static unsigned char ule_hist[100*TS_SZ];
	static unsigned char *ule_where = ule_hist, ule_dump;
#endif
};
/*
 * Validate the TS cell header at h->ts and position h->from_where /
 * h->ts_remain at the start of its 184-byte payload.
 * Returns 1 if the cell is invalid and has been consumed (caller must
 * 'continue' with the next cell), 0 if decoding may proceed.
 */
static int dvb_net_ule_new_ts_cell(struct dvb_net_ule_handle *h)
{
	/* We are about to process a new TS cell. */

#ifdef ULE_DEBUG
	/* Record this cell in the circular TS history buffer. */
	if (h->ule_where >= &h->ule_hist[100*TS_SZ])
		h->ule_where = h->ule_hist;
	memcpy(h->ule_where, h->ts, TS_SZ);
	if (h->ule_dump) {
		hexdump(h->ule_where, TS_SZ);
		h->ule_dump = 0;
	}
	h->ule_where += TS_SZ;
#endif

	/*
	 * Check TS h->error conditions: sync_byte, transport_error_indicator,
	 * scrambling_control .
	 */
	if ((h->ts[0] != TS_SYNC) || (h->ts[1] & TS_TEI) ||
	    ((h->ts[3] & TS_SC) != 0)) {
		pr_warn("%lu: Invalid TS cell: SYNC %#x, TEI %u, SC %#x.\n",
			h->priv->ts_count, h->ts[0],
			(h->ts[1] & TS_TEI) >> 7,
			(h->ts[3] & TS_SC) >> 6);

		/* Drop partly decoded SNDU, reset state, resync on PUSI. */
		if (h->priv->ule_skb) {
			dev_kfree_skb(h->priv->ule_skb);
			/* Prepare for next SNDU. */
			h->dev->stats.rx_errors++;
			h->dev->stats.rx_frame_errors++;
		}
		reset_ule(h->priv);
		h->priv->need_pusi = 1;

		/* Continue with next TS cell. */
		h->ts += TS_SZ;
		h->priv->ts_count++;
		return 1;
	}

	/* Good cell: 184 payload bytes follow the 4-byte TS header. */
	h->ts_remain = 184;
	h->from_where = h->ts + 4;

	return 0;
}
/*
 * Resynchronize on a cell carrying a Payload Unit Start Indicator.
 * Returns 1 if the cell was skipped (no PUSI, or pointer field out of
 * range) and the caller should continue with the next TS cell; 0 once
 * h->from_where / h->ts_remain point at the first ULE SNDU.
 */
static int dvb_net_ule_ts_pusi(struct dvb_net_ule_handle *h)
{
	if (h->ts[1] & TS_PUSI) {
		/* Find beginning of first ULE SNDU in current TS cell. */
		/* Synchronize continuity counter. */
		h->priv->tscc = h->ts[3] & 0x0F;
		/* There is a pointer field here. */
		if (h->ts[4] > h->ts_remain) {
			pr_err("%lu: Invalid ULE packet (pointer field %d)\n",
			       h->priv->ts_count, h->ts[4]);
			h->ts += TS_SZ;
			h->priv->ts_count++;
			return 1;
		}
		/* Skip to destination of pointer field. */
		h->from_where = &h->ts[5] + h->ts[4];
		h->ts_remain -= 1 + h->ts[4];
		h->skipped = 0;
	} else {
		/* No PUSI yet: skip the whole cell and keep counting. */
		h->skipped++;
		h->ts += TS_SZ;
		h->priv->ts_count++;
		return 1;
	}

	return 0;
}
/*
 * Per-cell bookkeeping for an already-synchronized stream: verify the
 * continuity counter and handle a PUSI appearing in the middle of an
 * incomplete SNDU (packed payload / lost cells).
 * Returns 1 if state was reset and the caller must resync on the next
 * PUSI ('continue'), 0 to go on decoding this cell.
 */
static int dvb_net_ule_new_ts(struct dvb_net_ule_handle *h)
{
	/* Check continuity counter. */
	if ((h->ts[3] & 0x0F) == h->priv->tscc)
		h->priv->tscc = (h->priv->tscc + 1) & 0x0F;
	else {
		/* TS discontinuity handling: */
		pr_warn("%lu: TS discontinuity: got %#x, expected %#x.\n",
			h->priv->ts_count, h->ts[3] & 0x0F,
			h->priv->tscc);
		/* Drop partly decoded SNDU, reset state, resync on PUSI. */
		if (h->priv->ule_skb) {
			dev_kfree_skb(h->priv->ule_skb);
			/* Prepare for next SNDU. */
			// reset_ule(h->priv); moved to below.
			h->dev->stats.rx_errors++;
			h->dev->stats.rx_frame_errors++;
		}
		reset_ule(h->priv);
		/* skip to next PUSI. */
		h->priv->need_pusi = 1;
		return 1;
	}

	/*
	 * If we still have an incomplete payload, but PUSI is
	 * set; some TS cells are missing.
	 * This is only possible here, if we missed exactly 16 TS
	 * cells (continuity counter wrap).
	 */
	if (h->ts[1] & TS_PUSI) {
		if (!h->priv->need_pusi) {
			/* Pointer field must lie inside the cell and agree with
			 * the nr. of bytes this SNDU still needs. */
			if (!(*h->from_where < (h->ts_remain-1)) ||
			    *h->from_where != h->priv->ule_sndu_remain) {
				/*
				 * Pointer field is invalid.
				 * Drop this TS cell and any started ULE SNDU.
				 */
				pr_warn("%lu: Invalid pointer field: %u.\n",
					h->priv->ts_count,
					*h->from_where);

				/*
				 * Drop partly decoded SNDU, reset state,
				 * resync on PUSI.
				 */
				if (h->priv->ule_skb) {
					h->error = true;
					dev_kfree_skb(h->priv->ule_skb);
				}

				if (h->error || h->priv->ule_sndu_remain) {
					h->dev->stats.rx_errors++;
					h->dev->stats.rx_frame_errors++;
					h->error = false;
				}

				reset_ule(h->priv);
				h->priv->need_pusi = 1;
				return 1;
			}

			/*
			 * Skip pointer field (we're processing a
			 * packed payload).
			 */
			h->from_where += 1;
			h->ts_remain -= 1;
		} else
			h->priv->need_pusi = 0;

		if (h->priv->ule_sndu_remain > 183) {
			/*
			 * Current SNDU lacks more data than there
			 * could be available in the current TS cell.
			 */
			h->dev->stats.rx_errors++;
			h->dev->stats.rx_length_errors++;
			pr_warn("%lu: Expected %d more SNDU bytes, but got PUSI (pf %d, h->ts_remain %d). Flushing incomplete payload.\n",
				h->priv->ts_count,
				h->priv->ule_sndu_remain,
				h->ts[4], h->ts_remain);
			dev_kfree_skb(h->priv->ule_skb);
			/* Prepare for next SNDU. */
			reset_ule(h->priv);
			/*
			 * Resync: go to where pointer field points to:
			 * start of next ULE SNDU.
			 */
			h->from_where += h->ts[4];
			h->ts_remain -= h->ts[4];
		}
	}
	return 0;
}
/*
 * Start a new payload with skb.
 * Find ULE header. It is only guaranteed that the
 * length field (2 bytes) is contained in the current
 * TS.
 * Check h.ts_remain has to be >= 2 here.
 *
 * Returns: 0  -> header parsed, ule_skb allocated, copying may start;
 *          1  -> caller must 'continue' with the next TS cell;
 *          -1 -> skb allocation failed, caller aborts this buffer.
 */
static int dvb_net_ule_new_payload(struct dvb_net_ule_handle *h)
{
	if (h->ts_remain < 2) {
		pr_warn("Invalid payload packing: only %d bytes left in TS. Resyncing.\n",
			h->ts_remain);
		h->priv->ule_sndu_len = 0;
		h->priv->need_pusi = 1;
		h->ts += TS_SZ;
		return 1;
	}

	if (!h->priv->ule_sndu_len) {
		/* Got at least two bytes, thus extract the SNDU length. */
		h->priv->ule_sndu_len = h->from_where[0] << 8 |
					h->from_where[1];
		if (h->priv->ule_sndu_len & 0x8000) {
			/* D-Bit is set: no dest mac present. */
			h->priv->ule_sndu_len &= 0x7FFF;
			h->priv->ule_dbit = 1;
		} else
			h->priv->ule_dbit = 0;

		/* An SNDU must at least hold type (2) + CRC32 (4) minus one. */
		if (h->priv->ule_sndu_len < 5) {
			pr_warn("%lu: Invalid ULE SNDU length %u. Resyncing.\n",
				h->priv->ts_count,
				h->priv->ule_sndu_len);
			h->dev->stats.rx_errors++;
			h->dev->stats.rx_length_errors++;
			h->priv->ule_sndu_len = 0;
			h->priv->need_pusi = 1;
			h->new_ts = 1;
			h->ts += TS_SZ;
			h->priv->ts_count++;
			return 1;
		}
		h->ts_remain -= 2;	/* consume the 2 bytes SNDU length. */
		h->from_where += 2;
	}

	h->priv->ule_sndu_remain = h->priv->ule_sndu_len + 2;
	/*
	 * State of current TS:
	 * h->ts_remain (remaining bytes in the current TS cell)
	 * 0 ule_type is not available now, we need the next TS cell
	 * 1 the first byte of the ule_type is present
	 * >=2 full ULE header present, maybe some payload data as well.
	 */
	switch (h->ts_remain) {
	case 1:
		h->priv->ule_sndu_remain--;
		h->priv->ule_sndu_type = h->from_where[0] << 8;

		/* first byte of ule_type is set. */
		h->priv->ule_sndu_type_1 = 1;
		h->ts_remain -= 1;
		h->from_where += 1;
		/* fallthrough */
	case 0:
		h->new_ts = 1;
		h->ts += TS_SZ;
		h->priv->ts_count++;
		return 1;

	default: /* complete ULE header is present in current TS. */
		/* Extract ULE type field. */
		if (h->priv->ule_sndu_type_1) {
			h->priv->ule_sndu_type_1 = 0;
			h->priv->ule_sndu_type |= h->from_where[0];
			h->from_where += 1;	/* points to payload start. */
			h->ts_remain -= 1;
		} else {
			/* Complete type is present in new TS. */
			h->priv->ule_sndu_type = h->from_where[0] << 8 |
						 h->from_where[1];
			h->from_where += 2;	/* points to payload start. */
			h->ts_remain -= 2;
		}
		break;
	}

	/*
	 * Allocate the skb (decoder target buffer) with the correct size,
	 * as follows:
	 *
	 * prepare for the largest case: bridged SNDU with MAC address
	 * (dbit = 0).
	 */
	h->priv->ule_skb = dev_alloc_skb(h->priv->ule_sndu_len +
					 ETH_HLEN + ETH_ALEN);
	if (!h->priv->ule_skb) {
		pr_notice("%s: Memory squeeze, dropping packet.\n",
			  h->dev->name);
		h->dev->stats.rx_dropped++;
		return -1;
	}

	/* This includes the CRC32 _and_ dest mac, if !dbit. */
	h->priv->ule_sndu_remain = h->priv->ule_sndu_len;
	h->priv->ule_skb->dev = h->dev;
	/*
	 * Leave space for Ethernet or bridged SNDU header
	 * (eth hdr plus one MAC addr).
	 */
	skb_reserve(h->priv->ule_skb, ETH_HLEN + ETH_ALEN);

	return 0;
}
  552. static int dvb_net_ule_should_drop(struct dvb_net_ule_handle *h)
  553. {
  554. static const u8 bc_addr[ETH_ALEN] = { [0 ... ETH_ALEN - 1] = 0xff };
  555. /*
  556. * The destination MAC address is the next data in the skb. It comes
  557. * before any extension headers.
  558. *
  559. * Check if the payload of this SNDU should be passed up the stack.
  560. */
  561. if (h->priv->rx_mode == RX_MODE_PROMISC)
  562. return 0;
  563. if (h->priv->ule_skb->data[0] & 0x01) {
  564. /* multicast or broadcast */
  565. if (!ether_addr_equal(h->priv->ule_skb->data, bc_addr)) {
  566. /* multicast */
  567. if (h->priv->rx_mode == RX_MODE_MULTI) {
  568. int i;
  569. for (i = 0; i < h->priv->multi_num &&
  570. !ether_addr_equal(h->priv->ule_skb->data,
  571. h->priv->multi_macs[i]);
  572. i++)
  573. ;
  574. if (i == h->priv->multi_num)
  575. return 1;
  576. } else if (h->priv->rx_mode != RX_MODE_ALL_MULTI)
  577. return 1; /* no broadcast; */
  578. /*
  579. * else:
  580. * all multicast mode: accept all multicast packets
  581. */
  582. }
  583. /* else: broadcast */
  584. } else if (!ether_addr_equal(h->priv->ule_skb->data, h->dev->dev_addr))
  585. return 1;
  586. return 0;
  587. }
/*
 * Final step for a completed SNDU: verify its CRC32 trailer and, if
 * valid, strip the trailer, apply destination-MAC filtering, process
 * any ULE extension headers, build/keep the ethernet header and hand
 * the skb to netif_rx(). On any failure the skb is freed and the
 * device error counters are updated.
 */
static void dvb_net_ule_check_crc(struct dvb_net_ule_handle *h,
				  u32 ule_crc, u32 expected_crc)
{
	u8 dest_addr[ETH_ALEN];

	if (ule_crc != expected_crc) {
		pr_warn("%lu: CRC32 check FAILED: %08x / %08x, SNDU len %d type %#x, ts_remain %d, next 2: %x.\n",
			h->priv->ts_count, ule_crc, expected_crc,
			h->priv->ule_sndu_len, h->priv->ule_sndu_type,
			h->ts_remain,
			h->ts_remain > 2 ?
				*(unsigned short *)h->from_where : 0);

#ifdef ULE_DEBUG
		/*
		 * NOTE(review): 'iov' is a local of the caller
		 * (dvb_net_ule) and is not in scope here, so this debug
		 * branch cannot compile with ULE_DEBUG defined.
		 * TODO confirm and fix before enabling ULE_DEBUG.
		 */
		hexdump(iov[0].iov_base, iov[0].iov_len);
		hexdump(iov[1].iov_base, iov[1].iov_len);
		hexdump(iov[2].iov_base, iov[2].iov_len);

		if (h->ule_where == h->ule_hist) {
			hexdump(&h->ule_hist[98*TS_SZ], TS_SZ);
			hexdump(&h->ule_hist[99*TS_SZ], TS_SZ);
		} else if (h->ule_where == &h->ule_hist[TS_SZ]) {
			hexdump(&h->ule_hist[99*TS_SZ], TS_SZ);
			hexdump(h->ule_hist, TS_SZ);
		} else {
			hexdump(h->ule_where - TS_SZ - TS_SZ, TS_SZ);
			hexdump(h->ule_where - TS_SZ, TS_SZ);
		}
		h->ule_dump = 1;
#endif

		h->dev->stats.rx_errors++;
		h->dev->stats.rx_crc_errors++;
		dev_kfree_skb(h->priv->ule_skb);

		return;
	}

	/* CRC32 verified OK. */

	/* CRC32 was OK, so remove it from skb. */
	h->priv->ule_skb->tail -= 4;
	h->priv->ule_skb->len -= 4;

	if (!h->priv->ule_dbit) {
		/* Dest MAC is present: filter on it before going further. */
		if (dvb_net_ule_should_drop(h)) {
#ifdef ULE_DEBUG
			netdev_dbg(h->dev,
				   "Dropping SNDU: MAC destination address does not match: dest addr: %pM, h->dev addr: %pM\n",
				   h->priv->ule_skb->data, h->dev->dev_addr);
#endif
			dev_kfree_skb(h->priv->ule_skb);
			return;
		}

		skb_copy_from_linear_data(h->priv->ule_skb, dest_addr,
					  ETH_ALEN);
		skb_pull(h->priv->ule_skb, ETH_ALEN);
	} else {
		/* dest_addr buffer is only valid if h->priv->ule_dbit == 0 */
		eth_zero_addr(dest_addr);
	}

	/* Handle ULE Extension Headers. */
	if (h->priv->ule_sndu_type < ETH_P_802_3_MIN) {
		/* There is an extension header. Handle it accordingly. */
		int l = handle_ule_extensions(h->priv);

		if (l < 0) {
			/*
			 * Mandatory extension header unknown or TEST SNDU.
			 * Drop it.
			 */

			// pr_warn("Dropping SNDU, extension headers.\n" );
			dev_kfree_skb(h->priv->ule_skb);
			return;
		}
		skb_pull(h->priv->ule_skb, l);
	}

	/*
	 * Construct/assure correct ethernet header.
	 * Note: in bridged mode (h->priv->ule_bridged != 0)
	 * we already have the (original) ethernet
	 * header at the start of the payload (after
	 * optional dest. address and any extension
	 * headers).
	 */
	if (!h->priv->ule_bridged) {
		skb_push(h->priv->ule_skb, ETH_HLEN);
		h->ethh = (struct ethhdr *)h->priv->ule_skb->data;
		memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN);
		eth_zero_addr(h->ethh->h_source);
		h->ethh->h_proto = htons(h->priv->ule_sndu_type);
	}
	/* else: skb is in correct state; nothing to do. */
	h->priv->ule_bridged = 0;

	/* Stuff into kernel's protocol stack. */
	h->priv->ule_skb->protocol = dvb_net_eth_type_trans(h->priv->ule_skb,
							    h->dev);
	/*
	 * If D-bit is set (i.e. destination MAC address not present),
	 * receive the packet anyhow.
	 */
#if 0
	if (h->priv->ule_dbit && skb->pkt_type == PACKET_OTHERHOST)
		h->priv->ule_skb->pkt_type = PACKET_HOST;
#endif
	h->dev->stats.rx_packets++;
	h->dev->stats.rx_bytes += h->priv->ule_skb->len;
	netif_rx(h->priv->ule_skb);
}
/*
 * ULE decapsulation (RFC 4326): walk every 188-byte TS cell in @buf and
 * reassemble ULE SNDUs into priv->ule_skb, handing each completed SNDU to
 * the stack once its trailing CRC-32 has been verified.
 *
 * State that must survive across calls (partially assembled SNDU, TS
 * continuity counter, PUSI resync flag) lives in the per-device priv;
 * the per-call cursor state lives in the on-stack handle @h, which the
 * dvb_net_ule_* helpers share and advance.
 */
static void dvb_net_ule(struct net_device *dev, const u8 *buf, size_t buf_len)
{
	int ret;
	struct dvb_net_ule_handle h = {
		.dev = dev,
		.buf = buf,
		.buf_len = buf_len,
		.skipped = 0L,
		.ts = NULL,
		.ts_end = NULL,
		.from_where = NULL,
		.ts_remain = 0,
		.how_much = 0,
		.new_ts = 1,
		.ethh = NULL,
		.error = false,
#ifdef ULE_DEBUG
		.ule_where = ule_hist,
#endif
	};

	/*
	 * For all TS cells in current buffer.
	 * Apparently, we are called for every single TS cell.
	 */
	for (h.ts = h.buf, h.ts_end = h.buf + h.buf_len;
	     h.ts < h.ts_end; /* no incr. */) {
		if (h.new_ts) {
			/* We are about to process a new TS cell. */
			if (dvb_net_ule_new_ts_cell(&h))
				continue;
		}

		/* Synchronize on PUSI, if required. */
		if (h.priv->need_pusi) {
			if (dvb_net_ule_ts_pusi(&h))
				continue;
		}

		if (h.new_ts) {
			if (dvb_net_ule_new_ts(&h))
				continue;
		}

		/* Check if new payload needs to be started. */
		if (h.priv->ule_skb == NULL) {
			ret = dvb_net_ule_new_payload(&h);
			if (ret < 0)
				return;	/* fatal for this buffer (e.g. no skb) */
			if (ret)
				continue;
		}

		/* Copy data into our current skb. */
		h.how_much = min(h.priv->ule_sndu_remain, (int)h.ts_remain);
		memcpy(skb_put(h.priv->ule_skb, h.how_much),
		       h.from_where, h.how_much);
		h.priv->ule_sndu_remain -= h.how_much;
		h.ts_remain -= h.how_much;
		h.from_where += h.how_much;

		/* Check for complete payload. */
		if (h.priv->ule_sndu_remain <= 0) {
			/* Check CRC32, we've got it in our skb already. */
			__be16 ulen = htons(h.priv->ule_sndu_len);
			__be16 utype = htons(h.priv->ule_sndu_type);
			const u8 *tail;
			/* CRC input: reconstructed 4-byte SNDU header plus
			 * the payload minus the 4-byte CRC trailer itself. */
			struct kvec iov[3] = {
				{ &ulen, sizeof ulen },
				{ &utype, sizeof utype },
				{ h.priv->ule_skb->data,
				  h.priv->ule_skb->len - 4 }
			};
			u32 ule_crc = ~0L, expected_crc;

			if (h.priv->ule_dbit) {
				/* Set D-bit for CRC32 verification,
				 * if it was set originally. */
				ulen |= htons(0x8000);
			}

			ule_crc = iov_crc32(ule_crc, iov, 3);
			tail = skb_tail_pointer(h.priv->ule_skb);
			/* The CRC trailer is stored big-endian on the wire. */
			expected_crc = *(tail - 4) << 24 |
				       *(tail - 3) << 16 |
				       *(tail - 2) << 8 |
				       *(tail - 1);

			dvb_net_ule_check_crc(&h, ule_crc, expected_crc);

			/* Prepare for next SNDU. */
			reset_ule(h.priv);
		}

		/* More data in current TS (look at the bytes following the CRC32)? */
		if (h.ts_remain >= 2 && *((unsigned short *)h.from_where) != 0xFFFF) {
			/* Next ULE SNDU starts right there. */
			h.new_ts = 0;
			h.priv->ule_skb = NULL;
			h.priv->ule_sndu_type_1 = 0;
			h.priv->ule_sndu_len = 0;
			// pr_warn("More data in current TS: [%#x %#x %#x %#x]\n",
			//	*(h.from_where + 0), *(h.from_where + 1),
			//	*(h.from_where + 2), *(h.from_where + 3));
			// pr_warn("h.ts @ %p, stopped @ %p:\n", h.ts, h.from_where + 0);
			// hexdump(h.ts, 188);
		} else {
			/* Cell exhausted (or only 0xFF stuffing remains):
			 * advance to the next 188-byte TS cell. */
			h.new_ts = 1;
			h.ts += TS_SZ;
			h.priv->ts_count++;
			if (h.priv->ule_skb == NULL) {
				h.priv->need_pusi = 1;
				h.priv->ule_sndu_type_1 = 0;
				h.priv->ule_sndu_len = 0;
			}
		}
	} /* for all available TS cells */
}
  795. static int dvb_net_ts_callback(const u8 *buffer1, size_t buffer1_len,
  796. const u8 *buffer2, size_t buffer2_len,
  797. struct dmx_ts_feed *feed)
  798. {
  799. struct net_device *dev = feed->priv;
  800. if (buffer2)
  801. pr_warn("buffer2 not NULL: %p.\n", buffer2);
  802. if (buffer1_len > 32768)
  803. pr_warn("length > 32k: %zu.\n", buffer1_len);
  804. /* pr_info("TS callback: %u bytes, %u TS cells @ %p.\n",
  805. buffer1_len, buffer1_len / TS_SZ, buffer1); */
  806. dvb_net_ule(dev, buffer1, buffer1_len);
  807. return 0;
  808. }
/*
 * MPE decapsulation (datagram_section, ETSI EN 301 192): rebuild an
 * Ethernet frame from one complete section and feed it to the stack.
 *
 * @pkt points at the section header; @pkt_len includes the trailing
 * 32-bit checksum/CRC.  The destination MAC is scattered (byte-reversed)
 * over the section header fields; the source MAC is synthesized as zero.
 */
static void dvb_net_sec(struct net_device *dev,
			const u8 *pkt, int pkt_len)
{
	u8 *eth;
	struct sk_buff *skb;
	struct net_device_stats *stats = &dev->stats;
	int snap = 0;

	/* note: pkt_len includes a 32bit checksum */
	if (pkt_len < 16) {
		pr_warn("%s: IP/MPE packet length = %d too small.\n",
			dev->name, pkt_len);
		stats->rx_errors++;
		stats->rx_length_errors++;
		return;
	}

/* it seems some ISPs manage to screw up here, so we have to
 * relax the error checks... */
#if 0
	if ((pkt[5] & 0xfd) != 0xc1) {
		/* drop scrambled or broken packets */
#else
	if ((pkt[5] & 0x3c) != 0x00) {
		/* drop scrambled */
#endif
		stats->rx_errors++;
		stats->rx_crc_errors++;
		return;
	}

	/* Bit 0x02 in byte 5 flags an LLC/SNAP-encapsulated payload
	 * (presumably the LLC_SNAP_flag of the MPE header — confirm
	 * against EN 301 192). */
	if (pkt[5] & 0x02) {
		/* handle LLC/SNAP, see rfc-1042 */
		if (pkt_len < 24 || memcmp(&pkt[12], "\xaa\xaa\x03\0\0\0", 6)) {
			stats->rx_dropped++;
			return;
		}
		snap = 8;
	}

	/* Non-zero section_number: datagram spans multiple sections,
	 * which we cannot reassemble yet. */
	if (pkt[7]) {
		/* FIXME: assemble datagram from multiple sections */
		stats->rx_errors++;
		stats->rx_frame_errors++;
		return;
	}

	/* we have 14 byte ethernet header (ip header follows);
	 * 12 byte MPE header; 4 byte checksum; + 2 byte alignment, 8 byte LLC/SNAP
	 */
	if (!(skb = dev_alloc_skb(pkt_len - 4 - 12 + 14 + 2 - snap))) {
		//pr_notice("%s: Memory squeeze, dropping packet.\n", dev->name);
		stats->rx_dropped++;
		return;
	}
	skb_reserve(skb, 2);	/* longword align L3 header */
	skb->dev = dev;

	/* copy L3 payload */
	eth = (u8 *) skb_put(skb, pkt_len - 12 - 4 + 14 - snap);
	memcpy(eth + 14, pkt + 12 + snap, pkt_len - 12 - 4 - snap);

	/* create ethernet header: destination MAC bytes are stored
	 * byte-reversed across two header fields. */
	eth[0]=pkt[0x0b];
	eth[1]=pkt[0x0a];
	eth[2]=pkt[0x09];
	eth[3]=pkt[0x08];
	eth[4]=pkt[0x04];
	eth[5]=pkt[0x03];

	/* Source MAC: unknown on a unidirectional link, use all-zero. */
	eth[6]=eth[7]=eth[8]=eth[9]=eth[10]=eth[11]=0;

	if (snap) {
		/* EtherType comes from the SNAP header we are stripping. */
		eth[12] = pkt[18];
		eth[13] = pkt[19];
	} else {
		/* protocol numbers are from rfc-1700 or
		 * http://www.iana.org/assignments/ethernet-numbers
		 */
		if (pkt[12] >> 4 == 6) { /* version field from IP header */
			eth[12] = 0x86;	/* IPv6 */
			eth[13] = 0xdd;
		} else {
			eth[12] = 0x08;	/* IPv4 */
			eth[13] = 0x00;
		}
	}

	skb->protocol = dvb_net_eth_type_trans(skb, dev);

	stats->rx_packets++;
	stats->rx_bytes+=skb->len;
	netif_rx(skb);
}
  892. static int dvb_net_sec_callback(const u8 *buffer1, size_t buffer1_len,
  893. const u8 *buffer2, size_t buffer2_len,
  894. struct dmx_section_filter *filter)
  895. {
  896. struct net_device *dev = filter->priv;
  897. /**
  898. * we rely on the DVB API definition where exactly one complete
  899. * section is delivered in buffer1
  900. */
  901. dvb_net_sec (dev, buffer1, buffer1_len);
  902. return 0;
  903. }
  904. static int dvb_net_tx(struct sk_buff *skb, struct net_device *dev)
  905. {
  906. dev_kfree_skb(skb);
  907. return NETDEV_TX_OK;
  908. }
/* MAC match masks/addresses for the section filters set up in
 * dvb_net_feed_start(), one per receive mode. */
static u8 mask_normal[6]={0xff, 0xff, 0xff, 0xff, 0xff, 0xff};	/* full unicast match */
static u8 mask_allmulti[6]={0xff, 0xff, 0xff, 0x00, 0x00, 0x00};	/* match OUI prefix only */
static u8 mac_allmulti[6]={0x01, 0x00, 0x5e, 0x00, 0x00, 0x00};	/* IPv4 multicast prefix 01:00:5e */
static u8 mask_promisc[6]={0x00, 0x00, 0x00, 0x00, 0x00, 0x00};	/* match anything */
  913. static int dvb_net_filter_sec_set(struct net_device *dev,
  914. struct dmx_section_filter **secfilter,
  915. u8 *mac, u8 *mac_mask)
  916. {
  917. struct dvb_net_priv *priv = netdev_priv(dev);
  918. int ret;
  919. *secfilter=NULL;
  920. ret = priv->secfeed->allocate_filter(priv->secfeed, secfilter);
  921. if (ret<0) {
  922. pr_err("%s: could not get filter\n", dev->name);
  923. return ret;
  924. }
  925. (*secfilter)->priv=(void *) dev;
  926. memset((*secfilter)->filter_value, 0x00, DMX_MAX_FILTER_SIZE);
  927. memset((*secfilter)->filter_mask, 0x00, DMX_MAX_FILTER_SIZE);
  928. memset((*secfilter)->filter_mode, 0xff, DMX_MAX_FILTER_SIZE);
  929. (*secfilter)->filter_value[0]=0x3e;
  930. (*secfilter)->filter_value[3]=mac[5];
  931. (*secfilter)->filter_value[4]=mac[4];
  932. (*secfilter)->filter_value[8]=mac[3];
  933. (*secfilter)->filter_value[9]=mac[2];
  934. (*secfilter)->filter_value[10]=mac[1];
  935. (*secfilter)->filter_value[11]=mac[0];
  936. (*secfilter)->filter_mask[0] = 0xff;
  937. (*secfilter)->filter_mask[3] = mac_mask[5];
  938. (*secfilter)->filter_mask[4] = mac_mask[4];
  939. (*secfilter)->filter_mask[8] = mac_mask[3];
  940. (*secfilter)->filter_mask[9] = mac_mask[2];
  941. (*secfilter)->filter_mask[10] = mac_mask[1];
  942. (*secfilter)->filter_mask[11]=mac_mask[0];
  943. netdev_dbg(dev, "filter mac=%pM mask=%pM\n", mac, mac_mask);
  944. return 0;
  945. }
/*
 * Allocate and start the demux feed for this interface according to
 * priv->feedtype:
 *  - MPE: a section feed on priv->pid plus section filters matching the
 *    current rx_mode (unicast / multicast list / allmulti / promisc);
 *  - ULE: a raw TS feed on priv->pid.
 *
 * Expects all feeds/filters to be torn down already (complains otherwise).
 * Returns 0 or a negative errno; serialized against stop via priv->mutex.
 */
static int dvb_net_feed_start(struct net_device *dev)
{
	int ret = 0, i;
	struct dvb_net_priv *priv = netdev_priv(dev);
	struct dmx_demux *demux = priv->demux;
	unsigned char *mac = (unsigned char *) dev->dev_addr;

	netdev_dbg(dev, "rx_mode %i\n", priv->rx_mode);
	mutex_lock(&priv->mutex);
	/* Unbalanced start/stop if any feed or filter still exists. */
	if (priv->tsfeed || priv->secfeed || priv->secfilter || priv->multi_secfilter[0])
		pr_err("%s: BUG %d\n", __func__, __LINE__);

	priv->secfeed=NULL;
	priv->secfilter=NULL;
	priv->tsfeed = NULL;

	if (priv->feedtype == DVB_NET_FEEDTYPE_MPE) {
		netdev_dbg(dev, "alloc secfeed\n");
		ret=demux->allocate_section_feed(demux, &priv->secfeed,
					 dvb_net_sec_callback);
		if (ret<0) {
			pr_err("%s: could not allocate section feed\n",
			       dev->name);
			goto error;
		}

		/* Third argument enables demux-side section CRC checking —
		 * presumably; confirm against the dmx_section_feed API. */
		ret = priv->secfeed->set(priv->secfeed, priv->pid, 1);

		if (ret<0) {
			pr_err("%s: could not set section feed\n", dev->name);
			priv->demux->release_section_feed(priv->demux, priv->secfeed);
			priv->secfeed=NULL;
			goto error;
		}

		/* NOTE(review): the dvb_net_filter_sec_set() return values
		 * below are ignored — a failed filter allocation only loses
		 * that one filter, the feed still starts. */
		if (priv->rx_mode != RX_MODE_PROMISC) {
			netdev_dbg(dev, "set secfilter\n");
			dvb_net_filter_sec_set(dev, &priv->secfilter, mac, mask_normal);
		}

		switch (priv->rx_mode) {
		case RX_MODE_MULTI:
			/* One filter per address on the multicast list. */
			for (i = 0; i < priv->multi_num; i++) {
				netdev_dbg(dev, "set multi_secfilter[%d]\n", i);
				dvb_net_filter_sec_set(dev, &priv->multi_secfilter[i],
						       priv->multi_macs[i], mask_normal);
			}
			break;
		case RX_MODE_ALL_MULTI:
			/* Single prefix filter matching all IPv4 multicast. */
			priv->multi_num=1;
			netdev_dbg(dev, "set multi_secfilter[0]\n");
			dvb_net_filter_sec_set(dev, &priv->multi_secfilter[0],
					       mac_allmulti, mask_allmulti);
			break;
		case RX_MODE_PROMISC:
			/* All-zero mask: every section matches. */
			priv->multi_num=0;
			netdev_dbg(dev, "set secfilter\n");
			dvb_net_filter_sec_set(dev, &priv->secfilter, mac, mask_promisc);
			break;
		}

		netdev_dbg(dev, "start filtering\n");
		priv->secfeed->start_filtering(priv->secfeed);
	} else if (priv->feedtype == DVB_NET_FEEDTYPE_ULE) {
		ktime_t timeout = ns_to_ktime(10 * NSEC_PER_MSEC);

		/* we have payloads encapsulated in TS */
		netdev_dbg(dev, "alloc tsfeed\n");
		ret = demux->allocate_ts_feed(demux, &priv->tsfeed, dvb_net_ts_callback);
		if (ret < 0) {
			pr_err("%s: could not allocate ts feed\n", dev->name);
			goto error;
		}

		/* Set netdevice pointer for ts decaps callback. */
		priv->tsfeed->priv = (void *)dev;
		ret = priv->tsfeed->set(priv->tsfeed,
					priv->pid, /* pid */
					TS_PACKET, /* type */
					DMX_PES_OTHER, /* pes type */
					timeout /* timeout */
					);

		if (ret < 0) {
			pr_err("%s: could not set ts feed\n", dev->name);
			priv->demux->release_ts_feed(priv->demux, priv->tsfeed);
			priv->tsfeed = NULL;
			goto error;
		}

		netdev_dbg(dev, "start filtering\n");
		priv->tsfeed->start_filtering(priv->tsfeed);
	} else
		ret = -EINVAL;

error:
	mutex_unlock(&priv->mutex);
	return ret;
}
  1032. static int dvb_net_feed_stop(struct net_device *dev)
  1033. {
  1034. struct dvb_net_priv *priv = netdev_priv(dev);
  1035. int i, ret = 0;
  1036. mutex_lock(&priv->mutex);
  1037. if (priv->feedtype == DVB_NET_FEEDTYPE_MPE) {
  1038. if (priv->secfeed) {
  1039. if (priv->secfeed->is_filtering) {
  1040. netdev_dbg(dev, "stop secfeed\n");
  1041. priv->secfeed->stop_filtering(priv->secfeed);
  1042. }
  1043. if (priv->secfilter) {
  1044. netdev_dbg(dev, "release secfilter\n");
  1045. priv->secfeed->release_filter(priv->secfeed,
  1046. priv->secfilter);
  1047. priv->secfilter=NULL;
  1048. }
  1049. for (i=0; i<priv->multi_num; i++) {
  1050. if (priv->multi_secfilter[i]) {
  1051. netdev_dbg(dev, "release multi_filter[%d]\n",
  1052. i);
  1053. priv->secfeed->release_filter(priv->secfeed,
  1054. priv->multi_secfilter[i]);
  1055. priv->multi_secfilter[i] = NULL;
  1056. }
  1057. }
  1058. priv->demux->release_section_feed(priv->demux, priv->secfeed);
  1059. priv->secfeed = NULL;
  1060. } else
  1061. pr_err("%s: no feed to stop\n", dev->name);
  1062. } else if (priv->feedtype == DVB_NET_FEEDTYPE_ULE) {
  1063. if (priv->tsfeed) {
  1064. if (priv->tsfeed->is_filtering) {
  1065. netdev_dbg(dev, "stop tsfeed\n");
  1066. priv->tsfeed->stop_filtering(priv->tsfeed);
  1067. }
  1068. priv->demux->release_ts_feed(priv->demux, priv->tsfeed);
  1069. priv->tsfeed = NULL;
  1070. }
  1071. else
  1072. pr_err("%s: no ts feed to stop\n", dev->name);
  1073. } else
  1074. ret = -EINVAL;
  1075. mutex_unlock(&priv->mutex);
  1076. return ret;
  1077. }
  1078. static int dvb_set_mc_filter(struct net_device *dev, unsigned char *addr)
  1079. {
  1080. struct dvb_net_priv *priv = netdev_priv(dev);
  1081. if (priv->multi_num == DVB_NET_MULTICAST_MAX)
  1082. return -ENOMEM;
  1083. memcpy(priv->multi_macs[priv->multi_num], addr, ETH_ALEN);
  1084. priv->multi_num++;
  1085. return 0;
  1086. }
/*
 * Deferred worker behind ndo_set_rx_mode: recompute the receive mode from
 * the device flags and multicast list, then restart the demux feeds so the
 * new section filters take effect.  Runs in process context because feed
 * start/stop takes a mutex and may sleep.
 */
static void wq_set_multicast_list (struct work_struct *work)
{
	struct dvb_net_priv *priv =
		container_of(work, struct dvb_net_priv, set_multicast_list_wq);
	struct net_device *dev = priv->net;

	dvb_net_feed_stop(dev);
	priv->rx_mode = RX_MODE_UNI;	/* default: unicast only */
	netif_addr_lock_bh(dev);	/* stabilize the mc list while scanning */

	if (dev->flags & IFF_PROMISC) {
		netdev_dbg(dev, "promiscuous mode\n");
		priv->rx_mode = RX_MODE_PROMISC;
	} else if ((dev->flags & IFF_ALLMULTI)) {
		netdev_dbg(dev, "allmulti mode\n");
		priv->rx_mode = RX_MODE_ALL_MULTI;
	} else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;

		netdev_dbg(dev, "set_mc_list, %d entries\n",
			   netdev_mc_count(dev));

		/* Rebuild the multicast filter table from scratch. */
		priv->rx_mode = RX_MODE_MULTI;
		priv->multi_num = 0;
		netdev_for_each_mc_addr(ha, dev)
			dvb_set_mc_filter(dev, ha->addr);
	}

	netif_addr_unlock_bh(dev);
	dvb_net_feed_start(dev);
}
  1113. static void dvb_net_set_multicast_list (struct net_device *dev)
  1114. {
  1115. struct dvb_net_priv *priv = netdev_priv(dev);
  1116. schedule_work(&priv->set_multicast_list_wq);
  1117. }
  1118. static void wq_restart_net_feed (struct work_struct *work)
  1119. {
  1120. struct dvb_net_priv *priv =
  1121. container_of(work, struct dvb_net_priv, restart_net_feed_wq);
  1122. struct net_device *dev = priv->net;
  1123. if (netif_running(dev)) {
  1124. dvb_net_feed_stop(dev);
  1125. dvb_net_feed_start(dev);
  1126. }
  1127. }
  1128. static int dvb_net_set_mac (struct net_device *dev, void *p)
  1129. {
  1130. struct dvb_net_priv *priv = netdev_priv(dev);
  1131. struct sockaddr *addr=p;
  1132. memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
  1133. if (netif_running(dev))
  1134. schedule_work(&priv->restart_net_feed_wq);
  1135. return 0;
  1136. }
  1137. static int dvb_net_open(struct net_device *dev)
  1138. {
  1139. struct dvb_net_priv *priv = netdev_priv(dev);
  1140. priv->in_use++;
  1141. dvb_net_feed_start(dev);
  1142. return 0;
  1143. }
  1144. static int dvb_net_stop(struct net_device *dev)
  1145. {
  1146. struct dvb_net_priv *priv = netdev_priv(dev);
  1147. priv->in_use--;
  1148. return dvb_net_feed_stop(dev);
  1149. }
/* Plain Ethernet header construction/parsing for the dvb%d interfaces. */
static const struct header_ops dvb_header_ops = {
	.create		= eth_header,
	.parse		= eth_header_parse,
};
/* net_device callbacks; transmit is a no-op since the link is rx-only. */
static const struct net_device_ops dvb_netdev_ops = {
	.ndo_open		= dvb_net_open,
	.ndo_stop		= dvb_net_stop,
	.ndo_start_xmit		= dvb_net_tx,
	.ndo_set_rx_mode	= dvb_net_set_multicast_list,
	.ndo_set_mac_address	= dvb_net_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
};
  1162. static void dvb_net_setup(struct net_device *dev)
  1163. {
  1164. ether_setup(dev);
  1165. dev->header_ops = &dvb_header_ops;
  1166. dev->netdev_ops = &dvb_netdev_ops;
  1167. dev->mtu = 4096;
  1168. dev->max_mtu = 4096;
  1169. dev->flags |= IFF_NOARP;
  1170. }
  1171. static int get_if(struct dvb_net *dvbnet)
  1172. {
  1173. int i;
  1174. for (i=0; i<DVB_NET_DEVICES_MAX; i++)
  1175. if (!dvbnet->state[i])
  1176. break;
  1177. if (i == DVB_NET_DEVICES_MAX)
  1178. return -1;
  1179. dvbnet->state[i]=1;
  1180. return i;
  1181. }
  1182. static int dvb_net_add_if(struct dvb_net *dvbnet, u16 pid, u8 feedtype)
  1183. {
  1184. struct net_device *net;
  1185. struct dvb_net_priv *priv;
  1186. int result;
  1187. int if_num;
  1188. if (feedtype != DVB_NET_FEEDTYPE_MPE && feedtype != DVB_NET_FEEDTYPE_ULE)
  1189. return -EINVAL;
  1190. if ((if_num = get_if(dvbnet)) < 0)
  1191. return -EINVAL;
  1192. net = alloc_netdev(sizeof(struct dvb_net_priv), "dvb",
  1193. NET_NAME_UNKNOWN, dvb_net_setup);
  1194. if (!net)
  1195. return -ENOMEM;
  1196. if (dvbnet->dvbdev->id)
  1197. snprintf(net->name, IFNAMSIZ, "dvb%d%u%d",
  1198. dvbnet->dvbdev->adapter->num, dvbnet->dvbdev->id, if_num);
  1199. else
  1200. /* compatibility fix to keep dvb0_0 format */
  1201. snprintf(net->name, IFNAMSIZ, "dvb%d_%d",
  1202. dvbnet->dvbdev->adapter->num, if_num);
  1203. net->addr_len = 6;
  1204. memcpy(net->dev_addr, dvbnet->dvbdev->adapter->proposed_mac, 6);
  1205. dvbnet->device[if_num] = net;
  1206. priv = netdev_priv(net);
  1207. priv->net = net;
  1208. priv->demux = dvbnet->demux;
  1209. priv->pid = pid;
  1210. priv->rx_mode = RX_MODE_UNI;
  1211. priv->need_pusi = 1;
  1212. priv->tscc = 0;
  1213. priv->feedtype = feedtype;
  1214. reset_ule(priv);
  1215. INIT_WORK(&priv->set_multicast_list_wq, wq_set_multicast_list);
  1216. INIT_WORK(&priv->restart_net_feed_wq, wq_restart_net_feed);
  1217. mutex_init(&priv->mutex);
  1218. net->base_addr = pid;
  1219. if ((result = register_netdev(net)) < 0) {
  1220. dvbnet->device[if_num] = NULL;
  1221. free_netdev(net);
  1222. return result;
  1223. }
  1224. pr_info("created network interface %s\n", net->name);
  1225. return if_num;
  1226. }
  1227. static int dvb_net_remove_if(struct dvb_net *dvbnet, unsigned long num)
  1228. {
  1229. struct net_device *net = dvbnet->device[num];
  1230. struct dvb_net_priv *priv;
  1231. if (!dvbnet->state[num])
  1232. return -EINVAL;
  1233. priv = netdev_priv(net);
  1234. if (priv->in_use)
  1235. return -EBUSY;
  1236. dvb_net_stop(net);
  1237. flush_work(&priv->set_multicast_list_wq);
  1238. flush_work(&priv->restart_net_feed_wq);
  1239. pr_info("removed network interface %s\n", net->name);
  1240. unregister_netdev(net);
  1241. dvbnet->state[num]=0;
  1242. dvbnet->device[num] = NULL;
  1243. free_netdev(net);
  1244. return 0;
  1245. }
/*
 * Handle the DVB network ioctls (NET_ADD_IF / NET_GET_IF / NET_REMOVE_IF
 * plus their legacy binary-compat variants).  @parg has already been
 * copied in from user space by dvb_usercopy() and is copied back out on
 * return, so result fields are simply written into it.
 */
static int dvb_net_do_ioctl(struct file *file,
		  unsigned int cmd, void *parg)
{
	struct dvb_device *dvbdev = file->private_data;
	struct dvb_net *dvbnet = dvbdev->priv;
	int ret = 0;

	/* A read-only open must not create/remove interfaces. */
	if (((file->f_flags&O_ACCMODE)==O_RDONLY))
		return -EPERM;

	if (mutex_lock_interruptible(&dvbnet->ioctl_mutex))
		return -ERESTARTSYS;

	switch (cmd) {
	case NET_ADD_IF:
	{
		struct dvb_net_if *dvbnetif = parg;
		int result;

		if (!capable(CAP_SYS_ADMIN)) {
			ret = -EPERM;
			goto ioctl_error;
		}

		/* Pin the adapter module for the interface's lifetime;
		 * the reference is dropped in NET_REMOVE_IF. */
		if (!try_module_get(dvbdev->adapter->module)) {
			ret = -EPERM;
			goto ioctl_error;
		}

		result=dvb_net_add_if(dvbnet, dvbnetif->pid, dvbnetif->feedtype);
		if (result<0) {
			module_put(dvbdev->adapter->module);
			ret = result;
			goto ioctl_error;
		}

		dvbnetif->if_num=result;
		break;
	}
	case NET_GET_IF:
	{
		struct net_device *netdev;
		struct dvb_net_priv *priv_data;
		struct dvb_net_if *dvbnetif = parg;

		/* NOTE(review): if_num is user-controlled and used as an
		 * array index; newer kernels sanitize such indices with
		 * array_index_nospec() — consider doing the same here. */
		if (dvbnetif->if_num >= DVB_NET_DEVICES_MAX ||
		    !dvbnet->state[dvbnetif->if_num]) {
			ret = -EINVAL;
			goto ioctl_error;
		}

		netdev = dvbnet->device[dvbnetif->if_num];

		priv_data = netdev_priv(netdev);
		dvbnetif->pid=priv_data->pid;
		dvbnetif->feedtype=priv_data->feedtype;
		break;
	}
	case NET_REMOVE_IF:
	{
		if (!capable(CAP_SYS_ADMIN)) {
			ret = -EPERM;
			goto ioctl_error;
		}
		if ((unsigned long) parg >= DVB_NET_DEVICES_MAX) {
			ret = -EINVAL;
			goto ioctl_error;
		}
		ret = dvb_net_remove_if(dvbnet, (unsigned long) parg);
		if (!ret)
			module_put(dvbdev->adapter->module);
		break;
	}

	/* binary compatibility cruft */
	case __NET_ADD_IF_OLD:
	{
		struct __dvb_net_if_old *dvbnetif = parg;
		int result;

		if (!capable(CAP_SYS_ADMIN)) {
			ret = -EPERM;
			goto ioctl_error;
		}

		if (!try_module_get(dvbdev->adapter->module)) {
			ret = -EPERM;
			goto ioctl_error;
		}

		/* Old ABI had no feedtype field: always MPE. */
		result=dvb_net_add_if(dvbnet, dvbnetif->pid, DVB_NET_FEEDTYPE_MPE);
		if (result<0) {
			module_put(dvbdev->adapter->module);
			ret = result;
			goto ioctl_error;
		}

		dvbnetif->if_num=result;
		break;
	}
	case __NET_GET_IF_OLD:
	{
		struct net_device *netdev;
		struct dvb_net_priv *priv_data;
		struct __dvb_net_if_old *dvbnetif = parg;

		if (dvbnetif->if_num >= DVB_NET_DEVICES_MAX ||
		    !dvbnet->state[dvbnetif->if_num]) {
			ret = -EINVAL;
			goto ioctl_error;
		}

		netdev = dvbnet->device[dvbnetif->if_num];

		priv_data = netdev_priv(netdev);
		dvbnetif->pid=priv_data->pid;
		break;
	}
	default:
		ret = -ENOTTY;
		break;
	}

ioctl_error:
	mutex_unlock(&dvbnet->ioctl_mutex);
	return ret;
}
/* unlocked_ioctl entry point: dvb_usercopy() copies the argument in/out
 * of user space and dispatches to dvb_net_do_ioctl(). */
static long dvb_net_ioctl(struct file *file,
	      unsigned int cmd, unsigned long arg)
{
	return dvb_usercopy(file, cmd, arg, dvb_net_do_ioctl);
}
  1359. static int dvb_net_close(struct inode *inode, struct file *file)
  1360. {
  1361. struct dvb_device *dvbdev = file->private_data;
  1362. struct dvb_net *dvbnet = dvbdev->priv;
  1363. dvb_generic_release(inode, file);
  1364. if(dvbdev->users == 1 && dvbnet->exit == 1)
  1365. wake_up(&dvbdev->wait_queue);
  1366. return 0;
  1367. }
/* File operations for the DVB network device node. */
static const struct file_operations dvb_net_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = dvb_net_ioctl,
	.open =	dvb_generic_open,
	.release = dvb_net_close,
	.llseek = noop_llseek,
};
/* Template for the registered "net" dvb_device (single writer/user). */
static const struct dvb_device dvbdev_net = {
	.priv = NULL,
	.users = 1,
	.writers = 1,
#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
	.name = "dvb-net",
#endif
	.fops = &dvb_net_fops,
};
  1384. void dvb_net_release (struct dvb_net *dvbnet)
  1385. {
  1386. int i;
  1387. dvbnet->exit = 1;
  1388. if (dvbnet->dvbdev->users < 1)
  1389. wait_event(dvbnet->dvbdev->wait_queue,
  1390. dvbnet->dvbdev->users==1);
  1391. dvb_unregister_device(dvbnet->dvbdev);
  1392. for (i=0; i<DVB_NET_DEVICES_MAX; i++) {
  1393. if (!dvbnet->state[i])
  1394. continue;
  1395. dvb_net_remove_if(dvbnet, i);
  1396. }
  1397. }
  1398. EXPORT_SYMBOL(dvb_net_release);
  1399. int dvb_net_init (struct dvb_adapter *adap, struct dvb_net *dvbnet,
  1400. struct dmx_demux *dmx)
  1401. {
  1402. int i;
  1403. mutex_init(&dvbnet->ioctl_mutex);
  1404. dvbnet->demux = dmx;
  1405. for (i=0; i<DVB_NET_DEVICES_MAX; i++)
  1406. dvbnet->state[i] = 0;
  1407. return dvb_register_device(adap, &dvbnet->dvbdev, &dvbdev_net,
  1408. dvbnet, DVB_DEVICE_NET, 0);
  1409. }
  1410. EXPORT_SYMBOL(dvb_net_init);