/*
 * dvb_net.c
 *
 * Copyright (C) 2001 Convergence integrated media GmbH
 *                    Ralph Metzler <ralph@convergence.de>
 * Copyright (C) 2002 Ralph Metzler <rjkm@metzlerbros.de>
 *
 * ULE Decapsulation code:
 * Copyright (C) 2003, 2004 gcs - Global Communication & Services GmbH.
 *                      and Department of Scientific Computing
 *                          Paris Lodron University of Salzburg.
 *                          Hilmar Linder <hlinder@cosy.sbg.ac.at>
 *                      and Wolfram Stering <wstering@cosy.sbg.ac.at>
 *
 * ULE Decaps according to RFC 4326.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * To obtain the license, point your browser to
 * http://www.gnu.org/copyleft/gpl.html
 */

/*
 * ULE ChangeLog:
 * Feb 2004: hl/ws v1: Implementing draft-fair-ipdvb-ule-01.txt
 *
 * Dec 2004: hl/ws v2: Implementing draft-ietf-ipdvb-ule-03.txt:
 *                       ULE Extension header handling.
 *                     Bugreports by Moritz Vieth and Hanno Tersteegen,
 *                       Fraunhofer Institute for Open Communication Systems
 *                       Competence Center for Advanced Satellite Communications.
 *                     Bugfixes and robustness improvements.
 *                     Filtering on dest MAC addresses, if present (D-Bit = 0)
 *                     ULE_DEBUG compile-time option.
 * Apr 2006: cp v3:    Bugfixes and compliance with RFC 4326 (ULE) by
 *                       Christian Praehauser <cpraehaus@cosy.sbg.ac.at>,
 *                       Paris Lodron University of Salzburg.
 */

/*
 * FIXME / TODO (dvb_net.c):
 *
 * Unloading does not work for 2.6.9 kernels: a refcount doesn't go to zero.
 */
#define pr_fmt(fmt) "dvb_net: " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dvb/net.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/crc32.h>
#include <linux/mutex.h>
#include <linux/sched.h>

#include "dvb_demux.h"
#include "dvb_net.h"
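/*
 * Compute a big-endian CRC32 over a scatter list of buffers.  ULE appends
 * a CRC-32 to every SNDU; verification below runs over the reassembled
 * length/type fields plus the skb payload without copying them together.
 */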
static inline __u32 iov_crc32(__u32 c, struct kvec *iov, unsigned int cnt)
{
    unsigned int j;

    for (j = 0; j < cnt; j++)
        c = crc32_be(c, iov[j].iov_base, iov[j].iov_len);
    return c;
}
#define DVB_NET_MULTICAST_MAX 10

#undef ULE_DEBUG

#ifdef ULE_DEBUG
static void hexdump(const unsigned char *buf, unsigned short len)
{
    print_hex_dump_debug("", DUMP_PREFIX_OFFSET, 16, 1, buf, len, true);
}
#endif

struct dvb_net_priv {
    int in_use;
    u16 pid;
    struct net_device *net;
    struct dvb_net *host;
    struct dmx_demux *demux;
    struct dmx_section_feed *secfeed;
    struct dmx_section_filter *secfilter;
    struct dmx_ts_feed *tsfeed;
    int multi_num;
    struct dmx_section_filter *multi_secfilter[DVB_NET_MULTICAST_MAX];
    unsigned char multi_macs[DVB_NET_MULTICAST_MAX][6];
    int rx_mode;
#define RX_MODE_UNI 0
#define RX_MODE_MULTI 1
#define RX_MODE_ALL_MULTI 2
#define RX_MODE_PROMISC 3
    struct work_struct set_multicast_list_wq;
    struct work_struct restart_net_feed_wq;
    unsigned char feedtype;        /* Either DVB_NET_FEEDTYPE_MPE or DVB_NET_FEEDTYPE_ULE. */
    int need_pusi;                 /* Set to 1, if synchronization on PUSI required. */
    unsigned char tscc;            /* TS continuity counter after sync on PUSI. */
    struct sk_buff *ule_skb;       /* ULE SNDU decodes into this buffer. */
    unsigned char *ule_next_hdr;   /* Pointer into skb to next ULE extension header. */
    unsigned short ule_sndu_len;   /* ULE SNDU length in bytes, w/o D-Bit. */
    unsigned short ule_sndu_type;  /* ULE SNDU type field, complete. */
    unsigned char ule_sndu_type_1; /* ULE SNDU type field, if split across 2 TS cells. */
    unsigned char ule_dbit;        /* Whether the DestMAC address is present (D-bit unset) or not (D-bit set). */
    unsigned char ule_bridged;     /* Whether the ULE_BRIDGED extension header was found. */
    int ule_sndu_remain;           /* Nr. of bytes still required for current ULE SNDU. */
    unsigned long ts_count;        /* Current ts cell counter. */
    struct mutex mutex;
};
/**
 * Determine the packet's protocol ID. The rule here is that we
 * assume 802.3 if the type field is short enough to be a length.
 * This is normal practice and works for any 'now in use' protocol.
 *
 * stolen from eth.c out of the linux kernel, hacked for dvb-device
 * by Michael Holzt <kju@debian.org>
 */
static __be16 dvb_net_eth_type_trans(struct sk_buff *skb,
                                     struct net_device *dev)
{
    struct ethhdr *eth;
    unsigned char *rawp;

    skb_reset_mac_header(skb);
    skb_pull(skb, dev->hard_header_len);
    eth = eth_hdr(skb);

    if (*eth->h_dest & 1) {
        if (ether_addr_equal(eth->h_dest, dev->broadcast))
            skb->pkt_type = PACKET_BROADCAST;
        else
            skb->pkt_type = PACKET_MULTICAST;
    }

    if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
        return eth->h_proto;

    rawp = skb->data;

    /**
     * This is a magic hack to spot IPX packets. Older Novell breaks
     * the protocol design and runs IPX over 802.3 without an 802.2 LLC
     * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
     * won't work for fault tolerant netware but does for the rest.
     */
    if (*(unsigned short *)rawp == 0xFFFF)
        return htons(ETH_P_802_3);

    /**
     * Real 802.2 LLC
     */
    return htons(ETH_P_802_2);
}
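/*
 * MPEG-2 TS constants: a TS cell is 188 bytes and starts with the 0x47
 * sync byte.  TEI (transport error indicator) and PUSI (payload unit
 * start indicator) are flags in header byte 1; scrambling control (SC)
 * and the adaptation field bits live in header byte 3.
 */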
#define TS_SZ    188
#define TS_SYNC  0x47
#define TS_TEI   0x80
#define TS_SC    0xC0
#define TS_PUSI  0x40
#define TS_AF_A  0x20
#define TS_AF_D  0x10

/* ULE Extension Header handlers. */
#define ULE_TEST    0
#define ULE_BRIDGED 1

#define ULE_OPTEXTHDR_PADDING 0

static int ule_test_sndu(struct dvb_net_priv *p)
{
    return -1;
}

static int ule_bridged_sndu(struct dvb_net_priv *p)
{
    struct ethhdr *hdr = (struct ethhdr *)p->ule_next_hdr;

    if (ntohs(hdr->h_proto) < ETH_P_802_3_MIN) {
        int framelen = p->ule_sndu_len - ((p->ule_next_hdr + sizeof(struct ethhdr)) - p->ule_skb->data);

        /* A frame Type < ETH_P_802_3_MIN for a bridged frame introduces an LLC Length field. */
        if (framelen != ntohs(hdr->h_proto))
            return -1;
    }
    /*
     * Note:
     * From RFC 4326:
     * "A bridged SNDU is a Mandatory Extension Header of Type 1.
     * It must be the final (or only) extension header specified in the header chain of a SNDU."
     * The 'ule_bridged' flag will cause the extension header processing loop to terminate.
     */
    p->ule_bridged = 1;
    return 0;
}

static int ule_exthdr_padding(struct dvb_net_priv *p)
{
    return 0;
}
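/*
 * In the ULE Type field, values below ETH_P_802_3_MIN denote extension
 * headers.  Bits 8..10 carry H-LEN (0 means a mandatory extension header,
 * otherwise the header length in 16-bit words) and the low byte carries
 * H-Type, which indexes the handler tables below.
 */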
/** Handle ULE extension headers.
 *  Function is called after a successful CRC32 verification of an ULE SNDU to complete its decoding.
 *  Returns: >= 0: nr. of bytes consumed by next extension header
 *           -1:   Mandatory extension header that is not recognized or TEST SNDU; discard.
 */
static int handle_one_ule_extension(struct dvb_net_priv *p)
{
    /* Table of mandatory extension header handlers.  The header type is the index. */
    static int (*ule_mandatory_ext_handlers[255])(struct dvb_net_priv *p) =
        { [0] = ule_test_sndu, [1] = ule_bridged_sndu, [2] = NULL, };

    /* Table of optional extension header handlers.  The header type is the index. */
    static int (*ule_optional_ext_handlers[255])(struct dvb_net_priv *p) =
        { [0] = ule_exthdr_padding, [1] = NULL, };

    int ext_len = 0;
    unsigned char hlen = (p->ule_sndu_type & 0x0700) >> 8;
    unsigned char htype = p->ule_sndu_type & 0x00FF;

    /* Discriminate mandatory and optional extension headers. */
    if (hlen == 0) {
        /* Mandatory extension header */
        if (ule_mandatory_ext_handlers[htype]) {
            ext_len = ule_mandatory_ext_handlers[htype](p);
            if (ext_len >= 0) {
                p->ule_next_hdr += ext_len;
                if (!p->ule_bridged) {
                    p->ule_sndu_type = ntohs(*(__be16 *)p->ule_next_hdr);
                    p->ule_next_hdr += 2;
                } else {
                    p->ule_sndu_type = ntohs(*(__be16 *)(p->ule_next_hdr + ((p->ule_dbit ? 2 : 3) * ETH_ALEN)));
                    /* This assures the extension handling loop will terminate. */
                }
            }
            /* else: extension handler failed or SNDU should be discarded */
        } else
            ext_len = -1; /* SNDU has to be discarded. */
    } else {
        /* Optional extension header.  Calculate the length. */
        ext_len = hlen << 1;

        /* Process the optional extension header according to its type. */
        if (ule_optional_ext_handlers[htype])
            (void)ule_optional_ext_handlers[htype](p);
        p->ule_next_hdr += ext_len;
        p->ule_sndu_type = ntohs(*(__be16 *)(p->ule_next_hdr - 2));
        /*
         * note: the length of the next header type is included in the
         * length of THIS optional extension header
         */
    }

    return ext_len;
}

static int handle_ule_extensions(struct dvb_net_priv *p)
{
    int total_ext_len = 0, l;

    p->ule_next_hdr = p->ule_skb->data;
    do {
        l = handle_one_ule_extension(p);
        if (l < 0)
            return l; /* Stop extension header processing and discard SNDU. */
        total_ext_len += l;
#ifdef ULE_DEBUG
        pr_debug("ule_next_hdr=%p, ule_sndu_type=%i, l=%i, total_ext_len=%i\n",
                 p->ule_next_hdr, (int)p->ule_sndu_type,
                 l, total_ext_len);
#endif
    } while (p->ule_sndu_type < ETH_P_802_3_MIN);

    return total_ext_len;
}
/** Prepare for a new ULE SNDU: reset the decoder state. */
static inline void reset_ule(struct dvb_net_priv *p)
{
    p->ule_skb = NULL;
    p->ule_next_hdr = NULL;
    p->ule_sndu_len = 0;
    p->ule_sndu_type = 0;
    p->ule_sndu_type_1 = 0;
    p->ule_sndu_remain = 0;
    p->ule_dbit = 0xFF;
    p->ule_bridged = 0;
}

/**
 * Decode ULE SNDUs according to draft-ietf-ipdvb-ule-03.txt from a sequence of
 * TS cells of a single PID.
 */
struct dvb_net_ule_handle {
    struct net_device *dev;
    struct dvb_net_priv *priv;
    struct ethhdr *ethh;
    const u8 *buf;
    size_t buf_len;
    unsigned long skipped;
    const u8 *ts, *ts_end, *from_where;
    u8 ts_remain, how_much, new_ts;
    bool error;
};

#ifdef ULE_DEBUG
/*
 * The code inside ULE_DEBUG keeps a history of the
 * last 100 TS cells processed.
 */
static unsigned char ule_hist[100 * TS_SZ];
static unsigned char *ule_where = ule_hist, ule_dump;
#endif
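/*
 * The dvb_net_ule_* helpers below each handle one stage of TS cell
 * processing.  They return 1 when the caller should continue with the
 * next TS cell, 0 to proceed with the current one, and (for
 * dvb_net_ule_new_payload) -1 on a fatal allocation error.
 */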
static int dvb_net_ule_new_ts_cell(struct dvb_net_ule_handle *h)
{
    /* We are about to process a new TS cell. */

#ifdef ULE_DEBUG
    if (ule_where >= &ule_hist[100 * TS_SZ])
        ule_where = ule_hist;
    memcpy(ule_where, h->ts, TS_SZ);
    if (ule_dump) {
        hexdump(ule_where, TS_SZ);
        ule_dump = 0;
    }
    ule_where += TS_SZ;
#endif

    /*
     * Check TS error conditions: sync_byte, transport_error_indicator,
     * scrambling_control.
     */
    if ((h->ts[0] != TS_SYNC) || (h->ts[1] & TS_TEI) ||
        ((h->ts[3] & TS_SC) != 0)) {
        pr_warn("%lu: Invalid TS cell: SYNC %#x, TEI %u, SC %#x.\n",
                h->priv->ts_count, h->ts[0],
                (h->ts[1] & TS_TEI) >> 7,
                (h->ts[3] & TS_SC) >> 6);

        /* Drop partly decoded SNDU, reset state, resync on PUSI. */
        if (h->priv->ule_skb) {
            dev_kfree_skb(h->priv->ule_skb);
            /* Prepare for next SNDU. */
            h->dev->stats.rx_errors++;
            h->dev->stats.rx_frame_errors++;
        }
        reset_ule(h->priv);
        h->priv->need_pusi = 1;

        /* Continue with next TS cell. */
        h->ts += TS_SZ;
        h->priv->ts_count++;
        return 1;
    }

    h->ts_remain = 184;
    h->from_where = h->ts + 4;

    return 0;
}
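/*
 * Resynchronize on the Payload Unit Start Indicator: when PUSI is set,
 * the first payload byte (ts[4]) is a pointer field giving the offset of
 * the first SNDU that starts in this cell; otherwise the cell is skipped.
 */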
static int dvb_net_ule_ts_pusi(struct dvb_net_ule_handle *h)
{
    if (h->ts[1] & TS_PUSI) {
        /* Find beginning of first ULE SNDU in current TS cell. */
        /* Synchronize continuity counter. */
        h->priv->tscc = h->ts[3] & 0x0F;
        /* There is a pointer field here. */
        if (h->ts[4] > h->ts_remain) {
            pr_err("%lu: Invalid ULE packet (pointer field %d)\n",
                   h->priv->ts_count, h->ts[4]);
            h->ts += TS_SZ;
            h->priv->ts_count++;
            return 1;
        }
        /* Skip to destination of pointer field. */
        h->from_where = &h->ts[5] + h->ts[4];
        h->ts_remain -= 1 + h->ts[4];
        h->skipped = 0;
    } else {
        h->skipped++;
        h->ts += TS_SZ;
        h->priv->ts_count++;
        return 1;
    }

    return 0;
}
static int dvb_net_ule_new_ts(struct dvb_net_ule_handle *h)
{
    /* Check continuity counter. */
    if ((h->ts[3] & 0x0F) == h->priv->tscc)
        h->priv->tscc = (h->priv->tscc + 1) & 0x0F;
    else {
        /* TS discontinuity handling: */
        pr_warn("%lu: TS discontinuity: got %#x, expected %#x.\n",
                h->priv->ts_count, h->ts[3] & 0x0F,
                h->priv->tscc);
        /* Drop partly decoded SNDU, reset state, resync on PUSI. */
        if (h->priv->ule_skb) {
            dev_kfree_skb(h->priv->ule_skb);
            /* Prepare for next SNDU. */
            // reset_ule(h->priv); moved to below.
            h->dev->stats.rx_errors++;
            h->dev->stats.rx_frame_errors++;
        }
        reset_ule(h->priv);
        /* skip to next PUSI. */
        h->priv->need_pusi = 1;
        return 1;
    }

    /*
     * If we still have an incomplete payload, but PUSI is
     * set; some TS cells are missing.
     * This is only possible here, if we missed exactly 16 TS
     * cells (continuity counter wrap).
     */
    if (h->ts[1] & TS_PUSI) {
        if (!h->priv->need_pusi) {
            if (!(*h->from_where < (h->ts_remain - 1)) ||
                *h->from_where != h->priv->ule_sndu_remain) {
                /*
                 * Pointer field is invalid.
                 * Drop this TS cell and any started ULE SNDU.
                 */
                pr_warn("%lu: Invalid pointer field: %u.\n",
                        h->priv->ts_count,
                        *h->from_where);

                /*
                 * Drop partly decoded SNDU, reset state,
                 * resync on PUSI.
                 */
                if (h->priv->ule_skb) {
                    h->error = true;
                    dev_kfree_skb(h->priv->ule_skb);
                }

                if (h->error || h->priv->ule_sndu_remain) {
                    h->dev->stats.rx_errors++;
                    h->dev->stats.rx_frame_errors++;
                    h->error = false;
                }

                reset_ule(h->priv);
                h->priv->need_pusi = 1;
                return 1;
            }
            /*
             * Skip pointer field (we're processing a
             * packed payload).
             */
            h->from_where += 1;
            h->ts_remain -= 1;
        } else
            h->priv->need_pusi = 0;

        if (h->priv->ule_sndu_remain > 183) {
            /*
             * Current SNDU lacks more data than there
             * could be available in the current TS cell.
             */
            h->dev->stats.rx_errors++;
            h->dev->stats.rx_length_errors++;
            pr_warn("%lu: Expected %d more SNDU bytes, but got PUSI (pf %d, h->ts_remain %d). Flushing incomplete payload.\n",
                    h->priv->ts_count,
                    h->priv->ule_sndu_remain,
                    h->ts[4], h->ts_remain);
            dev_kfree_skb(h->priv->ule_skb);
            /* Prepare for next SNDU. */
            reset_ule(h->priv);
            /*
             * Resync: go to where pointer field points to:
             * start of next ULE SNDU.
             */
            h->from_where += h->ts[4];
            h->ts_remain -= h->ts[4];
        }
    }
    return 0;
}
/*
 * Start a new payload with skb.
 * Find ULE header.  It is only guaranteed that the
 * length field (2 bytes) is contained in the current
 * TS.
 * h->ts_remain has to be >= 2 here.
 */
static int dvb_net_ule_new_payload(struct dvb_net_ule_handle *h)
{
    if (h->ts_remain < 2) {
        pr_warn("Invalid payload packing: only %d bytes left in TS. Resyncing.\n",
                h->ts_remain);
        h->priv->ule_sndu_len = 0;
        h->priv->need_pusi = 1;
        h->ts += TS_SZ;
        return 1;
    }

    if (!h->priv->ule_sndu_len) {
        /* Got at least two bytes, thus extract the SNDU length. */
        h->priv->ule_sndu_len = h->from_where[0] << 8 |
                                h->from_where[1];
        if (h->priv->ule_sndu_len & 0x8000) {
            /* D-Bit is set: no dest mac present. */
            h->priv->ule_sndu_len &= 0x7FFF;
            h->priv->ule_dbit = 1;
        } else
            h->priv->ule_dbit = 0;

        if (h->priv->ule_sndu_len < 5) {
            pr_warn("%lu: Invalid ULE SNDU length %u. Resyncing.\n",
                    h->priv->ts_count,
                    h->priv->ule_sndu_len);
            h->dev->stats.rx_errors++;
            h->dev->stats.rx_length_errors++;
            h->priv->ule_sndu_len = 0;
            h->priv->need_pusi = 1;
            h->new_ts = 1;
            h->ts += TS_SZ;
            h->priv->ts_count++;
            return 1;
        }
        h->ts_remain -= 2;  /* consume the 2 bytes SNDU length. */
        h->from_where += 2;
    }

    h->priv->ule_sndu_remain = h->priv->ule_sndu_len + 2;
    /*
     * State of current TS:
     *   h->ts_remain (remaining bytes in the current TS cell)
     *   0     ule_type is not available now, we need the next TS cell
     *   1     the first byte of the ule_type is present
     *   >= 2  full ULE header present, maybe some payload data as well.
     */
    switch (h->ts_remain) {
    case 1:
        h->priv->ule_sndu_remain--;
        h->priv->ule_sndu_type = h->from_where[0] << 8;

        /* first byte of ule_type is set. */
        h->priv->ule_sndu_type_1 = 1;
        h->ts_remain -= 1;
        h->from_where += 1;
        /* fallthrough */
    case 0:
        h->new_ts = 1;
        h->ts += TS_SZ;
        h->priv->ts_count++;
        return 1;

    default: /* complete ULE header is present in current TS. */
        /* Extract ULE type field. */
        if (h->priv->ule_sndu_type_1) {
            h->priv->ule_sndu_type_1 = 0;
            h->priv->ule_sndu_type |= h->from_where[0];
            h->from_where += 1; /* points to payload start. */
            h->ts_remain -= 1;
        } else {
            /* Complete type is present in new TS. */
            h->priv->ule_sndu_type = h->from_where[0] << 8 |
                                     h->from_where[1];
            h->from_where += 2; /* points to payload start. */
            h->ts_remain -= 2;
        }
        break;
    }

    /*
     * Allocate the skb (decoder target buffer) with the correct size,
     * as follows:
     *
     * prepare for the largest case: bridged SNDU with MAC address
     * (dbit = 0).
     */
    h->priv->ule_skb = dev_alloc_skb(h->priv->ule_sndu_len +
                                     ETH_HLEN + ETH_ALEN);
    if (!h->priv->ule_skb) {
        pr_notice("%s: Memory squeeze, dropping packet.\n",
                  h->dev->name);
        h->dev->stats.rx_dropped++;
        return -1;
    }

    /* This includes the CRC32 _and_ dest mac, if !dbit. */
    h->priv->ule_sndu_remain = h->priv->ule_sndu_len;
    h->priv->ule_skb->dev = h->dev;
    /*
     * Leave space for Ethernet or bridged SNDU header
     * (eth hdr plus one MAC addr).
     */
    skb_reserve(h->priv->ule_skb, ETH_HLEN + ETH_ALEN);

    return 0;
}
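/*
 * Decide whether a received SNDU is addressed to us.  Returns 1 to drop:
 * unicast frames whose destination differs from the device address, and
 * multicast frames that do not match the current multicast filter list.
 */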
static int dvb_net_ule_should_drop(struct dvb_net_ule_handle *h)
{
    static const u8 bc_addr[ETH_ALEN] = { [0 ... ETH_ALEN - 1] = 0xff };

    /*
     * The destination MAC address is the next data in the skb.  It comes
     * before any extension headers.
     *
     * Check if the payload of this SNDU should be passed up the stack.
     */
    if (h->priv->rx_mode == RX_MODE_PROMISC)
        return 0;

    if (h->priv->ule_skb->data[0] & 0x01) {
        /* multicast or broadcast */
        if (!ether_addr_equal(h->priv->ule_skb->data, bc_addr)) {
            /* multicast */
            if (h->priv->rx_mode == RX_MODE_MULTI) {
                int i;

                for (i = 0; i < h->priv->multi_num &&
                     !ether_addr_equal(h->priv->ule_skb->data,
                                       h->priv->multi_macs[i]);
                     i++)
                    ;
                if (i == h->priv->multi_num)
                    return 1;
            } else if (h->priv->rx_mode != RX_MODE_ALL_MULTI)
                return 1; /* no broadcast; */

            /*
             * else:
             * all multicast mode: accept all multicast packets
             */
        }
        /* else: broadcast */
    } else if (!ether_addr_equal(h->priv->ule_skb->data, h->dev->dev_addr))
        return 1;

    return 0;
}
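/*
 * Verify the trailing CRC-32 of a completed SNDU.  On success the CRC is
 * stripped, destination filtering and extension headers are applied, an
 * Ethernet header is (re)constructed and the frame is passed to netif_rx().
 */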
static void dvb_net_ule_check_crc(struct dvb_net_ule_handle *h,
                                  struct kvec iov[3],
                                  u32 ule_crc, u32 expected_crc)
{
    u8 dest_addr[ETH_ALEN];

    if (ule_crc != expected_crc) {
        pr_warn("%lu: CRC32 check FAILED: %08x / %08x, SNDU len %d type %#x, ts_remain %d, next 2: %x.\n",
                h->priv->ts_count, ule_crc, expected_crc,
                h->priv->ule_sndu_len, h->priv->ule_sndu_type,
                h->ts_remain,
                h->ts_remain > 2 ?
                    *(unsigned short *)h->from_where : 0);

#ifdef ULE_DEBUG
        hexdump(iov[0].iov_base, iov[0].iov_len);
        hexdump(iov[1].iov_base, iov[1].iov_len);
        hexdump(iov[2].iov_base, iov[2].iov_len);

        if (ule_where == ule_hist) {
            hexdump(&ule_hist[98 * TS_SZ], TS_SZ);
            hexdump(&ule_hist[99 * TS_SZ], TS_SZ);
        } else if (ule_where == &ule_hist[TS_SZ]) {
            hexdump(&ule_hist[99 * TS_SZ], TS_SZ);
            hexdump(ule_hist, TS_SZ);
        } else {
            hexdump(ule_where - TS_SZ - TS_SZ, TS_SZ);
            hexdump(ule_where - TS_SZ, TS_SZ);
        }
        ule_dump = 1;
#endif

        h->dev->stats.rx_errors++;
        h->dev->stats.rx_crc_errors++;
        dev_kfree_skb(h->priv->ule_skb);

        return;
    }

    /* CRC32 verified OK. */

    /* CRC32 was OK, so remove it from skb. */
    h->priv->ule_skb->tail -= 4;
    h->priv->ule_skb->len -= 4;

    if (!h->priv->ule_dbit) {
        if (dvb_net_ule_should_drop(h)) {
#ifdef ULE_DEBUG
            netdev_dbg(h->dev,
                       "Dropping SNDU: MAC destination address does not match: dest addr: %pM, h->dev addr: %pM\n",
                       h->priv->ule_skb->data, h->dev->dev_addr);
#endif
            dev_kfree_skb(h->priv->ule_skb);
            return;
        }

        skb_copy_from_linear_data(h->priv->ule_skb, dest_addr,
                                  ETH_ALEN);
        skb_pull(h->priv->ule_skb, ETH_ALEN);
    } else {
        /* dest_addr buffer is only valid if h->priv->ule_dbit == 0 */
        eth_zero_addr(dest_addr);
    }

    /* Handle ULE Extension Headers. */
    if (h->priv->ule_sndu_type < ETH_P_802_3_MIN) {
        /* There is an extension header.  Handle it accordingly. */
        int l = handle_ule_extensions(h->priv);

        if (l < 0) {
            /*
             * Mandatory extension header unknown or TEST SNDU.
             * Drop it.
             */
            // pr_warn("Dropping SNDU, extension headers.\n");
            dev_kfree_skb(h->priv->ule_skb);
            return;
        }
        skb_pull(h->priv->ule_skb, l);
    }

    /*
     * Construct/assure correct ethernet header.
     * Note: in bridged mode (h->priv->ule_bridged != 0)
     * we already have the (original) ethernet
     * header at the start of the payload (after
     * optional dest. address and any extension
     * headers).
     */
    if (!h->priv->ule_bridged) {
        skb_push(h->priv->ule_skb, ETH_HLEN);
        h->ethh = (struct ethhdr *)h->priv->ule_skb->data;
        memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN);
        eth_zero_addr(h->ethh->h_source);
        h->ethh->h_proto = htons(h->priv->ule_sndu_type);
    }
    /* else: skb is in correct state; nothing to do. */
    h->priv->ule_bridged = 0;

    /* Stuff into kernel's protocol stack. */
    h->priv->ule_skb->protocol = dvb_net_eth_type_trans(h->priv->ule_skb,
                                                        h->dev);
    /*
     * If D-bit is set (i.e. destination MAC address not present),
     * receive the packet anyhow.
     */
#if 0
    if (h->priv->ule_dbit && skb->pkt_type == PACKET_OTHERHOST)
        h->priv->ule_skb->pkt_type = PACKET_HOST;
#endif

    h->dev->stats.rx_packets++;
    h->dev->stats.rx_bytes += h->priv->ule_skb->len;
    netif_rx(h->priv->ule_skb);
}
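/*
 * Main ULE decapsulation loop.  For every 188-byte TS cell in the buffer:
 * validate the cell, resync on PUSI if needed, check the continuity
 * counter, open a new SNDU or append payload to the current one, and run
 * the CRC check once an SNDU is complete.  Several SNDUs may be packed
 * into one cell; a 0xFFFF end indicator marks the rest as padding.
 */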
static void dvb_net_ule(struct net_device *dev, const u8 *buf, size_t buf_len)
{
    int ret;
    struct dvb_net_ule_handle h = {
        .dev = dev,
        .priv = netdev_priv(dev),
        .ethh = NULL,
        .buf = buf,
        .buf_len = buf_len,
        .skipped = 0L,
        .ts = NULL,
        .ts_end = NULL,
        .from_where = NULL,
        .ts_remain = 0,
        .how_much = 0,
        .new_ts = 1,
        .error = false,
    };

    /*
     * For all TS cells in current buffer.
     * Apparently, we are called for every single TS cell.
     */
    for (h.ts = h.buf, h.ts_end = h.buf + h.buf_len;
         h.ts < h.ts_end; /* no incr. */) {
        if (h.new_ts) {
            /* We are about to process a new TS cell. */
            if (dvb_net_ule_new_ts_cell(&h))
                continue;
        }

        /* Synchronize on PUSI, if required. */
        if (h.priv->need_pusi) {
            if (dvb_net_ule_ts_pusi(&h))
                continue;
        }

        if (h.new_ts) {
            if (dvb_net_ule_new_ts(&h))
                continue;
        }

        /* Check if new payload needs to be started. */
        if (h.priv->ule_skb == NULL) {
            ret = dvb_net_ule_new_payload(&h);
            if (ret < 0)
                return;
            if (ret)
                continue;
        }

        /* Copy data into our current skb. */
        h.how_much = min(h.priv->ule_sndu_remain, (int)h.ts_remain);
        skb_put_data(h.priv->ule_skb, h.from_where, h.how_much);
        h.priv->ule_sndu_remain -= h.how_much;
        h.ts_remain -= h.how_much;
        h.from_where += h.how_much;

        /* Check for complete payload. */
        if (h.priv->ule_sndu_remain <= 0) {
            /* Check CRC32, we've got it in our skb already. */
            __be16 ulen = htons(h.priv->ule_sndu_len);
            __be16 utype = htons(h.priv->ule_sndu_type);
            const u8 *tail;
            struct kvec iov[3] = {
                { &ulen, sizeof ulen },
                { &utype, sizeof utype },
                { h.priv->ule_skb->data,
                  h.priv->ule_skb->len - 4 }
            };
            u32 ule_crc = ~0L, expected_crc;

            if (h.priv->ule_dbit) {
                /* Set D-bit for CRC32 verification,
                 * if it was set originally. */
                ulen |= htons(0x8000);
            }

            ule_crc = iov_crc32(ule_crc, iov, 3);
            tail = skb_tail_pointer(h.priv->ule_skb);
            expected_crc = *(tail - 4) << 24 |
                           *(tail - 3) << 16 |
                           *(tail - 2) << 8 |
                           *(tail - 1);

            dvb_net_ule_check_crc(&h, iov, ule_crc, expected_crc);

            /* Prepare for next SNDU. */
            reset_ule(h.priv);
        }

        /* More data in current TS (look at the bytes following the CRC32)? */
        if (h.ts_remain >= 2 && *((unsigned short *)h.from_where) != 0xFFFF) {
            /* Next ULE SNDU starts right there. */
            h.new_ts = 0;
            h.priv->ule_skb = NULL;
            h.priv->ule_sndu_type_1 = 0;
            h.priv->ule_sndu_len = 0;
            // pr_warn("More data in current TS: [%#x %#x %#x %#x]\n",
            //         *(h.from_where + 0), *(h.from_where + 1),
            //         *(h.from_where + 2), *(h.from_where + 3));
            // pr_warn("h.ts @ %p, stopped @ %p:\n", h.ts, h.from_where + 0);
            // hexdump(h.ts, 188);
        } else {
            h.new_ts = 1;
            h.ts += TS_SZ;
            h.priv->ts_count++;
            if (h.priv->ule_skb == NULL) {
                h.priv->need_pusi = 1;
                h.priv->ule_sndu_type_1 = 0;
                h.priv->ule_sndu_len = 0;
            }
        }
    } /* for all available TS cells */
}
static int dvb_net_ts_callback(const u8 *buffer1, size_t buffer1_len,
                               const u8 *buffer2, size_t buffer2_len,
                               struct dmx_ts_feed *feed)
{
    struct net_device *dev = feed->priv;

    if (buffer2)
        pr_warn("buffer2 not NULL: %p.\n", buffer2);
    if (buffer1_len > 32768)
        pr_warn("length > 32k: %zu.\n", buffer1_len);
    /* pr_info("TS callback: %u bytes, %u TS cells @ %p.\n",
              buffer1_len, buffer1_len / TS_SZ, buffer1); */

    dvb_net_ule(dev, buffer1, buffer1_len);
    return 0;
}
static void dvb_net_sec(struct net_device *dev,
                        const u8 *pkt, int pkt_len)
{
    u8 *eth;
    struct sk_buff *skb;
    struct net_device_stats *stats = &dev->stats;
    int snap = 0;

    /* note: pkt_len includes a 32bit checksum */
    if (pkt_len < 16) {
        pr_warn("%s: IP/MPE packet length = %d too small.\n",
                dev->name, pkt_len);
        stats->rx_errors++;
        stats->rx_length_errors++;
        return;
    }

    /* it seems some ISPs manage to screw up here, so we have to
     * relax the error checks... */
#if 0
    if ((pkt[5] & 0xfd) != 0xc1) {
        /* drop scrambled or broken packets */
#else
    if ((pkt[5] & 0x3c) != 0x00) {
        /* drop scrambled */
#endif
        stats->rx_errors++;
        stats->rx_crc_errors++;
        return;
    }
    if (pkt[5] & 0x02) {
        /* handle LLC/SNAP, see rfc-1042 */
        if (pkt_len < 24 || memcmp(&pkt[12], "\xaa\xaa\x03\0\0\0", 6)) {
            stats->rx_dropped++;
            return;
        }
        snap = 8;
    }
    if (pkt[7]) {
        /* FIXME: assemble datagram from multiple sections */
        stats->rx_errors++;
        stats->rx_frame_errors++;
        return;
    }

    /* we have 14 byte ethernet header (ip header follows);
     * 12 byte MPE header; 4 byte checksum; + 2 byte alignment, 8 byte LLC/SNAP
     */
    if (!(skb = dev_alloc_skb(pkt_len - 4 - 12 + 14 + 2 - snap))) {
        //pr_notice("%s: Memory squeeze, dropping packet.\n", dev->name);
        stats->rx_dropped++;
        return;
    }
    skb_reserve(skb, 2);  /* longword align L3 header */
    skb->dev = dev;

    /* copy L3 payload */
    eth = (u8 *)skb_put(skb, pkt_len - 12 - 4 + 14 - snap);
    memcpy(eth + 14, pkt + 12 + snap, pkt_len - 12 - 4 - snap);
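    /*
     * Rebuild the destination MAC from the MPE datagram_section header
     * (EN 301 192): the two least significant address bytes sit at
     * section offsets 3..4 and the four most significant ones at
     * offsets 8..11, each group stored in reverse byte order.
     */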
    /* create ethernet header: */
    eth[0] = pkt[0x0b];
    eth[1] = pkt[0x0a];
    eth[2] = pkt[0x09];
    eth[3] = pkt[0x08];
    eth[4] = pkt[0x04];
    eth[5] = pkt[0x03];

    eth[6] = eth[7] = eth[8] = eth[9] = eth[10] = eth[11] = 0;

    if (snap) {
        eth[12] = pkt[18];
        eth[13] = pkt[19];
    } else {
        /* protocol numbers are from rfc-1700 or
         * http://www.iana.org/assignments/ethernet-numbers
         */
        if (pkt[12] >> 4 == 6) {  /* version field from IP header */
            eth[12] = 0x86;       /* IPv6 */
            eth[13] = 0xdd;
        } else {
            eth[12] = 0x08;       /* IPv4 */
            eth[13] = 0x00;
        }
    }

    skb->protocol = dvb_net_eth_type_trans(skb, dev);

    stats->rx_packets++;
    stats->rx_bytes += skb->len;
    netif_rx(skb);
}
static int dvb_net_sec_callback(const u8 *buffer1, size_t buffer1_len,
                                const u8 *buffer2, size_t buffer2_len,
                                struct dmx_section_filter *filter)
{
    struct net_device *dev = filter->priv;

    /**
     * we rely on the DVB API definition where exactly one complete
     * section is delivered in buffer1
     */
    dvb_net_sec(dev, buffer1, buffer1_len);
    return 0;
}
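/*
 * The DVB network interface is receive-only: there is no return channel,
 * so anything handed to ndo_start_xmit is silently dropped.
 */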
static int dvb_net_tx(struct sk_buff *skb, struct net_device *dev)
{
    dev_kfree_skb(skb);
    return NETDEV_TX_OK;
}

static u8 mask_normal[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
static u8 mask_allmulti[6] = {0xff, 0xff, 0xff, 0x00, 0x00, 0x00};
static u8 mac_allmulti[6] = {0x01, 0x00, 0x5e, 0x00, 0x00, 0x00};
static u8 mask_promisc[6] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
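/*
 * Program one hardware section filter: table_id 0x3e (MPE datagram
 * section) plus the destination MAC, whose bytes are matched at the
 * same scattered section offsets used by dvb_net_sec() above.
 */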
static int dvb_net_filter_sec_set(struct net_device *dev,
                                  struct dmx_section_filter **secfilter,
                                  u8 *mac, u8 *mac_mask)
{
    struct dvb_net_priv *priv = netdev_priv(dev);
    int ret;

    *secfilter = NULL;
    ret = priv->secfeed->allocate_filter(priv->secfeed, secfilter);
    if (ret < 0) {
        pr_err("%s: could not get filter\n", dev->name);
        return ret;
    }

    (*secfilter)->priv = (void *)dev;

    memset((*secfilter)->filter_value, 0x00, DMX_MAX_FILTER_SIZE);
    memset((*secfilter)->filter_mask, 0x00, DMX_MAX_FILTER_SIZE);
    memset((*secfilter)->filter_mode, 0xff, DMX_MAX_FILTER_SIZE);

    (*secfilter)->filter_value[0] = 0x3e;
    (*secfilter)->filter_value[3] = mac[5];
    (*secfilter)->filter_value[4] = mac[4];
    (*secfilter)->filter_value[8] = mac[3];
    (*secfilter)->filter_value[9] = mac[2];
    (*secfilter)->filter_value[10] = mac[1];
    (*secfilter)->filter_value[11] = mac[0];
    (*secfilter)->filter_mask[0] = 0xff;
    (*secfilter)->filter_mask[3] = mac_mask[5];
    (*secfilter)->filter_mask[4] = mac_mask[4];
    (*secfilter)->filter_mask[8] = mac_mask[3];
    (*secfilter)->filter_mask[9] = mac_mask[2];
    (*secfilter)->filter_mask[10] = mac_mask[1];
    (*secfilter)->filter_mask[11] = mac_mask[0];

    netdev_dbg(dev, "filter mac=%pM mask=%pM\n", mac, mac_mask);

    return 0;
}
static int dvb_net_feed_start(struct net_device *dev)
{
    int ret = 0, i;
    struct dvb_net_priv *priv = netdev_priv(dev);
    struct dmx_demux *demux = priv->demux;
    unsigned char *mac = (unsigned char *)dev->dev_addr;

    netdev_dbg(dev, "rx_mode %i\n", priv->rx_mode);
    mutex_lock(&priv->mutex);
    if (priv->tsfeed || priv->secfeed || priv->secfilter || priv->multi_secfilter[0])
        pr_err("%s: BUG %d\n", __func__, __LINE__);

    priv->secfeed = NULL;
    priv->secfilter = NULL;
    priv->tsfeed = NULL;

    if (priv->feedtype == DVB_NET_FEEDTYPE_MPE) {
        netdev_dbg(dev, "alloc secfeed\n");
        ret = demux->allocate_section_feed(demux, &priv->secfeed,
                                           dvb_net_sec_callback);
        if (ret < 0) {
            pr_err("%s: could not allocate section feed\n",
                   dev->name);
            goto error;
        }

        ret = priv->secfeed->set(priv->secfeed, priv->pid, 1);

        if (ret < 0) {
            pr_err("%s: could not set section feed\n", dev->name);
            priv->demux->release_section_feed(priv->demux, priv->secfeed);
            priv->secfeed = NULL;
            goto error;
        }

        if (priv->rx_mode != RX_MODE_PROMISC) {
            netdev_dbg(dev, "set secfilter\n");
            dvb_net_filter_sec_set(dev, &priv->secfilter, mac, mask_normal);
        }

        switch (priv->rx_mode) {
        case RX_MODE_MULTI:
            for (i = 0; i < priv->multi_num; i++) {
                netdev_dbg(dev, "set multi_secfilter[%d]\n", i);
                dvb_net_filter_sec_set(dev, &priv->multi_secfilter[i],
                                       priv->multi_macs[i], mask_normal);
            }
            break;
        case RX_MODE_ALL_MULTI:
            priv->multi_num = 1;
            netdev_dbg(dev, "set multi_secfilter[0]\n");
            dvb_net_filter_sec_set(dev, &priv->multi_secfilter[0],
                                   mac_allmulti, mask_allmulti);
            break;
        case RX_MODE_PROMISC:
            priv->multi_num = 0;
            netdev_dbg(dev, "set secfilter\n");
            dvb_net_filter_sec_set(dev, &priv->secfilter, mac, mask_promisc);
            break;
        }

        netdev_dbg(dev, "start filtering\n");
        priv->secfeed->start_filtering(priv->secfeed);
    } else if (priv->feedtype == DVB_NET_FEEDTYPE_ULE) {
        ktime_t timeout = ns_to_ktime(10 * NSEC_PER_MSEC);

        /* we have payloads encapsulated in TS */
        netdev_dbg(dev, "alloc tsfeed\n");
        ret = demux->allocate_ts_feed(demux, &priv->tsfeed, dvb_net_ts_callback);
        if (ret < 0) {
            pr_err("%s: could not allocate ts feed\n", dev->name);
            goto error;
        }

        /* Set netdevice pointer for ts decaps callback. */
        priv->tsfeed->priv = (void *)dev;
        ret = priv->tsfeed->set(priv->tsfeed,
                                priv->pid,     /* pid */
                                TS_PACKET,     /* type */
                                DMX_PES_OTHER, /* pes type */
                                timeout);      /* timeout */

        if (ret < 0) {
            pr_err("%s: could not set ts feed\n", dev->name);
            priv->demux->release_ts_feed(priv->demux, priv->tsfeed);
            priv->tsfeed = NULL;
            goto error;
        }

        netdev_dbg(dev, "start filtering\n");
        priv->tsfeed->start_filtering(priv->tsfeed);
    } else
        ret = -EINVAL;

error:
    mutex_unlock(&priv->mutex);
    return ret;
}
static int dvb_net_feed_stop(struct net_device *dev)
{
    struct dvb_net_priv *priv = netdev_priv(dev);
    int i, ret = 0;

    mutex_lock(&priv->mutex);
    if (priv->feedtype == DVB_NET_FEEDTYPE_MPE) {
        if (priv->secfeed) {
            if (priv->secfeed->is_filtering) {
                netdev_dbg(dev, "stop secfeed\n");
                priv->secfeed->stop_filtering(priv->secfeed);
            }

            if (priv->secfilter) {
                netdev_dbg(dev, "release secfilter\n");
                priv->secfeed->release_filter(priv->secfeed,
                                              priv->secfilter);
                priv->secfilter = NULL;
            }

            for (i = 0; i < priv->multi_num; i++) {
                if (priv->multi_secfilter[i]) {
                    netdev_dbg(dev, "release multi_filter[%d]\n",
                               i);
                    priv->secfeed->release_filter(priv->secfeed,
                                                  priv->multi_secfilter[i]);
                    priv->multi_secfilter[i] = NULL;
                }
            }

            priv->demux->release_section_feed(priv->demux, priv->secfeed);
            priv->secfeed = NULL;
        } else
            pr_err("%s: no feed to stop\n", dev->name);
    } else if (priv->feedtype == DVB_NET_FEEDTYPE_ULE) {
        if (priv->tsfeed) {
            if (priv->tsfeed->is_filtering) {
                netdev_dbg(dev, "stop tsfeed\n");
                priv->tsfeed->stop_filtering(priv->tsfeed);
            }
            priv->demux->release_ts_feed(priv->demux, priv->tsfeed);
            priv->tsfeed = NULL;
        } else
            pr_err("%s: no ts feed to stop\n", dev->name);
    } else
        ret = -EINVAL;
    mutex_unlock(&priv->mutex);
    return ret;
}
static int dvb_set_mc_filter(struct net_device *dev, unsigned char *addr)
{
    struct dvb_net_priv *priv = netdev_priv(dev);

    if (priv->multi_num == DVB_NET_MULTICAST_MAX)
        return -ENOMEM;

    memcpy(priv->multi_macs[priv->multi_num], addr, ETH_ALEN);
    priv->multi_num++;
    return 0;
}
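/*
 * ndo_set_rx_mode is called in atomic context, while stopping and
 * restarting the demux feeds may sleep, so the actual reconfiguration
 * is deferred to this workqueue handler.
 */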
static void wq_set_multicast_list(struct work_struct *work)
{
    struct dvb_net_priv *priv =
        container_of(work, struct dvb_net_priv, set_multicast_list_wq);
    struct net_device *dev = priv->net;

    dvb_net_feed_stop(dev);
    priv->rx_mode = RX_MODE_UNI;
    netif_addr_lock_bh(dev);

    if (dev->flags & IFF_PROMISC) {
        netdev_dbg(dev, "promiscuous mode\n");
        priv->rx_mode = RX_MODE_PROMISC;
    } else if ((dev->flags & IFF_ALLMULTI)) {
        netdev_dbg(dev, "allmulti mode\n");
        priv->rx_mode = RX_MODE_ALL_MULTI;
    } else if (!netdev_mc_empty(dev)) {
        struct netdev_hw_addr *ha;

        netdev_dbg(dev, "set_mc_list, %d entries\n",
                   netdev_mc_count(dev));

        priv->rx_mode = RX_MODE_MULTI;
        priv->multi_num = 0;

        netdev_for_each_mc_addr(ha, dev)
            dvb_set_mc_filter(dev, ha->addr);
    }

    netif_addr_unlock_bh(dev);
    dvb_net_feed_start(dev);
}
static void dvb_net_set_multicast_list(struct net_device *dev)
{
    struct dvb_net_priv *priv = netdev_priv(dev);

    schedule_work(&priv->set_multicast_list_wq);
}

static void wq_restart_net_feed(struct work_struct *work)
{
    struct dvb_net_priv *priv =
        container_of(work, struct dvb_net_priv, restart_net_feed_wq);
    struct net_device *dev = priv->net;

    if (netif_running(dev)) {
        dvb_net_feed_stop(dev);
        dvb_net_feed_start(dev);
    }
}

static int dvb_net_set_mac(struct net_device *dev, void *p)
{
    struct dvb_net_priv *priv = netdev_priv(dev);
    struct sockaddr *addr = p;

    memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

    if (netif_running(dev))
        schedule_work(&priv->restart_net_feed_wq);

    return 0;
}

static int dvb_net_open(struct net_device *dev)
{
    struct dvb_net_priv *priv = netdev_priv(dev);

    priv->in_use++;
    dvb_net_feed_start(dev);
    return 0;
}

static int dvb_net_stop(struct net_device *dev)
{
    struct dvb_net_priv *priv = netdev_priv(dev);

    priv->in_use--;
    return dvb_net_feed_stop(dev);
}

static const struct header_ops dvb_header_ops = {
    .create = eth_header,
    .parse = eth_header_parse,
};

static const struct net_device_ops dvb_netdev_ops = {
    .ndo_open = dvb_net_open,
    .ndo_stop = dvb_net_stop,
    .ndo_start_xmit = dvb_net_tx,
    .ndo_set_rx_mode = dvb_net_set_multicast_list,
    .ndo_set_mac_address = dvb_net_set_mac,
    .ndo_validate_addr = eth_validate_addr,
};

static void dvb_net_setup(struct net_device *dev)
{
    ether_setup(dev);

    dev->header_ops = &dvb_header_ops;
    dev->netdev_ops = &dvb_netdev_ops;
    dev->mtu = 4096;
    dev->max_mtu = 4096;

    dev->flags |= IFF_NOARP;
}
static int get_if(struct dvb_net *dvbnet)
{
    int i;

    for (i = 0; i < DVB_NET_DEVICES_MAX; i++)
        if (!dvbnet->state[i])
            break;

    if (i == DVB_NET_DEVICES_MAX)
        return -1;

    dvbnet->state[i] = 1;
    return i;
}

static int dvb_net_add_if(struct dvb_net *dvbnet, u16 pid, u8 feedtype)
{
    struct net_device *net;
    struct dvb_net_priv *priv;
    int result;
    int if_num;

    if (feedtype != DVB_NET_FEEDTYPE_MPE && feedtype != DVB_NET_FEEDTYPE_ULE)
        return -EINVAL;
    if ((if_num = get_if(dvbnet)) < 0)
        return -EINVAL;

    net = alloc_netdev(sizeof(struct dvb_net_priv), "dvb",
                       NET_NAME_UNKNOWN, dvb_net_setup);
    if (!net)
        return -ENOMEM;

    if (dvbnet->dvbdev->id)
        snprintf(net->name, IFNAMSIZ, "dvb%d%u%d",
                 dvbnet->dvbdev->adapter->num, dvbnet->dvbdev->id, if_num);
    else
        /* compatibility fix to keep dvb0_0 format */
        snprintf(net->name, IFNAMSIZ, "dvb%d_%d",
                 dvbnet->dvbdev->adapter->num, if_num);

    net->addr_len = 6;
    memcpy(net->dev_addr, dvbnet->dvbdev->adapter->proposed_mac, 6);

    dvbnet->device[if_num] = net;

    priv = netdev_priv(net);
    priv->net = net;
    priv->demux = dvbnet->demux;
    priv->pid = pid;
    priv->rx_mode = RX_MODE_UNI;
    priv->need_pusi = 1;
    priv->tscc = 0;
    priv->feedtype = feedtype;
    reset_ule(priv);

    INIT_WORK(&priv->set_multicast_list_wq, wq_set_multicast_list);
    INIT_WORK(&priv->restart_net_feed_wq, wq_restart_net_feed);
    mutex_init(&priv->mutex);

    net->base_addr = pid;

    if ((result = register_netdev(net)) < 0) {
        dvbnet->device[if_num] = NULL;
        free_netdev(net);
        return result;
    }
    pr_info("created network interface %s\n", net->name);

    return if_num;
}
static int dvb_net_remove_if(struct dvb_net *dvbnet, unsigned long num)
{
    struct net_device *net = dvbnet->device[num];
    struct dvb_net_priv *priv;

    if (!dvbnet->state[num])
        return -EINVAL;
    priv = netdev_priv(net);
    if (priv->in_use)
        return -EBUSY;

    dvb_net_stop(net);
    flush_work(&priv->set_multicast_list_wq);
    flush_work(&priv->restart_net_feed_wq);
    pr_info("removed network interface %s\n", net->name);
    unregister_netdev(net);
    dvbnet->state[num] = 0;
    dvbnet->device[num] = NULL;
    free_netdev(net);

    return 0;
}
static int dvb_net_do_ioctl(struct file *file,
                            unsigned int cmd, void *parg)
{
    struct dvb_device *dvbdev = file->private_data;
    struct dvb_net *dvbnet = dvbdev->priv;
    int ret = 0;

    if (((file->f_flags & O_ACCMODE) == O_RDONLY))
        return -EPERM;

    if (mutex_lock_interruptible(&dvbnet->ioctl_mutex))
        return -ERESTARTSYS;

    switch (cmd) {
    case NET_ADD_IF:
    {
        struct dvb_net_if *dvbnetif = parg;
        int result;

        if (!capable(CAP_SYS_ADMIN)) {
            ret = -EPERM;
            goto ioctl_error;
        }

        if (!try_module_get(dvbdev->adapter->module)) {
            ret = -EPERM;
            goto ioctl_error;
        }

        result = dvb_net_add_if(dvbnet, dvbnetif->pid, dvbnetif->feedtype);
        if (result < 0) {
            module_put(dvbdev->adapter->module);
            ret = result;
            goto ioctl_error;
        }

        dvbnetif->if_num = result;
        break;
    }
    case NET_GET_IF:
    {
        struct net_device *netdev;
        struct dvb_net_priv *priv_data;
        struct dvb_net_if *dvbnetif = parg;

        if (dvbnetif->if_num >= DVB_NET_DEVICES_MAX ||
            !dvbnet->state[dvbnetif->if_num]) {
            ret = -EINVAL;
            goto ioctl_error;
        }

        netdev = dvbnet->device[dvbnetif->if_num];

        priv_data = netdev_priv(netdev);
        dvbnetif->pid = priv_data->pid;
        dvbnetif->feedtype = priv_data->feedtype;
        break;
    }
    case NET_REMOVE_IF:
    {
        if (!capable(CAP_SYS_ADMIN)) {
            ret = -EPERM;
            goto ioctl_error;
        }
        if ((unsigned long)parg >= DVB_NET_DEVICES_MAX) {
            ret = -EINVAL;
            goto ioctl_error;
        }
        ret = dvb_net_remove_if(dvbnet, (unsigned long)parg);
        if (!ret)
            module_put(dvbdev->adapter->module);
        break;
    }

    /* binary compatibility cruft */
    case __NET_ADD_IF_OLD:
    {
        struct __dvb_net_if_old *dvbnetif = parg;
        int result;

        if (!capable(CAP_SYS_ADMIN)) {
            ret = -EPERM;
            goto ioctl_error;
        }

        if (!try_module_get(dvbdev->adapter->module)) {
            ret = -EPERM;
            goto ioctl_error;
        }

        result = dvb_net_add_if(dvbnet, dvbnetif->pid, DVB_NET_FEEDTYPE_MPE);
        if (result < 0) {
            module_put(dvbdev->adapter->module);
            ret = result;
            goto ioctl_error;
        }

        dvbnetif->if_num = result;
        break;
    }
    case __NET_GET_IF_OLD:
    {
        struct net_device *netdev;
        struct dvb_net_priv *priv_data;
        struct __dvb_net_if_old *dvbnetif = parg;

        if (dvbnetif->if_num >= DVB_NET_DEVICES_MAX ||
            !dvbnet->state[dvbnetif->if_num]) {
            ret = -EINVAL;
            goto ioctl_error;
        }

        netdev = dvbnet->device[dvbnetif->if_num];

        priv_data = netdev_priv(netdev);
        dvbnetif->pid = priv_data->pid;
        break;
    }
    default:
        ret = -ENOTTY;
        break;
    }

ioctl_error:
    mutex_unlock(&dvbnet->ioctl_mutex);
    return ret;
}
static long dvb_net_ioctl(struct file *file,
                          unsigned int cmd, unsigned long arg)
{
    return dvb_usercopy(file, cmd, arg, dvb_net_do_ioctl);
}

static int dvb_net_close(struct inode *inode, struct file *file)
{
    struct dvb_device *dvbdev = file->private_data;
    struct dvb_net *dvbnet = dvbdev->priv;

    dvb_generic_release(inode, file);

    if (dvbdev->users == 1 && dvbnet->exit == 1)
        wake_up(&dvbdev->wait_queue);
    return 0;
}

static const struct file_operations dvb_net_fops = {
    .owner = THIS_MODULE,
    .unlocked_ioctl = dvb_net_ioctl,
    .open = dvb_generic_open,
    .release = dvb_net_close,
    .llseek = noop_llseek,
};

static const struct dvb_device dvbdev_net = {
    .priv = NULL,
    .users = 1,
    .writers = 1,
#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
    .name = "dvb-net",
#endif
    .fops = &dvb_net_fops,
};

void dvb_net_release(struct dvb_net *dvbnet)
{
    int i;

    dvbnet->exit = 1;
    if (dvbnet->dvbdev->users < 1)
        wait_event(dvbnet->dvbdev->wait_queue,
                   dvbnet->dvbdev->users == 1);

    dvb_unregister_device(dvbnet->dvbdev);

    for (i = 0; i < DVB_NET_DEVICES_MAX; i++) {
        if (!dvbnet->state[i])
            continue;
        dvb_net_remove_if(dvbnet, i);
    }
}
EXPORT_SYMBOL(dvb_net_release);

int dvb_net_init(struct dvb_adapter *adap, struct dvb_net *dvbnet,
                 struct dmx_demux *dmx)
{
    int i;

    mutex_init(&dvbnet->ioctl_mutex);
    dvbnet->demux = dmx;

    for (i = 0; i < DVB_NET_DEVICES_MAX; i++)
        dvbnet->state[i] = 0;

    return dvb_register_device(adap, &dvbnet->dvbdev, &dvbdev_net,
                               dvbnet, DVB_DEVICE_NET, 0);
}
EXPORT_SYMBOL(dvb_net_init);