ssi_protocol.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200
  1. /*
  2. * ssi_protocol.c
  3. *
  4. * Implementation of the SSI McSAAB improved protocol.
  5. *
  6. * Copyright (C) 2010 Nokia Corporation. All rights reserved.
  7. * Copyright (C) 2013 Sebastian Reichel <sre@kernel.org>
  8. *
  9. * Contact: Carlos Chinea <carlos.chinea@nokia.com>
  10. *
  11. * This program is free software; you can redistribute it and/or
  12. * modify it under the terms of the GNU General Public License
  13. * version 2 as published by the Free Software Foundation.
  14. *
  15. * This program is distributed in the hope that it will be useful, but
  16. * WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  18. * General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU General Public License
  21. * along with this program; if not, write to the Free Software
  22. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
  23. * 02110-1301 USA
  24. */
  25. #include <linux/atomic.h>
  26. #include <linux/clk.h>
  27. #include <linux/device.h>
  28. #include <linux/err.h>
  29. #include <linux/gpio.h>
  30. #include <linux/if_ether.h>
  31. #include <linux/if_arp.h>
  32. #include <linux/if_phonet.h>
  33. #include <linux/init.h>
  34. #include <linux/irq.h>
  35. #include <linux/list.h>
  36. #include <linux/module.h>
  37. #include <linux/netdevice.h>
  38. #include <linux/notifier.h>
  39. #include <linux/scatterlist.h>
  40. #include <linux/skbuff.h>
  41. #include <linux/slab.h>
  42. #include <linux/spinlock.h>
  43. #include <linux/timer.h>
  44. #include <linux/hsi/hsi.h>
  45. #include <linux/hsi/ssi_protocol.h>
/* Implemented by the SSI port driver; toggles the HW wake line test. */
void ssi_waketest(struct hsi_client *cl, unsigned int enable);

/* Maximum number of data messages queued for transmission */
#define SSIP_TXQUEUE_LEN	100
#define SSIP_MAX_MTU		65535
#define SSIP_DEFAULT_MTU	4000
/* Phonet media type for this link */
#define PN_MEDIA_SOS		21
#define SSIP_MIN_PN_HDR		6	/* FIXME: Revisit */
#define SSIP_WDTOUT		2000	/* FIXME: has to be 500 msecs */
#define SSIP_KATOUT		15	/* 15 msecs */
#define SSIP_MAX_CMDS		5 /* Number of pre-allocated commands buffers */
/* SSI frames are 32 bits wide: round a byte count up to whole frames */
#define SSIP_BYTES_TO_FRAMES(x) ((((x) - 1) >> 2) + 1)
/* Leftover sync word from the CMT loader; must be ignored */
#define SSIP_CMT_LOADER_SYNC	0x11223344
/*
 * SSI protocol command definitions
 *
 * A command is one 32-bit word: 4-bit command id in the top nibble,
 * 28-bit payload below it.
 */
#define SSIP_COMMAND(data)	((data) >> 28)
#define SSIP_PAYLOAD(data)	((data) & 0xfffffff)
/* Commands */
#define SSIP_SW_BREAK		0
#define SSIP_BOOTINFO_REQ	1
#define SSIP_BOOTINFO_RESP	2
#define SSIP_WAKETEST_RESULT	3
#define SSIP_START_TRANS	4
#define SSIP_READY		5
/* Payloads */
#define SSIP_DATA_VERSION(data)	((data) & 0xff)
#define SSIP_LOCAL_VERID	1
#define SSIP_WAKETEST_OK	0
#define SSIP_WAKETEST_FAILED	1
#define SSIP_PDU_LENGTH(data)	(((data) >> 8) & 0xffff) /* in frames */
#define SSIP_MSG_ID(data)	((data) & 0xff)
/* Generic Command */
#define SSIP_CMD(cmd, payload)	(((cmd) << 28) | ((payload) & 0xfffffff))
/* Commands for the control channel */
#define SSIP_BOOTINFO_REQ_CMD(ver) \
		SSIP_CMD(SSIP_BOOTINFO_REQ, SSIP_DATA_VERSION(ver))
#define SSIP_BOOTINFO_RESP_CMD(ver) \
		SSIP_CMD(SSIP_BOOTINFO_RESP, SSIP_DATA_VERSION(ver))
#define SSIP_START_TRANS_CMD(pdulen, id) \
		SSIP_CMD(SSIP_START_TRANS, (((pdulen) << 8) | SSIP_MSG_ID(id)))
#define SSIP_READY_CMD		SSIP_CMD(SSIP_READY, 0)
#define SSIP_SWBREAK_CMD	SSIP_CMD(SSIP_SW_BREAK, 0)

/* Bit index in ssi_protocol.flags: wake line test in progress */
#define SSIP_WAKETEST_FLAG	0

/* Main state machine states */
enum {
	INIT,
	HANDSHAKE,
	ACTIVE,
};

/* Send state machine states */
enum {
	SEND_IDLE,
	WAIT4READY,
	SEND_READY,
	SENDING,
	SENDING_SWBREAK,
};

/* Receive state machine states */
enum {
	RECV_IDLE,
	RECV_READY,
	RECEIVING,
};

/**
 * struct ssi_protocol - SSI protocol (McSAAB) data
 * @main_state: Main state machine
 * @send_state: TX state machine
 * @recv_state: RX state machine
 * @flags: Flags, currently only used to follow wake line test
 * @rxid: RX data id
 * @txid: TX data id
 * @txqueue_len: TX queue length
 * @tx_wd: TX watchdog
 * @rx_wd: RX watchdog
 * @keep_alive: Workaround for SSI HW bug
 * @lock: To serialize access to this struct
 * @netdev: Phonet network device
 * @txqueue: TX data queue
 * @cmdqueue: Queue of free commands
 * @work: Work item for deferred transmission (see ssip_xmit_work())
 * @cl: HSI client own reference
 * @link: Link for ssip_list
 * @tx_usecnt: Refcount to keep track the slaves that use the wake line
 * @channel_id_cmd: HSI channel id for command stream
 * @channel_id_data: HSI channel id for data stream
 */
struct ssi_protocol {
	unsigned int		main_state;
	unsigned int		send_state;
	unsigned int		recv_state;
	unsigned long		flags;
	u8			rxid;
	u8			txid;
	unsigned int		txqueue_len;
	struct timer_list	tx_wd;
	struct timer_list	rx_wd;
	struct timer_list	keep_alive; /* wake-up workaround */
	spinlock_t		lock;
	struct net_device	*netdev;
	struct list_head	txqueue;
	struct list_head	cmdqueue;
	struct work_struct	work;
	struct hsi_client	*cl;
	struct list_head	link;
	atomic_t		tx_usecnt;
	int			channel_id_cmd;
	int			channel_id_data;
};

/* List of ssi protocol instances */
static LIST_HEAD(ssip_list);

static void ssip_rxcmd_complete(struct hsi_msg *msg);
  155. static inline void ssip_set_cmd(struct hsi_msg *msg, u32 cmd)
  156. {
  157. u32 *data;
  158. data = sg_virt(msg->sgt.sgl);
  159. *data = cmd;
  160. }
  161. static inline u32 ssip_get_cmd(struct hsi_msg *msg)
  162. {
  163. u32 *data;
  164. data = sg_virt(msg->sgt.sgl);
  165. return *data;
  166. }
/*
 * Map an sk_buff onto the scatterlist of an hsi_msg: one sg entry for
 * the linear head plus one per page fragment. The message must have
 * been allocated with exactly nr_frags + 1 entries.
 */
static void ssip_skb_to_msg(struct sk_buff *skb, struct hsi_msg *msg)
{
	skb_frag_t *frag;
	struct scatterlist *sg;
	int i;

	BUG_ON(msg->sgt.nents != (unsigned int)(skb_shinfo(skb)->nr_frags + 1));

	sg = msg->sgt.sgl;
	/* First entry covers the linear (head) part of the skb */
	sg_set_buf(sg, skb->data, skb_headlen(skb));
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		sg = sg_next(sg);
		BUG_ON(!sg);
		frag = &skb_shinfo(skb)->frags[i];
		/*
		 * NOTE(review): direct skb_frag_t field access is kernel
		 * version specific; newer kernels use skb_frag_page() &co.
		 */
		sg_set_page(sg, frag->page.p, frag->size, frag->page_offset);
	}
}
  182. static void ssip_free_data(struct hsi_msg *msg)
  183. {
  184. struct sk_buff *skb;
  185. skb = msg->context;
  186. pr_debug("free data: msg %p context %p skb %p\n", msg, msg->context,
  187. skb);
  188. msg->destructor = NULL;
  189. dev_kfree_skb(skb);
  190. hsi_free_msg(msg);
  191. }
  192. static struct hsi_msg *ssip_alloc_data(struct ssi_protocol *ssi,
  193. struct sk_buff *skb, gfp_t flags)
  194. {
  195. struct hsi_msg *msg;
  196. msg = hsi_alloc_msg(skb_shinfo(skb)->nr_frags + 1, flags);
  197. if (!msg)
  198. return NULL;
  199. ssip_skb_to_msg(skb, msg);
  200. msg->destructor = ssip_free_data;
  201. msg->channel = ssi->channel_id_data;
  202. msg->context = skb;
  203. return msg;
  204. }
  205. static inline void ssip_release_cmd(struct hsi_msg *msg)
  206. {
  207. struct ssi_protocol *ssi = hsi_client_drvdata(msg->cl);
  208. dev_dbg(&msg->cl->device, "Release cmd 0x%08x\n", ssip_get_cmd(msg));
  209. spin_lock_bh(&ssi->lock);
  210. list_add_tail(&msg->link, &ssi->cmdqueue);
  211. spin_unlock_bh(&ssi->lock);
  212. }
  213. static struct hsi_msg *ssip_claim_cmd(struct ssi_protocol *ssi)
  214. {
  215. struct hsi_msg *msg;
  216. BUG_ON(list_empty(&ssi->cmdqueue));
  217. spin_lock_bh(&ssi->lock);
  218. msg = list_first_entry(&ssi->cmdqueue, struct hsi_msg, link);
  219. list_del(&msg->link);
  220. spin_unlock_bh(&ssi->lock);
  221. msg->destructor = ssip_release_cmd;
  222. return msg;
  223. }
  224. static void ssip_free_cmds(struct ssi_protocol *ssi)
  225. {
  226. struct hsi_msg *msg, *tmp;
  227. list_for_each_entry_safe(msg, tmp, &ssi->cmdqueue, link) {
  228. list_del(&msg->link);
  229. msg->destructor = NULL;
  230. kfree(sg_virt(msg->sgt.sgl));
  231. hsi_free_msg(msg);
  232. }
  233. }
  234. static int ssip_alloc_cmds(struct ssi_protocol *ssi)
  235. {
  236. struct hsi_msg *msg;
  237. u32 *buf;
  238. unsigned int i;
  239. for (i = 0; i < SSIP_MAX_CMDS; i++) {
  240. msg = hsi_alloc_msg(1, GFP_KERNEL);
  241. if (!msg)
  242. goto out;
  243. buf = kmalloc(sizeof(*buf), GFP_KERNEL);
  244. if (!buf) {
  245. hsi_free_msg(msg);
  246. goto out;
  247. }
  248. sg_init_one(msg->sgt.sgl, buf, sizeof(*buf));
  249. msg->channel = ssi->channel_id_cmd;
  250. list_add_tail(&msg->link, &ssi->cmdqueue);
  251. }
  252. return 0;
  253. out:
  254. ssip_free_cmds(ssi);
  255. return -ENOMEM;
  256. }
/*
 * Move the RX state machine to @state and keep the RX watchdog and
 * keep-alive timers consistent with the new state. Caller holds
 * ssi->lock (all call sites take it).
 */
static void ssip_set_rxstate(struct ssi_protocol *ssi, unsigned int state)
{
	ssi->recv_state = state;
	switch (state) {
	case RECV_IDLE:
		del_timer(&ssi->rx_wd);
		/* keep_alive is shared with TX; stop it only if TX is idle */
		if (ssi->send_state == SEND_IDLE)
			del_timer(&ssi->keep_alive);
		break;
	case RECV_READY:
		/* CMT speech workaround */
		if (atomic_read(&ssi->tx_usecnt))
			break;
		/* Otherwise fall through */
	case RECEIVING:
		mod_timer(&ssi->keep_alive, jiffies +
			  msecs_to_jiffies(SSIP_KATOUT));
		mod_timer(&ssi->rx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
		break;
	default:
		break;
	}
}
/*
 * Move the TX state machine to @state and keep the TX watchdog and
 * keep-alive timers consistent with the new state. Caller holds
 * ssi->lock (all call sites take it).
 */
static void ssip_set_txstate(struct ssi_protocol *ssi, unsigned int state)
{
	ssi->send_state = state;
	switch (state) {
	case SEND_IDLE:
	case SEND_READY:
		del_timer(&ssi->tx_wd);
		/* keep_alive is shared with RX; stop it only if RX is idle */
		if (ssi->recv_state == RECV_IDLE)
			del_timer(&ssi->keep_alive);
		break;
	case WAIT4READY:
	case SENDING:
	case SENDING_SWBREAK:
		mod_timer(&ssi->keep_alive,
			  jiffies + msecs_to_jiffies(SSIP_KATOUT));
		mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
		break;
	default:
		break;
	}
}
  301. struct hsi_client *ssip_slave_get_master(struct hsi_client *slave)
  302. {
  303. struct hsi_client *master = ERR_PTR(-ENODEV);
  304. struct ssi_protocol *ssi;
  305. list_for_each_entry(ssi, &ssip_list, link)
  306. if (slave->device.parent == ssi->cl->device.parent) {
  307. master = ssi->cl;
  308. break;
  309. }
  310. return master;
  311. }
  312. EXPORT_SYMBOL_GPL(ssip_slave_get_master);
/*
 * Called by a slave client sharing the wake line (e.g. cmt-speech):
 * raise the TX wake line if the send machine is idle, then count the
 * new user in tx_usecnt.
 */
int ssip_slave_start_tx(struct hsi_client *master)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(master);

	dev_dbg(&master->device, "start TX %d\n", atomic_read(&ssi->tx_usecnt));
	spin_lock_bh(&ssi->lock);
	if (ssi->send_state == SEND_IDLE) {
		ssip_set_txstate(ssi, WAIT4READY);
		hsi_start_tx(master);
	}
	spin_unlock_bh(&ssi->lock);
	atomic_inc(&ssi->tx_usecnt);

	return 0;
}
EXPORT_SYMBOL_GPL(ssip_slave_start_tx);
/*
 * Counterpart of ssip_slave_start_tx(): drop the slave's reference and,
 * once no user is left and no data transfer is in flight (SEND_READY or
 * WAIT4READY), release the TX wake line.
 */
int ssip_slave_stop_tx(struct hsi_client *master)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(master);

	WARN_ON_ONCE(atomic_read(&ssi->tx_usecnt) == 0);

	if (atomic_dec_and_test(&ssi->tx_usecnt)) {
		spin_lock_bh(&ssi->lock);
		if ((ssi->send_state == SEND_READY) ||
			(ssi->send_state == WAIT4READY)) {
			ssip_set_txstate(ssi, SEND_IDLE);
			hsi_stop_tx(master);
		}
		spin_unlock_bh(&ssi->lock);
	}
	dev_dbg(&master->device, "stop TX %d\n", atomic_read(&ssi->tx_usecnt));

	return 0;
}
EXPORT_SYMBOL_GPL(ssip_slave_stop_tx);
  344. int ssip_slave_running(struct hsi_client *master)
  345. {
  346. struct ssi_protocol *ssi = hsi_client_drvdata(master);
  347. return netif_running(ssi->netdev);
  348. }
  349. EXPORT_SYMBOL_GPL(ssip_slave_running);
/*
 * Bring the protocol back to its initial state: take the carrier down,
 * flush all pending HSI messages, drop the wake line, stop the wake
 * line test and all timers, and free any queued TX data. Used on fatal
 * protocol errors and on interface stop.
 */
static void ssip_reset(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct list_head *head, *tmp;
	struct hsi_msg *msg;

	if (netif_running(ssi->netdev))
		netif_carrier_off(ssi->netdev);
	hsi_flush(cl);
	spin_lock_bh(&ssi->lock);
	if (ssi->send_state != SEND_IDLE)
		hsi_stop_tx(cl);
	spin_unlock_bh(&ssi->lock);
	/* Wake line test may still be running; stop it outside the lock */
	if (test_and_clear_bit(SSIP_WAKETEST_FLAG, &ssi->flags))
		ssi_waketest(cl, 0); /* FIXME: To be removed */
	spin_lock_bh(&ssi->lock);
	del_timer(&ssi->rx_wd);
	del_timer(&ssi->tx_wd);
	del_timer(&ssi->keep_alive);
	/* 0 == INIT / SEND_IDLE / RECV_IDLE for the three state machines */
	ssi->main_state = 0;
	ssi->send_state = 0;
	ssi->recv_state = 0;
	ssi->flags = 0;
	ssi->rxid = 0;
	ssi->txid = 0;
	/* Drop every data message still waiting for transmission */
	list_for_each_safe(head, tmp, &ssi->txqueue) {
		msg = list_entry(head, struct hsi_msg, link);
		dev_dbg(&cl->device, "Pending TX data\n");
		list_del(head);
		ssip_free_data(msg);
	}
	ssi->txqueue_len = 0;
	spin_unlock_bh(&ssi->lock);
}
  383. static void ssip_dump_state(struct hsi_client *cl)
  384. {
  385. struct ssi_protocol *ssi = hsi_client_drvdata(cl);
  386. struct hsi_msg *msg;
  387. spin_lock_bh(&ssi->lock);
  388. dev_err(&cl->device, "Main state: %d\n", ssi->main_state);
  389. dev_err(&cl->device, "Recv state: %d\n", ssi->recv_state);
  390. dev_err(&cl->device, "Send state: %d\n", ssi->send_state);
  391. dev_err(&cl->device, "CMT %s\n", (ssi->main_state == ACTIVE) ?
  392. "Online" : "Offline");
  393. dev_err(&cl->device, "Wake test %d\n",
  394. test_bit(SSIP_WAKETEST_FLAG, &ssi->flags));
  395. dev_err(&cl->device, "Data RX id: %d\n", ssi->rxid);
  396. dev_err(&cl->device, "Data TX id: %d\n", ssi->txid);
  397. list_for_each_entry(msg, &ssi->txqueue, link)
  398. dev_err(&cl->device, "pending TX data (%p)\n", msg);
  399. spin_unlock_bh(&ssi->lock);
  400. }
/*
 * Fatal protocol error: log the current state, reset the link, and
 * re-arm a command read so a new handshake from the modem can be
 * received (ssip_reset() -> hsi_flush() discarded all queued reads).
 */
static void ssip_error(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	ssip_dump_state(cl);
	ssip_reset(cl);
	msg = ssip_claim_cmd(ssi);
	msg->complete = ssip_rxcmd_complete;
	hsi_async_read(cl, msg);
}
/*
 * Keep-alive timer callback (workaround for an SSI HW bug): keeps
 * re-arming itself while either state machine is busy, and stops once
 * both TX and RX are idle. Runs in timer (softirq) context, hence
 * plain spin_lock() instead of spin_lock_bh().
 */
static void ssip_keep_alive(unsigned long data)
{
	struct hsi_client *cl = (struct hsi_client *)data;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	dev_dbg(&cl->device, "Keep alive kick in: m(%d) r(%d) s(%d)\n",
		ssi->main_state, ssi->recv_state, ssi->send_state);

	spin_lock(&ssi->lock);
	if (ssi->recv_state == RECV_IDLE)
		switch (ssi->send_state) {
		case SEND_READY:
			if (atomic_read(&ssi->tx_usecnt) == 0)
				break;
			/*
			 * Fall through. Workaround for cmt-speech
			 * in that case we relay on audio timers.
			 */
		case SEND_IDLE:
			spin_unlock(&ssi->lock);
			return;
		}
	mod_timer(&ssi->keep_alive, jiffies + msecs_to_jiffies(SSIP_KATOUT));
	spin_unlock(&ssi->lock);
}
  434. static void ssip_wd(unsigned long data)
  435. {
  436. struct hsi_client *cl = (struct hsi_client *)data;
  437. dev_err(&cl->device, "Watchdog trigerred\n");
  438. ssip_error(cl);
  439. }
  440. static void ssip_send_bootinfo_req_cmd(struct hsi_client *cl)
  441. {
  442. struct ssi_protocol *ssi = hsi_client_drvdata(cl);
  443. struct hsi_msg *msg;
  444. dev_dbg(&cl->device, "Issuing BOOT INFO REQ command\n");
  445. msg = ssip_claim_cmd(ssi);
  446. ssip_set_cmd(msg, SSIP_BOOTINFO_REQ_CMD(SSIP_LOCAL_VERID));
  447. msg->complete = ssip_release_cmd;
  448. hsi_async_write(cl, msg);
  449. dev_dbg(&cl->device, "Issuing RX command\n");
  450. msg = ssip_claim_cmd(ssi);
  451. msg->complete = ssip_rxcmd_complete;
  452. hsi_async_read(cl, msg);
  453. }
/*
 * RX wake line went up: if the link is ACTIVE, move to RECV_READY and
 * acknowledge with a READY command so the modem may start its transfer.
 */
static void ssip_start_rx(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	dev_dbg(&cl->device, "RX start M(%d) R(%d)\n", ssi->main_state,
		ssi->recv_state);
	spin_lock_bh(&ssi->lock);
	/*
	 * We can have two UP events in a row due to a short low
	 * high transition. Therefore we need to ignore the second UP event.
	 */
	if ((ssi->main_state != ACTIVE) || (ssi->recv_state == RECV_READY)) {
		spin_unlock_bh(&ssi->lock);
		return;
	}
	ssip_set_rxstate(ssi, RECV_READY);
	spin_unlock_bh(&ssi->lock);

	msg = ssip_claim_cmd(ssi);
	ssip_set_cmd(msg, SSIP_READY_CMD);
	msg->complete = ssip_release_cmd;
	dev_dbg(&cl->device, "Send READY\n");
	hsi_async_write(cl, msg);
}
  477. static void ssip_stop_rx(struct hsi_client *cl)
  478. {
  479. struct ssi_protocol *ssi = hsi_client_drvdata(cl);
  480. dev_dbg(&cl->device, "RX stop M(%d)\n", ssi->main_state);
  481. spin_lock_bh(&ssi->lock);
  482. if (likely(ssi->main_state == ACTIVE))
  483. ssip_set_rxstate(ssi, RECV_IDLE);
  484. spin_unlock_bh(&ssi->lock);
  485. }
  486. static void ssip_free_strans(struct hsi_msg *msg)
  487. {
  488. ssip_free_data(msg->context);
  489. ssip_release_cmd(msg);
  490. }
  491. static void ssip_strans_complete(struct hsi_msg *msg)
  492. {
  493. struct hsi_client *cl = msg->cl;
  494. struct ssi_protocol *ssi = hsi_client_drvdata(cl);
  495. struct hsi_msg *data;
  496. data = msg->context;
  497. ssip_release_cmd(msg);
  498. spin_lock_bh(&ssi->lock);
  499. ssip_set_txstate(ssi, SENDING);
  500. spin_unlock_bh(&ssi->lock);
  501. hsi_async_write(cl, data);
  502. }
/*
 * Take the first data message off the TX queue and start a McSAAB
 * transfer for it: a START_TRANS command carrying the PDU length in
 * frames and the running TX id goes out first; the data itself is
 * written from ssip_strans_complete() once the command completes.
 * Returns 0 if the queue was empty, otherwise hsi_async_write() result.
 */
static int ssip_xmit(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg, *dmsg;
	struct sk_buff *skb;

	spin_lock_bh(&ssi->lock);
	if (list_empty(&ssi->txqueue)) {
		spin_unlock_bh(&ssi->lock);
		return 0;
	}
	dmsg = list_first_entry(&ssi->txqueue, struct hsi_msg, link);
	list_del(&dmsg->link);
	ssi->txqueue_len--;
	spin_unlock_bh(&ssi->lock);

	msg = ssip_claim_cmd(ssi);
	skb = dmsg->context;
	/* The command owns the data msg until completion/destruction */
	msg->context = dmsg;
	msg->complete = ssip_strans_complete;
	msg->destructor = ssip_free_strans;

	spin_lock_bh(&ssi->lock);
	ssip_set_cmd(msg, SSIP_START_TRANS_CMD(SSIP_BYTES_TO_FRAMES(skb->len),
						ssi->txid));
	ssi->txid++;	/* u8: wraps naturally at 256 */
	ssip_set_txstate(ssi, SENDING);
	spin_unlock_bh(&ssi->lock);
	dev_dbg(&cl->device, "Send STRANS (%d frames)\n",
		SSIP_BYTES_TO_FRAMES(skb->len));

	return hsi_async_write(cl, msg);
}
/*
 * Hand a received Phonet packet to the network stack.
 * In soft IRQ context.
 */
static void ssip_pn_rx(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	if (unlikely(!netif_running(dev))) {
		dev_dbg(&dev->dev, "Drop RX packet\n");
		dev->stats.rx_dropped++;
		dev_kfree_skb(skb);
		return;
	}
	if (unlikely(!pskb_may_pull(skb, SSIP_MIN_PN_HDR))) {
		dev_dbg(&dev->dev, "Error drop RX packet\n");
		dev->stats.rx_errors++;
		dev->stats.rx_length_errors++;
		dev_kfree_skb(skb);
		return;
	}
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;

	/* length field is exchanged in network byte order */
	((u16 *)skb->data)[2] = ntohs(((u16 *)skb->data)[2]);
	/*
	 * NOTE(review): the debug line prints the already-swapped word,
	 * and the same word swapped back to wire order.
	 */
	dev_dbg(&dev->dev, "RX length fixed (%04x -> %u)\n",
		((u16 *)skb->data)[2], ntohs(((u16 *)skb->data)[2]));

	skb->protocol = htons(ETH_P_PHONET);
	skb_reset_mac_header(skb);
	/* Strip the leading media byte before handing off to Phonet */
	__skb_pull(skb, 1);
	netif_rx(skb);
}
  560. static void ssip_rx_data_complete(struct hsi_msg *msg)
  561. {
  562. struct hsi_client *cl = msg->cl;
  563. struct ssi_protocol *ssi = hsi_client_drvdata(cl);
  564. struct sk_buff *skb;
  565. if (msg->status == HSI_STATUS_ERROR) {
  566. dev_err(&cl->device, "RX data error\n");
  567. ssip_free_data(msg);
  568. ssip_error(cl);
  569. return;
  570. }
  571. del_timer(&ssi->rx_wd); /* FIXME: Revisit */
  572. skb = msg->context;
  573. ssip_pn_rx(skb);
  574. hsi_free_msg(msg);
  575. }
/*
 * Handle BOOTINFO_REQ from the modem: (re)enter HANDSHAKE, start the
 * wake line test and boot watchdog, and answer with BOOTINFO_RESP.
 * A request while ACTIVE means the modem rebooted -> protocol error.
 */
static void ssip_rx_bootinforeq(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	/* Workaround: Ignore CMT Loader message leftover */
	if (cmd == SSIP_CMT_LOADER_SYNC)
		return;

	switch (ssi->main_state) {
	case ACTIVE:
		dev_err(&cl->device, "Boot info req on active state\n");
		ssip_error(cl);
		/* Fall through */
	case INIT:
	case HANDSHAKE:
		spin_lock_bh(&ssi->lock);
		ssi->main_state = HANDSHAKE;
		spin_unlock_bh(&ssi->lock);

		/* Only start the wake line test if it is not running yet */
		if (!test_and_set_bit(SSIP_WAKETEST_FLAG, &ssi->flags))
			ssi_waketest(cl, 1); /* FIXME: To be removed */

		spin_lock_bh(&ssi->lock);
		/* Start boot handshake watchdog */
		mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
		spin_unlock_bh(&ssi->lock);
		dev_dbg(&cl->device, "Send BOOTINFO_RESP\n");
		if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID)
			dev_warn(&cl->device, "boot info req verid mismatch\n");
		msg = ssip_claim_cmd(ssi);
		ssip_set_cmd(msg, SSIP_BOOTINFO_RESP_CMD(SSIP_LOCAL_VERID));
		msg->complete = ssip_release_cmd;
		hsi_async_write(cl, msg);
		break;
	default:
		dev_dbg(&cl->device, "Wrong state M(%d)\n", ssi->main_state);
		break;
	}
}
  612. static void ssip_rx_bootinforesp(struct hsi_client *cl, u32 cmd)
  613. {
  614. struct ssi_protocol *ssi = hsi_client_drvdata(cl);
  615. if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID)
  616. dev_warn(&cl->device, "boot info resp verid mismatch\n");
  617. spin_lock_bh(&ssi->lock);
  618. if (ssi->main_state != ACTIVE)
  619. /* Use tx_wd as a boot watchdog in non ACTIVE state */
  620. mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
  621. else
  622. dev_dbg(&cl->device, "boot info resp ignored M(%d)\n",
  623. ssi->main_state);
  624. spin_unlock_bh(&ssi->lock);
  625. }
/*
 * Handle WAKETEST_RESULT received during the boot handshake: stop the
 * local wake line test, move to ACTIVE and bring the Phonet carrier up
 * on success, or force a protocol reset on failure.
 */
static void ssip_rx_waketest(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	unsigned int wkres = SSIP_PAYLOAD(cmd);

	spin_lock_bh(&ssi->lock);
	if (ssi->main_state != HANDSHAKE) {
		dev_dbg(&cl->device, "wake lines test ignored M(%d)\n",
			ssi->main_state);
		spin_unlock_bh(&ssi->lock);
		return;
	}
	spin_unlock_bh(&ssi->lock);
	/* Test finished: stop toggling the wake line (outside the lock) */
	if (test_and_clear_bit(SSIP_WAKETEST_FLAG, &ssi->flags))
		ssi_waketest(cl, 0); /* FIXME: To be removed */
	spin_lock_bh(&ssi->lock);
	ssi->main_state = ACTIVE;
	del_timer(&ssi->tx_wd); /* Stop boot handshake timer */
	spin_unlock_bh(&ssi->lock);

	dev_notice(&cl->device, "WAKELINES TEST %s\n",
		   wkres & SSIP_WAKETEST_FAILED ? "FAILED" : "OK");
	if (wkres & SSIP_WAKETEST_FAILED) {
		ssip_error(cl);
		return;
	}
	dev_dbg(&cl->device, "CMT is ONLINE\n");
	netif_wake_queue(ssi->netdev);
	netif_carrier_on(ssi->netdev);
}
  654. static void ssip_rx_ready(struct hsi_client *cl)
  655. {
  656. struct ssi_protocol *ssi = hsi_client_drvdata(cl);
  657. spin_lock_bh(&ssi->lock);
  658. if (unlikely(ssi->main_state != ACTIVE)) {
  659. dev_dbg(&cl->device, "READY on wrong state: S(%d) M(%d)\n",
  660. ssi->send_state, ssi->main_state);
  661. spin_unlock_bh(&ssi->lock);
  662. return;
  663. }
  664. if (ssi->send_state != WAIT4READY) {
  665. dev_dbg(&cl->device, "Ignore spurious READY command\n");
  666. spin_unlock_bh(&ssi->lock);
  667. return;
  668. }
  669. ssip_set_txstate(ssi, SEND_READY);
  670. spin_unlock_bh(&ssi->lock);
  671. ssip_xmit(cl);
  672. }
/*
 * Handle START_TRANS: validate state and message id, switch to
 * RECEIVING, allocate an skb for @len 32-bit frames and start the
 * asynchronous data read. Id mismatch or allocation failure forces a
 * protocol reset.
 */
static void ssip_rx_strans(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct sk_buff *skb;
	struct hsi_msg *msg;
	int len = SSIP_PDU_LENGTH(cmd);	/* PDU length in frames */

	dev_dbg(&cl->device, "RX strans: %d frames\n", len);
	spin_lock_bh(&ssi->lock);
	if (unlikely(ssi->main_state != ACTIVE)) {
		dev_err(&cl->device, "START TRANS wrong state: S(%d) M(%d)\n",
			ssi->send_state, ssi->main_state);
		spin_unlock_bh(&ssi->lock);
		return;
	}
	ssip_set_rxstate(ssi, RECEIVING);
	if (unlikely(SSIP_MSG_ID(cmd) != ssi->rxid)) {
		dev_err(&cl->device, "START TRANS id %d expected %d\n",
			SSIP_MSG_ID(cmd), ssi->rxid);
		spin_unlock_bh(&ssi->lock);
		goto out1;
	}
	ssi->rxid++;	/* u8: wraps naturally, matching the TX side */
	spin_unlock_bh(&ssi->lock);
	skb = netdev_alloc_skb(ssi->netdev, len * 4);
	if (unlikely(!skb)) {
		dev_err(&cl->device, "No memory for rx skb\n");
		goto out1;
	}
	skb->dev = ssi->netdev;
	skb_put(skb, len * 4);	/* each frame is 4 bytes */
	msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC);
	if (unlikely(!msg)) {
		dev_err(&cl->device, "No memory for RX data msg\n");
		goto out2;
	}
	msg->complete = ssip_rx_data_complete;
	hsi_async_read(cl, msg);

	return;
out2:
	dev_kfree_skb(skb);
out1:
	ssip_error(cl);
}
/*
 * Completion handler for command channel reads: re-arm the read first
 * so no command is lost, then dispatch on the 4-bit command id.
 */
static void ssip_rxcmd_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	u32 cmd = ssip_get_cmd(msg);
	unsigned int cmdid = SSIP_COMMAND(cmd);

	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&cl->device, "RX error detected\n");
		ssip_release_cmd(msg);
		ssip_error(cl);
		return;
	}
	/* Resubmit the same msg for the next command before dispatching */
	hsi_async_read(cl, msg);
	dev_dbg(&cl->device, "RX cmd: 0x%08x\n", cmd);
	switch (cmdid) {
	case SSIP_SW_BREAK:
		/* Ignored */
		break;
	case SSIP_BOOTINFO_REQ:
		ssip_rx_bootinforeq(cl, cmd);
		break;
	case SSIP_BOOTINFO_RESP:
		ssip_rx_bootinforesp(cl, cmd);
		break;
	case SSIP_WAKETEST_RESULT:
		ssip_rx_waketest(cl, cmd);
		break;
	case SSIP_START_TRANS:
		ssip_rx_strans(cl, cmd);
		break;
	case SSIP_READY:
		ssip_rx_ready(cl);
		break;
	default:
		dev_warn(&cl->device, "command 0x%08x not supported\n", cmd);
		break;
	}
}
/*
 * Completion handler for the SW_BREAK command that ends a TX burst:
 * with an empty queue, drop to SEND_READY if cmt-speech still holds
 * the wake line, otherwise SEND_IDLE and release the line; with more
 * data queued, start the next transfer. Finally let the Phonet layer
 * queue more packets.
 */
static void ssip_swbreak_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	ssip_release_cmd(msg);
	spin_lock_bh(&ssi->lock);
	if (list_empty(&ssi->txqueue)) {
		if (atomic_read(&ssi->tx_usecnt)) {
			ssip_set_txstate(ssi, SEND_READY);
		} else {
			ssip_set_txstate(ssi, SEND_IDLE);
			hsi_stop_tx(cl);
		}
		spin_unlock_bh(&ssi->lock);
	} else {
		spin_unlock_bh(&ssi->lock);
		ssip_xmit(cl);
	}
	netif_wake_queue(ssi->netdev);
}
/*
 * Completion handler for TX data transfers: on success either send a
 * SW_BREAK to close the burst (queue empty) or start the next
 * transfer; on error reset the protocol. The data message is freed on
 * every path.
 */
static void ssip_tx_data_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *cmsg;

	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&cl->device, "TX data error\n");
		ssip_error(cl);
		goto out;
	}
	spin_lock_bh(&ssi->lock);
	if (list_empty(&ssi->txqueue)) {
		ssip_set_txstate(ssi, SENDING_SWBREAK);
		spin_unlock_bh(&ssi->lock);
		cmsg = ssip_claim_cmd(ssi);
		ssip_set_cmd(cmsg, SSIP_SWBREAK_CMD);
		cmsg->complete = ssip_swbreak_complete;
		dev_dbg(&cl->device, "Send SWBREAK\n");
		hsi_async_write(cl, cmsg);
	} else {
		spin_unlock_bh(&ssi->lock);
		ssip_xmit(cl);
	}
out:
	ssip_free_data(msg);
}
  799. static void ssip_port_event(struct hsi_client *cl, unsigned long event)
  800. {
  801. switch (event) {
  802. case HSI_EVENT_START_RX:
  803. ssip_start_rx(cl);
  804. break;
  805. case HSI_EVENT_STOP_RX:
  806. ssip_stop_rx(cl);
  807. break;
  808. default:
  809. return;
  810. }
  811. }
  812. static int ssip_pn_open(struct net_device *dev)
  813. {
  814. struct hsi_client *cl = to_hsi_client(dev->dev.parent);
  815. struct ssi_protocol *ssi = hsi_client_drvdata(cl);
  816. int err;
  817. err = hsi_claim_port(cl, 1);
  818. if (err < 0) {
  819. dev_err(&cl->device, "SSI port already claimed\n");
  820. return err;
  821. }
  822. err = hsi_register_port_event(cl, ssip_port_event);
  823. if (err < 0) {
  824. dev_err(&cl->device, "Register HSI port event failed (%d)\n",
  825. err);
  826. return err;
  827. }
  828. dev_dbg(&cl->device, "Configuring SSI port\n");
  829. hsi_setup(cl);
  830. if (!test_and_set_bit(SSIP_WAKETEST_FLAG, &ssi->flags))
  831. ssi_waketest(cl, 1); /* FIXME: To be removed */
  832. spin_lock_bh(&ssi->lock);
  833. ssi->main_state = HANDSHAKE;
  834. spin_unlock_bh(&ssi->lock);
  835. ssip_send_bootinfo_req_cmd(cl);
  836. return 0;
  837. }
  838. static int ssip_pn_stop(struct net_device *dev)
  839. {
  840. struct hsi_client *cl = to_hsi_client(dev->dev.parent);
  841. ssip_reset(cl);
  842. hsi_unregister_port_event(cl);
  843. hsi_release_port(cl);
  844. return 0;
  845. }
  846. static void ssip_xmit_work(struct work_struct *work)
  847. {
  848. struct ssi_protocol *ssi =
  849. container_of(work, struct ssi_protocol, work);
  850. struct hsi_client *cl = ssi->cl;
  851. ssip_xmit(cl);
  852. }
  853. static int ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev)
  854. {
  855. struct hsi_client *cl = to_hsi_client(dev->dev.parent);
  856. struct ssi_protocol *ssi = hsi_client_drvdata(cl);
  857. struct hsi_msg *msg;
  858. if ((skb->protocol != htons(ETH_P_PHONET)) ||
  859. (skb->len < SSIP_MIN_PN_HDR))
  860. goto drop;
  861. /* Pad to 32-bits - FIXME: Revisit*/
  862. if ((skb->len & 3) && skb_pad(skb, 4 - (skb->len & 3)))
  863. goto inc_dropped;
  864. /*
  865. * Modem sends Phonet messages over SSI with its own endianess...
  866. * Assume that modem has the same endianess as we do.
  867. */
  868. if (skb_cow_head(skb, 0))
  869. goto drop;
  870. /* length field is exchanged in network byte order */
  871. ((u16 *)skb->data)[2] = htons(((u16 *)skb->data)[2]);
  872. msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC);
  873. if (!msg) {
  874. dev_dbg(&cl->device, "Dropping tx data: No memory\n");
  875. goto drop;
  876. }
  877. msg->complete = ssip_tx_data_complete;
  878. spin_lock_bh(&ssi->lock);
  879. if (unlikely(ssi->main_state != ACTIVE)) {
  880. spin_unlock_bh(&ssi->lock);
  881. dev_dbg(&cl->device, "Dropping tx data: CMT is OFFLINE\n");
  882. goto drop2;
  883. }
  884. list_add_tail(&msg->link, &ssi->txqueue);
  885. ssi->txqueue_len++;
  886. if (dev->tx_queue_len < ssi->txqueue_len) {
  887. dev_info(&cl->device, "TX queue full %d\n", ssi->txqueue_len);
  888. netif_stop_queue(dev);
  889. }
  890. if (ssi->send_state == SEND_IDLE) {
  891. ssip_set_txstate(ssi, WAIT4READY);
  892. spin_unlock_bh(&ssi->lock);
  893. dev_dbg(&cl->device, "Start TX qlen %d\n", ssi->txqueue_len);
  894. hsi_start_tx(cl);
  895. } else if (ssi->send_state == SEND_READY) {
  896. /* Needed for cmt-speech workaround */
  897. dev_dbg(&cl->device, "Start TX on SEND READY qlen %d\n",
  898. ssi->txqueue_len);
  899. spin_unlock_bh(&ssi->lock);
  900. schedule_work(&ssi->work);
  901. } else {
  902. spin_unlock_bh(&ssi->lock);
  903. }
  904. dev->stats.tx_packets++;
  905. dev->stats.tx_bytes += skb->len;
  906. return 0;
  907. drop2:
  908. hsi_free_msg(msg);
  909. drop:
  910. dev_kfree_skb(skb);
  911. inc_dropped:
  912. dev->stats.tx_dropped++;
  913. return 0;
  914. }
  915. /* CMT reset event handler */
  916. void ssip_reset_event(struct hsi_client *master)
  917. {
  918. struct ssi_protocol *ssi = hsi_client_drvdata(master);
  919. dev_err(&ssi->cl->device, "CMT reset detected!\n");
  920. ssip_error(ssi->cl);
  921. }
  922. EXPORT_SYMBOL_GPL(ssip_reset_event);
/* net_device callbacks for the Phonet interface */
static const struct net_device_ops ssip_pn_ops = {
	.ndo_open	= ssip_pn_open,
	.ndo_stop	= ssip_pn_stop,
	.ndo_start_xmit	= ssip_pn_xmit,
};
/*
 * alloc_netdev() setup callback: configure the Phonet interface as a
 * point-to-point, ARP-less device with a single-byte media address.
 */
static void ssip_pn_setup(struct net_device *dev)
{
	dev->features		= 0;
	dev->netdev_ops		= &ssip_pn_ops;
	dev->type		= ARPHRD_PHONET;
	dev->flags		= IFF_POINTOPOINT | IFF_NOARP;
	dev->mtu		= SSIP_DEFAULT_MTU;
	dev->hard_header_len	= 1;
	dev->dev_addr[0]	= PN_MEDIA_SOS;
	dev->addr_len		= 1;
	dev->tx_queue_len	= SSIP_TXQUEUE_LEN;
	/* Let the core call free_netdev() on unregister. */
	dev->needs_free_netdev	= true;
	dev->header_ops		= &phonet_header_ops;
}
  942. static int ssi_protocol_probe(struct device *dev)
  943. {
  944. static const char ifname[] = "phonet%d";
  945. struct hsi_client *cl = to_hsi_client(dev);
  946. struct ssi_protocol *ssi;
  947. int err;
  948. ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
  949. if (!ssi) {
  950. dev_err(dev, "No memory for ssi protocol\n");
  951. return -ENOMEM;
  952. }
  953. spin_lock_init(&ssi->lock);
  954. init_timer_deferrable(&ssi->rx_wd);
  955. init_timer_deferrable(&ssi->tx_wd);
  956. init_timer(&ssi->keep_alive);
  957. ssi->rx_wd.data = (unsigned long)cl;
  958. ssi->rx_wd.function = ssip_wd;
  959. ssi->tx_wd.data = (unsigned long)cl;
  960. ssi->tx_wd.function = ssip_wd;
  961. ssi->keep_alive.data = (unsigned long)cl;
  962. ssi->keep_alive.function = ssip_keep_alive;
  963. INIT_LIST_HEAD(&ssi->txqueue);
  964. INIT_LIST_HEAD(&ssi->cmdqueue);
  965. atomic_set(&ssi->tx_usecnt, 0);
  966. hsi_client_set_drvdata(cl, ssi);
  967. ssi->cl = cl;
  968. INIT_WORK(&ssi->work, ssip_xmit_work);
  969. ssi->channel_id_cmd = hsi_get_channel_id_by_name(cl, "mcsaab-control");
  970. if (ssi->channel_id_cmd < 0) {
  971. err = ssi->channel_id_cmd;
  972. dev_err(dev, "Could not get cmd channel (%d)\n", err);
  973. goto out;
  974. }
  975. ssi->channel_id_data = hsi_get_channel_id_by_name(cl, "mcsaab-data");
  976. if (ssi->channel_id_data < 0) {
  977. err = ssi->channel_id_data;
  978. dev_err(dev, "Could not get data channel (%d)\n", err);
  979. goto out;
  980. }
  981. err = ssip_alloc_cmds(ssi);
  982. if (err < 0) {
  983. dev_err(dev, "No memory for commands\n");
  984. goto out;
  985. }
  986. ssi->netdev = alloc_netdev(0, ifname, NET_NAME_UNKNOWN, ssip_pn_setup);
  987. if (!ssi->netdev) {
  988. dev_err(dev, "No memory for netdev\n");
  989. err = -ENOMEM;
  990. goto out1;
  991. }
  992. /* MTU range: 6 - 65535 */
  993. ssi->netdev->min_mtu = PHONET_MIN_MTU;
  994. ssi->netdev->max_mtu = SSIP_MAX_MTU;
  995. SET_NETDEV_DEV(ssi->netdev, dev);
  996. netif_carrier_off(ssi->netdev);
  997. err = register_netdev(ssi->netdev);
  998. if (err < 0) {
  999. dev_err(dev, "Register netdev failed (%d)\n", err);
  1000. goto out2;
  1001. }
  1002. list_add(&ssi->link, &ssip_list);
  1003. dev_dbg(dev, "channel configuration: cmd=%d, data=%d\n",
  1004. ssi->channel_id_cmd, ssi->channel_id_data);
  1005. return 0;
  1006. out2:
  1007. free_netdev(ssi->netdev);
  1008. out1:
  1009. ssip_free_cmds(ssi);
  1010. out:
  1011. kfree(ssi);
  1012. return err;
  1013. }
/*
 * HSI client remove: tear down everything set up by ssi_protocol_probe()
 * in the reverse order of acquisition. unregister_netdev() runs before
 * the commands are freed so no TX/RX path can touch freed messages.
 */
static int ssi_protocol_remove(struct device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev);
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	list_del(&ssi->link);
	unregister_netdev(ssi->netdev);
	ssip_free_cmds(ssi);
	hsi_client_set_drvdata(cl, NULL);
	kfree(ssi);

	return 0;
}
/* HSI client driver glue binding probe/remove to the "ssi-protocol" device. */
static struct hsi_client_driver ssip_driver = {
	.driver = {
		.name	= "ssi-protocol",
		.owner	= THIS_MODULE,
		.probe	= ssi_protocol_probe,
		.remove	= ssi_protocol_remove,
	},
};
/* Module init: register the HSI client driver. */
static int __init ssip_init(void)
{
	pr_info("SSI protocol aka McSAAB added\n");

	return hsi_register_client_driver(&ssip_driver);
}
module_init(ssip_init);
/* Module exit: unregister the HSI client driver. */
static void __exit ssip_exit(void)
{
	hsi_unregister_client_driver(&ssip_driver);
	pr_info("SSI protocol driver removed\n");
}
module_exit(ssip_exit);
  1045. MODULE_ALIAS("hsi:ssi-protocol");
  1046. MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>");
  1047. MODULE_AUTHOR("Remi Denis-Courmont <remi.denis-courmont@nokia.com>");
  1048. MODULE_DESCRIPTION("SSI protocol improved aka McSAAB");
  1049. MODULE_LICENSE("GPL");