ssi_protocol.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202
  1. /*
  2. * ssi_protocol.c
  3. *
  4. * Implementation of the SSI McSAAB improved protocol.
  5. *
  6. * Copyright (C) 2010 Nokia Corporation. All rights reserved.
  7. * Copyright (C) 2013 Sebastian Reichel <sre@kernel.org>
  8. *
  9. * Contact: Carlos Chinea <carlos.chinea@nokia.com>
  10. *
  11. * This program is free software; you can redistribute it and/or
  12. * modify it under the terms of the GNU General Public License
  13. * version 2 as published by the Free Software Foundation.
  14. *
  15. * This program is distributed in the hope that it will be useful, but
  16. * WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  18. * General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU General Public License
  21. * along with this program; if not, write to the Free Software
  22. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
  23. * 02110-1301 USA
  24. */
  25. #include <linux/atomic.h>
  26. #include <linux/clk.h>
  27. #include <linux/device.h>
  28. #include <linux/err.h>
  29. #include <linux/gpio.h>
  30. #include <linux/if_ether.h>
  31. #include <linux/if_arp.h>
  32. #include <linux/if_phonet.h>
  33. #include <linux/init.h>
  34. #include <linux/irq.h>
  35. #include <linux/list.h>
  36. #include <linux/module.h>
  37. #include <linux/netdevice.h>
  38. #include <linux/notifier.h>
  39. #include <linux/scatterlist.h>
  40. #include <linux/skbuff.h>
  41. #include <linux/slab.h>
  42. #include <linux/spinlock.h>
  43. #include <linux/timer.h>
  44. #include <linux/hsi/hsi.h>
  45. #include <linux/hsi/ssi_protocol.h>
  46. void ssi_waketest(struct hsi_client *cl, unsigned int enable);
#define SSIP_TXQUEUE_LEN	100
#define SSIP_MAX_MTU		65535
#define SSIP_DEFAULT_MTU	4000
#define PN_MEDIA_SOS		21
#define SSIP_MIN_PN_HDR		6	/* FIXME: Revisit */
#define SSIP_WDTOUT		2000	/* FIXME: has to be 500 msecs */
#define SSIP_KATOUT		15	/* 15 msecs */
#define SSIP_MAX_CMDS		5 /* Number of pre-allocated commands buffers */
/* Data is moved in whole 32-bit frames; round byte length up */
#define SSIP_BYTES_TO_FRAMES(x) ((((x) - 1) >> 2) + 1)
#define SSIP_CMT_LOADER_SYNC	0x11223344

/*
 * SSI protocol command definitions
 *
 * A command word is 32 bits: the top nibble is the command id, the
 * remaining 28 bits are the payload.
 */
#define SSIP_COMMAND(data)	((data) >> 28)
#define SSIP_PAYLOAD(data)	((data) & 0xfffffff)
/* Commands */
#define SSIP_SW_BREAK		0
#define SSIP_BOOTINFO_REQ	1
#define SSIP_BOOTINFO_RESP	2
#define SSIP_WAKETEST_RESULT	3
#define SSIP_START_TRANS	4
#define SSIP_READY		5
/* Payloads */
#define SSIP_DATA_VERSION(data)	((data) & 0xff)
#define SSIP_LOCAL_VERID	1
#define SSIP_WAKETEST_OK	0
#define SSIP_WAKETEST_FAILED	1
#define SSIP_PDU_LENGTH(data)	(((data) >> 8) & 0xffff)
#define SSIP_MSG_ID(data)	((data) & 0xff)
/* Generic Command */
#define SSIP_CMD(cmd, payload)	(((cmd) << 28) | ((payload) & 0xfffffff))
/* Commands for the control channel */
#define SSIP_BOOTINFO_REQ_CMD(ver) \
		SSIP_CMD(SSIP_BOOTINFO_REQ, SSIP_DATA_VERSION(ver))
#define SSIP_BOOTINFO_RESP_CMD(ver) \
		SSIP_CMD(SSIP_BOOTINFO_RESP, SSIP_DATA_VERSION(ver))
#define SSIP_START_TRANS_CMD(pdulen, id) \
		SSIP_CMD(SSIP_START_TRANS, (((pdulen) << 8) | SSIP_MSG_ID(id)))
#define SSIP_READY_CMD		SSIP_CMD(SSIP_READY, 0)
#define SSIP_SWBREAK_CMD	SSIP_CMD(SSIP_SW_BREAK, 0)

/* Bit index in ssi_protocol.flags: wake line test in progress */
#define SSIP_WAKETEST_FLAG	0

/* Main state machine states */
enum {
	INIT,
	HANDSHAKE,
	ACTIVE,
};

/* Send state machine states */
enum {
	SEND_IDLE,
	WAIT4READY,
	SEND_READY,
	SENDING,
	SENDING_SWBREAK,
};

/* Receive state machine states */
enum {
	RECV_IDLE,
	RECV_READY,
	RECEIVING,
};
/**
 * struct ssi_protocol - SSI protocol (McSAAB) data
 * @main_state: Main state machine
 * @send_state: TX state machine
 * @recv_state: RX state machine
 * @flags: Flags, currently only used to follow wake line test
 * @rxid: RX data id
 * @txid: TX data id
 * @txqueue_len: TX queue length
 * @tx_wd: TX watchdog
 * @rx_wd: RX watchdog
 * @keep_alive: Workaround for SSI HW bug
 * @lock: To serialize access to this struct
 * @netdev: Phonet network device
 * @txqueue: TX data queue
 * @cmdqueue: Queue of free commands
 * @work: Deferred work to kick the TX path out of atomic context
 * @cl: HSI client own reference
 * @link: Link for ssip_list
 * @tx_usecnt: Refcount to keep track the slaves that use the wake line
 * @channel_id_cmd: HSI channel id for command stream
 * @channel_id_data: HSI channel id for data stream
 */
struct ssi_protocol {
	unsigned int		main_state;
	unsigned int		send_state;
	unsigned int		recv_state;
	unsigned long		flags;
	u8			rxid;
	u8			txid;
	unsigned int		txqueue_len;
	struct timer_list	tx_wd;
	struct timer_list	rx_wd;
	struct timer_list	keep_alive; /* wake-up workaround */
	spinlock_t		lock;
	struct net_device	*netdev;
	struct list_head	txqueue;
	struct list_head	cmdqueue;
	struct work_struct	work;
	struct hsi_client	*cl;
	struct list_head	link;
	atomic_t		tx_usecnt;
	int			channel_id_cmd;
	int			channel_id_data;
};
  152. /* List of ssi protocol instances */
  153. static LIST_HEAD(ssip_list);
  154. static void ssip_rxcmd_complete(struct hsi_msg *msg);
  155. static inline void ssip_set_cmd(struct hsi_msg *msg, u32 cmd)
  156. {
  157. u32 *data;
  158. data = sg_virt(msg->sgt.sgl);
  159. *data = cmd;
  160. }
  161. static inline u32 ssip_get_cmd(struct hsi_msg *msg)
  162. {
  163. u32 *data;
  164. data = sg_virt(msg->sgt.sgl);
  165. return *data;
  166. }
/*
 * Map an sk_buff onto the scatterlist of an HSI message: the first sg
 * entry covers the linear head, followed by one entry per page fragment.
 */
static void ssip_skb_to_msg(struct sk_buff *skb, struct hsi_msg *msg)
{
	skb_frag_t *frag;
	struct scatterlist *sg;
	int i;

	/* The message must have been allocated with nr_frags + 1 entries */
	BUG_ON(msg->sgt.nents != (unsigned int)(skb_shinfo(skb)->nr_frags + 1));

	sg = msg->sgt.sgl;
	sg_set_buf(sg, skb->data, skb_headlen(skb));
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		sg = sg_next(sg);
		BUG_ON(!sg);
		frag = &skb_shinfo(skb)->frags[i];
		sg_set_page(sg, frag->page.p, frag->size, frag->page_offset);
	}
}
  182. static void ssip_free_data(struct hsi_msg *msg)
  183. {
  184. struct sk_buff *skb;
  185. skb = msg->context;
  186. pr_debug("free data: msg %p context %p skb %p\n", msg, msg->context,
  187. skb);
  188. msg->destructor = NULL;
  189. dev_kfree_skb(skb);
  190. hsi_free_msg(msg);
  191. }
  192. static struct hsi_msg *ssip_alloc_data(struct ssi_protocol *ssi,
  193. struct sk_buff *skb, gfp_t flags)
  194. {
  195. struct hsi_msg *msg;
  196. msg = hsi_alloc_msg(skb_shinfo(skb)->nr_frags + 1, flags);
  197. if (!msg)
  198. return NULL;
  199. ssip_skb_to_msg(skb, msg);
  200. msg->destructor = ssip_free_data;
  201. msg->channel = ssi->channel_id_data;
  202. msg->context = skb;
  203. return msg;
  204. }
/* Return a command message to the free-command pool (cmdqueue). */
static inline void ssip_release_cmd(struct hsi_msg *msg)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(msg->cl);

	dev_dbg(&msg->cl->device, "Release cmd 0x%08x\n", ssip_get_cmd(msg));
	spin_lock_bh(&ssi->lock);
	list_add_tail(&msg->link, &ssi->cmdqueue);
	spin_unlock_bh(&ssi->lock);
}
/*
 * Take a pre-allocated command message out of the pool. The pool is
 * sized at SSIP_MAX_CMDS, so an empty pool means a protocol logic
 * error, hence the BUG_ON.
 */
static struct hsi_msg *ssip_claim_cmd(struct ssi_protocol *ssi)
{
	struct hsi_msg *msg;

	BUG_ON(list_empty(&ssi->cmdqueue));

	spin_lock_bh(&ssi->lock);
	msg = list_first_entry(&ssi->cmdqueue, struct hsi_msg, link);
	list_del(&msg->link);
	spin_unlock_bh(&ssi->lock);
	/* default destructor puts the command back into the pool */
	msg->destructor = ssip_release_cmd;

	return msg;
}
  224. static void ssip_free_cmds(struct ssi_protocol *ssi)
  225. {
  226. struct hsi_msg *msg, *tmp;
  227. list_for_each_entry_safe(msg, tmp, &ssi->cmdqueue, link) {
  228. list_del(&msg->link);
  229. msg->destructor = NULL;
  230. kfree(sg_virt(msg->sgt.sgl));
  231. hsi_free_msg(msg);
  232. }
  233. }
  234. static int ssip_alloc_cmds(struct ssi_protocol *ssi)
  235. {
  236. struct hsi_msg *msg;
  237. u32 *buf;
  238. unsigned int i;
  239. for (i = 0; i < SSIP_MAX_CMDS; i++) {
  240. msg = hsi_alloc_msg(1, GFP_KERNEL);
  241. if (!msg)
  242. goto out;
  243. buf = kmalloc(sizeof(*buf), GFP_KERNEL);
  244. if (!buf) {
  245. hsi_free_msg(msg);
  246. goto out;
  247. }
  248. sg_init_one(msg->sgt.sgl, buf, sizeof(*buf));
  249. msg->channel = ssi->channel_id_cmd;
  250. list_add_tail(&msg->link, &ssi->cmdqueue);
  251. }
  252. return 0;
  253. out:
  254. ssip_free_cmds(ssi);
  255. return -ENOMEM;
  256. }
/*
 * Move the RX state machine to @state and manage the associated
 * timers. Callers hold ssi->lock.
 */
static void ssip_set_rxstate(struct ssi_protocol *ssi, unsigned int state)
{
	ssi->recv_state = state;
	switch (state) {
	case RECV_IDLE:
		del_timer(&ssi->rx_wd);
		/* keep_alive is shared with TX: stop only when both sides idle */
		if (ssi->send_state == SEND_IDLE)
			del_timer(&ssi->keep_alive);
		break;
	case RECV_READY:
		/* CMT speech workaround */
		if (atomic_read(&ssi->tx_usecnt))
			break;
		/* Otherwise fall through */
	case RECEIVING:
		mod_timer(&ssi->keep_alive, jiffies +
						msecs_to_jiffies(SSIP_KATOUT));
		mod_timer(&ssi->rx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
		break;
	default:
		break;
	}
}
/*
 * Move the TX state machine to @state and manage the associated
 * timers. Callers hold ssi->lock.
 */
static void ssip_set_txstate(struct ssi_protocol *ssi, unsigned int state)
{
	ssi->send_state = state;
	switch (state) {
	case SEND_IDLE:
	case SEND_READY:
		del_timer(&ssi->tx_wd);
		/* keep_alive is shared with RX: stop only when both sides idle */
		if (ssi->recv_state == RECV_IDLE)
			del_timer(&ssi->keep_alive);
		break;
	case WAIT4READY:
	case SENDING:
	case SENDING_SWBREAK:
		mod_timer(&ssi->keep_alive,
				jiffies + msecs_to_jiffies(SSIP_KATOUT));
		mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
		break;
	default:
		break;
	}
}
  301. struct hsi_client *ssip_slave_get_master(struct hsi_client *slave)
  302. {
  303. struct hsi_client *master = ERR_PTR(-ENODEV);
  304. struct ssi_protocol *ssi;
  305. list_for_each_entry(ssi, &ssip_list, link)
  306. if (slave->device.parent == ssi->cl->device.parent) {
  307. master = ssi->cl;
  308. break;
  309. }
  310. return master;
  311. }
  312. EXPORT_SYMBOL_GPL(ssip_slave_get_master);
/*
 * Called by a slave client to take a reference on the TX wake line.
 * Raises the wake line (hsi_start_tx) when the send machine is idle;
 * tx_usecnt keeps it up until the matching ssip_slave_stop_tx().
 */
int ssip_slave_start_tx(struct hsi_client *master)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(master);

	dev_dbg(&master->device, "start TX %d\n", atomic_read(&ssi->tx_usecnt));
	spin_lock_bh(&ssi->lock);
	if (ssi->send_state == SEND_IDLE) {
		ssip_set_txstate(ssi, WAIT4READY);
		hsi_start_tx(master);
	}
	spin_unlock_bh(&ssi->lock);
	atomic_inc(&ssi->tx_usecnt);

	return 0;
}
EXPORT_SYMBOL_GPL(ssip_slave_start_tx);
/*
 * Drop one slave reference on the TX wake line. The last user lowers
 * the line (hsi_stop_tx) unless a data transfer is still in flight
 * (send_state SENDING/SENDING_SWBREAK keeps it up).
 */
int ssip_slave_stop_tx(struct hsi_client *master)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(master);

	WARN_ON_ONCE(atomic_read(&ssi->tx_usecnt) == 0);

	if (atomic_dec_and_test(&ssi->tx_usecnt)) {
		spin_lock_bh(&ssi->lock);
		if ((ssi->send_state == SEND_READY) ||
			(ssi->send_state == WAIT4READY)) {
			ssip_set_txstate(ssi, SEND_IDLE);
			hsi_stop_tx(master);
		}
		spin_unlock_bh(&ssi->lock);
	}
	dev_dbg(&master->device, "stop TX %d\n", atomic_read(&ssi->tx_usecnt));

	return 0;
}
EXPORT_SYMBOL_GPL(ssip_slave_stop_tx);
  344. int ssip_slave_running(struct hsi_client *master)
  345. {
  346. struct ssi_protocol *ssi = hsi_client_drvdata(master);
  347. return netif_running(ssi->netdev);
  348. }
  349. EXPORT_SYMBOL_GPL(ssip_slave_running);
/*
 * Reset the whole protocol instance: carrier off, flush pending HSI
 * transfers, drop the TX wake line and wake test, stop all timers,
 * zero the state machines/ids and free any queued TX data.
 */
static void ssip_reset(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct list_head *head, *tmp;
	struct hsi_msg *msg;

	if (netif_running(ssi->netdev))
		netif_carrier_off(ssi->netdev);
	hsi_flush(cl);
	spin_lock_bh(&ssi->lock);
	if (ssi->send_state != SEND_IDLE)
		hsi_stop_tx(cl);
	spin_unlock_bh(&ssi->lock);
	/* abort an in-progress wake line test, if any */
	if (test_and_clear_bit(SSIP_WAKETEST_FLAG, &ssi->flags))
		ssi_waketest(cl, 0); /* FIXME: To be removed */
	spin_lock_bh(&ssi->lock);
	del_timer(&ssi->rx_wd);
	del_timer(&ssi->tx_wd);
	del_timer(&ssi->keep_alive);
	/* back to INIT / SEND_IDLE / RECV_IDLE (all defined as 0) */
	ssi->main_state = 0;
	ssi->send_state = 0;
	ssi->recv_state = 0;
	ssi->flags = 0;
	ssi->rxid = 0;
	ssi->txid = 0;
	list_for_each_safe(head, tmp, &ssi->txqueue) {
		msg = list_entry(head, struct hsi_msg, link);
		dev_dbg(&cl->device, "Pending TX data\n");
		list_del(head);
		ssip_free_data(msg);
	}
	ssi->txqueue_len = 0;
	spin_unlock_bh(&ssi->lock);
}
/* Dump the protocol state to the kernel log (error path diagnostics). */
static void ssip_dump_state(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	spin_lock_bh(&ssi->lock);
	dev_err(&cl->device, "Main state: %d\n", ssi->main_state);
	dev_err(&cl->device, "Recv state: %d\n", ssi->recv_state);
	dev_err(&cl->device, "Send state: %d\n", ssi->send_state);
	dev_err(&cl->device, "CMT %s\n", (ssi->main_state == ACTIVE) ?
							"Online" : "Offline");
	dev_err(&cl->device, "Wake test %d\n",
				test_bit(SSIP_WAKETEST_FLAG, &ssi->flags));
	dev_err(&cl->device, "Data RX id: %d\n", ssi->rxid);
	dev_err(&cl->device, "Data TX id: %d\n", ssi->txid);

	list_for_each_entry(msg, &ssi->txqueue, link)
		dev_err(&cl->device, "pending TX data (%p)\n", msg);
	spin_unlock_bh(&ssi->lock);
}
/*
 * Fatal protocol error handler: dump state, reset everything and
 * re-arm a command read so a new handshake can be received.
 */
static void ssip_error(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	ssip_dump_state(cl);
	ssip_reset(cl);
	msg = ssip_claim_cmd(ssi);
	msg->complete = ssip_rxcmd_complete;
	hsi_async_read(cl, msg);
}
/*
 * Keep-alive timer callback (workaround for an SSI HW bug): re-arms
 * itself every SSIP_KATOUT ms while a transfer is pending, and stops
 * once both state machines are idle. Runs in timer (softirq) context,
 * hence plain spin_lock() rather than the _bh variant.
 */
static void ssip_keep_alive(struct timer_list *t)
{
	struct ssi_protocol *ssi = from_timer(ssi, t, keep_alive);
	struct hsi_client *cl = ssi->cl;

	dev_dbg(&cl->device, "Keep alive kick in: m(%d) r(%d) s(%d)\n",
		ssi->main_state, ssi->recv_state, ssi->send_state);

	spin_lock(&ssi->lock);
	if (ssi->recv_state == RECV_IDLE)
		switch (ssi->send_state) {
		case SEND_READY:
			if (atomic_read(&ssi->tx_usecnt) == 0)
				break;
			/*
			 * Fall through. Workaround for cmt-speech:
			 * in that case we rely on the audio timers.
			 */
		case SEND_IDLE:
			spin_unlock(&ssi->lock);
			return;
		}
	mod_timer(&ssi->keep_alive, jiffies + msecs_to_jiffies(SSIP_KATOUT));
	spin_unlock(&ssi->lock);
}
  434. static void ssip_rx_wd(struct timer_list *t)
  435. {
  436. struct ssi_protocol *ssi = from_timer(ssi, t, rx_wd);
  437. struct hsi_client *cl = ssi->cl;
  438. dev_err(&cl->device, "Watchdog triggered\n");
  439. ssip_error(cl);
  440. }
  441. static void ssip_tx_wd(struct timer_list *t)
  442. {
  443. struct ssi_protocol *ssi = from_timer(ssi, t, tx_wd);
  444. struct hsi_client *cl = ssi->cl;
  445. dev_err(&cl->device, "Watchdog triggered\n");
  446. ssip_error(cl);
  447. }
/*
 * Kick off the boot handshake: send BOOTINFO_REQ with our version id
 * and queue a command read for the modem's response.
 */
static void ssip_send_bootinfo_req_cmd(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	dev_dbg(&cl->device, "Issuing BOOT INFO REQ command\n");
	msg = ssip_claim_cmd(ssi);
	ssip_set_cmd(msg, SSIP_BOOTINFO_REQ_CMD(SSIP_LOCAL_VERID));
	msg->complete = ssip_release_cmd;
	hsi_async_write(cl, msg);
	dev_dbg(&cl->device, "Issuing RX command\n");
	msg = ssip_claim_cmd(ssi);
	msg->complete = ssip_rxcmd_complete;
	hsi_async_read(cl, msg);
}
/*
 * CA wake line went up: acknowledge with a READY command once the
 * link is ACTIVE.
 */
static void ssip_start_rx(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	dev_dbg(&cl->device, "RX start M(%d) R(%d)\n", ssi->main_state,
							ssi->recv_state);
	spin_lock_bh(&ssi->lock);
	/*
	 * We can have two UP events in a row due to a short low
	 * high transition. Therefore we need to ignore the second UP event.
	 */
	if ((ssi->main_state != ACTIVE) || (ssi->recv_state == RECV_READY)) {
		spin_unlock_bh(&ssi->lock);
		return;
	}
	ssip_set_rxstate(ssi, RECV_READY);
	spin_unlock_bh(&ssi->lock);

	msg = ssip_claim_cmd(ssi);
	ssip_set_cmd(msg, SSIP_READY_CMD);
	msg->complete = ssip_release_cmd;
	dev_dbg(&cl->device, "Send READY\n");
	hsi_async_write(cl, msg);
}
/* CA wake line went down: drop back to RECV_IDLE while link is active. */
static void ssip_stop_rx(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	dev_dbg(&cl->device, "RX stop M(%d)\n", ssi->main_state);
	spin_lock_bh(&ssi->lock);
	if (likely(ssi->main_state == ACTIVE))
		ssip_set_rxstate(ssi, RECV_IDLE);
	spin_unlock_bh(&ssi->lock);
}
  494. static void ssip_free_strans(struct hsi_msg *msg)
  495. {
  496. ssip_free_data(msg->context);
  497. ssip_release_cmd(msg);
  498. }
/*
 * START_TRANS command was sent: release the command back to the pool
 * and push the attached data message (msg->context) out.
 */
static void ssip_strans_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *data;

	data = msg->context;
	ssip_release_cmd(msg);
	spin_lock_bh(&ssi->lock);
	ssip_set_txstate(ssi, SENDING);
	spin_unlock_bh(&ssi->lock);
	hsi_async_write(cl, data);
}
/*
 * Dequeue one data message from the TX queue and announce it with a
 * START_TRANS command (PDU length in 32-bit frames plus the running
 * txid). The data itself goes out from ssip_strans_complete().
 * Returns 0 when the queue is empty, otherwise the hsi_async_write()
 * result for the command.
 */
static int ssip_xmit(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg, *dmsg;
	struct sk_buff *skb;

	spin_lock_bh(&ssi->lock);
	if (list_empty(&ssi->txqueue)) {
		spin_unlock_bh(&ssi->lock);
		return 0;
	}
	dmsg = list_first_entry(&ssi->txqueue, struct hsi_msg, link);
	list_del(&dmsg->link);
	ssi->txqueue_len--;
	spin_unlock_bh(&ssi->lock);

	msg = ssip_claim_cmd(ssi);
	skb = dmsg->context;
	msg->context = dmsg;
	msg->complete = ssip_strans_complete;
	/* on failure/flush the destructor frees both command and data */
	msg->destructor = ssip_free_strans;

	spin_lock_bh(&ssi->lock);
	ssip_set_cmd(msg, SSIP_START_TRANS_CMD(SSIP_BYTES_TO_FRAMES(skb->len),
								ssi->txid));
	ssi->txid++;
	ssip_set_txstate(ssi, SENDING);
	spin_unlock_bh(&ssi->lock);

	dev_dbg(&cl->device, "Send STRANS (%d frames)\n",
						SSIP_BYTES_TO_FRAMES(skb->len));

	return hsi_async_write(cl, msg);
}
  540. /* In soft IRQ context */
  541. static void ssip_pn_rx(struct sk_buff *skb)
  542. {
  543. struct net_device *dev = skb->dev;
  544. if (unlikely(!netif_running(dev))) {
  545. dev_dbg(&dev->dev, "Drop RX packet\n");
  546. dev->stats.rx_dropped++;
  547. dev_kfree_skb(skb);
  548. return;
  549. }
  550. if (unlikely(!pskb_may_pull(skb, SSIP_MIN_PN_HDR))) {
  551. dev_dbg(&dev->dev, "Error drop RX packet\n");
  552. dev->stats.rx_errors++;
  553. dev->stats.rx_length_errors++;
  554. dev_kfree_skb(skb);
  555. return;
  556. }
  557. dev->stats.rx_packets++;
  558. dev->stats.rx_bytes += skb->len;
  559. /* length field is exchanged in network byte order */
  560. ((u16 *)skb->data)[2] = ntohs(((u16 *)skb->data)[2]);
  561. dev_dbg(&dev->dev, "RX length fixed (%04x -> %u)\n",
  562. ((u16 *)skb->data)[2], ntohs(((u16 *)skb->data)[2]));
  563. skb->protocol = htons(ETH_P_PHONET);
  564. skb_reset_mac_header(skb);
  565. __skb_pull(skb, 1);
  566. netif_rx(skb);
  567. }
  568. static void ssip_rx_data_complete(struct hsi_msg *msg)
  569. {
  570. struct hsi_client *cl = msg->cl;
  571. struct ssi_protocol *ssi = hsi_client_drvdata(cl);
  572. struct sk_buff *skb;
  573. if (msg->status == HSI_STATUS_ERROR) {
  574. dev_err(&cl->device, "RX data error\n");
  575. ssip_free_data(msg);
  576. ssip_error(cl);
  577. return;
  578. }
  579. del_timer(&ssi->rx_wd); /* FIXME: Revisit */
  580. skb = msg->context;
  581. ssip_pn_rx(skb);
  582. hsi_free_msg(msg);
  583. }
/*
 * Handle a BOOTINFO_REQ from the modem: (re)enter HANDSHAKE, start
 * the wake line test and answer with BOOTINFO_RESP. A request while
 * ACTIVE means the modem rebooted, so reset first and fall through
 * to the handshake path.
 */
static void ssip_rx_bootinforeq(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	/* Workaroud: Ignore CMT Loader message leftover */
	if (cmd == SSIP_CMT_LOADER_SYNC)
		return;

	switch (ssi->main_state) {
	case ACTIVE:
		dev_err(&cl->device, "Boot info req on active state\n");
		ssip_error(cl);
		/* Fall through */
	case INIT:
	case HANDSHAKE:
		spin_lock_bh(&ssi->lock);
		ssi->main_state = HANDSHAKE;
		spin_unlock_bh(&ssi->lock);

		if (!test_and_set_bit(SSIP_WAKETEST_FLAG, &ssi->flags))
			ssi_waketest(cl, 1); /* FIXME: To be removed */

		spin_lock_bh(&ssi->lock);
		/* Start boot handshake watchdog */
		mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
		spin_unlock_bh(&ssi->lock);
		dev_dbg(&cl->device, "Send BOOTINFO_RESP\n");
		if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID)
			dev_warn(&cl->device, "boot info req verid mismatch\n");
		msg = ssip_claim_cmd(ssi);
		ssip_set_cmd(msg, SSIP_BOOTINFO_RESP_CMD(SSIP_LOCAL_VERID));
		msg->complete = ssip_release_cmd;
		hsi_async_write(cl, msg);
		break;
	default:
		dev_dbg(&cl->device, "Wrong state M(%d)\n", ssi->main_state);
		break;
	}
}
/*
 * Handle a BOOTINFO_RESP from the modem: while still handshaking,
 * re-arm tx_wd as a boot watchdog; ignore the response when already
 * ACTIVE.
 */
static void ssip_rx_bootinforesp(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID)
		dev_warn(&cl->device, "boot info resp verid mismatch\n");

	spin_lock_bh(&ssi->lock);
	if (ssi->main_state != ACTIVE)
		/* Use tx_wd as a boot watchdog in non ACTIVE state */
		mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
	else
		dev_dbg(&cl->device, "boot info resp ignored M(%d)\n",
							ssi->main_state);
	spin_unlock_bh(&ssi->lock);
}
/*
 * Handle the WAKETEST_RESULT command that ends the boot handshake:
 * stop the wake test, go ACTIVE and bring the netdev carrier up, or
 * escalate to ssip_error() when the modem reports a failed test.
 */
static void ssip_rx_waketest(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	unsigned int wkres = SSIP_PAYLOAD(cmd);

	spin_lock_bh(&ssi->lock);
	if (ssi->main_state != HANDSHAKE) {
		dev_dbg(&cl->device, "wake lines test ignored M(%d)\n",
							ssi->main_state);
		spin_unlock_bh(&ssi->lock);
		return;
	}
	spin_unlock_bh(&ssi->lock);

	if (test_and_clear_bit(SSIP_WAKETEST_FLAG, &ssi->flags))
		ssi_waketest(cl, 0); /* FIXME: To be removed */

	spin_lock_bh(&ssi->lock);
	ssi->main_state = ACTIVE;
	del_timer(&ssi->tx_wd); /* Stop boot handshake timer */
	spin_unlock_bh(&ssi->lock);

	dev_notice(&cl->device, "WAKELINES TEST %s\n",
				wkres & SSIP_WAKETEST_FAILED ? "FAILED" : "OK");
	if (wkres & SSIP_WAKETEST_FAILED) {
		ssip_error(cl);
		return;
	}
	dev_dbg(&cl->device, "CMT is ONLINE\n");
	netif_wake_queue(ssi->netdev);
	netif_carrier_on(ssi->netdev);
}
/*
 * Handle a READY command: the modem accepted our wake line request,
 * so move from WAIT4READY to SEND_READY and start transmitting.
 * Spurious READYs (wrong main/send state) are ignored.
 */
static void ssip_rx_ready(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	spin_lock_bh(&ssi->lock);
	if (unlikely(ssi->main_state != ACTIVE)) {
		dev_dbg(&cl->device, "READY on wrong state: S(%d) M(%d)\n",
					ssi->send_state, ssi->main_state);
		spin_unlock_bh(&ssi->lock);
		return;
	}
	if (ssi->send_state != WAIT4READY) {
		dev_dbg(&cl->device, "Ignore spurious READY command\n");
		spin_unlock_bh(&ssi->lock);
		return;
	}
	ssip_set_txstate(ssi, SEND_READY);
	spin_unlock_bh(&ssi->lock);
	ssip_xmit(cl);
}
/*
 * Handle a START_TRANS command announcing incoming data: verify the
 * link state and message id sequence, then allocate an skb of
 * len * 4 bytes (len is in 32-bit frames) and queue the data read.
 * Any failure escalates to ssip_error().
 */
static void ssip_rx_strans(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct sk_buff *skb;
	struct hsi_msg *msg;
	int len = SSIP_PDU_LENGTH(cmd);

	dev_dbg(&cl->device, "RX strans: %d frames\n", len);
	spin_lock_bh(&ssi->lock);
	if (unlikely(ssi->main_state != ACTIVE)) {
		dev_err(&cl->device, "START TRANS wrong state: S(%d) M(%d)\n",
					ssi->send_state, ssi->main_state);
		spin_unlock_bh(&ssi->lock);
		return;
	}
	ssip_set_rxstate(ssi, RECEIVING);
	/* ids must arrive in sequence; a gap means lost traffic */
	if (unlikely(SSIP_MSG_ID(cmd) != ssi->rxid)) {
		dev_err(&cl->device, "START TRANS id %d expected %d\n",
					SSIP_MSG_ID(cmd), ssi->rxid);
		spin_unlock_bh(&ssi->lock);
		goto out1;
	}
	ssi->rxid++;
	spin_unlock_bh(&ssi->lock);
	skb = netdev_alloc_skb(ssi->netdev, len * 4);
	if (unlikely(!skb)) {
		dev_err(&cl->device, "No memory for rx skb\n");
		goto out1;
	}
	skb->dev = ssi->netdev;
	skb_put(skb, len * 4);
	msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC);
	if (unlikely(!msg)) {
		dev_err(&cl->device, "No memory for RX data msg\n");
		goto out2;
	}
	msg->complete = ssip_rx_data_complete;
	hsi_async_read(cl, msg);

	return;
out2:
	dev_kfree_skb(skb);
out1:
	ssip_error(cl);
}
/*
 * Completion handler for command reads: immediately re-queue the
 * message for the next command, then dispatch on the command id.
 */
static void ssip_rxcmd_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	u32 cmd = ssip_get_cmd(msg);
	unsigned int cmdid = SSIP_COMMAND(cmd);

	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&cl->device, "RX error detected\n");
		ssip_release_cmd(msg);
		ssip_error(cl);
		return;
	}
	/* re-arm the read before handling: commands keep flowing */
	hsi_async_read(cl, msg);
	dev_dbg(&cl->device, "RX cmd: 0x%08x\n", cmd);
	switch (cmdid) {
	case SSIP_SW_BREAK:
		/* Ignored */
		break;
	case SSIP_BOOTINFO_REQ:
		ssip_rx_bootinforeq(cl, cmd);
		break;
	case SSIP_BOOTINFO_RESP:
		ssip_rx_bootinforesp(cl, cmd);
		break;
	case SSIP_WAKETEST_RESULT:
		ssip_rx_waketest(cl, cmd);
		break;
	case SSIP_START_TRANS:
		ssip_rx_strans(cl, cmd);
		break;
	case SSIP_READY:
		ssip_rx_ready(cl);
		break;
	default:
		dev_warn(&cl->device, "command 0x%08x not supported\n", cmd);
		break;
	}
}
/*
 * SW break sent: the TX burst is finished. Go idle (dropping the wake
 * line unless a slave still holds it) or, if more data was queued in
 * the meantime, start the next transmission. Either way the netdev
 * queue can run again.
 */
static void ssip_swbreak_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	ssip_release_cmd(msg);
	spin_lock_bh(&ssi->lock);
	if (list_empty(&ssi->txqueue)) {
		if (atomic_read(&ssi->tx_usecnt)) {
			/* a slave client still needs the wake line */
			ssip_set_txstate(ssi, SEND_READY);
		} else {
			ssip_set_txstate(ssi, SEND_IDLE);
			hsi_stop_tx(cl);
		}
		spin_unlock_bh(&ssi->lock);
	} else {
		spin_unlock_bh(&ssi->lock);
		ssip_xmit(cl);
	}
	netif_wake_queue(ssi->netdev);
}
/*
 * Completion handler for a TX data transfer: when the queue drained,
 * close the burst with a SW break command; otherwise send the next
 * queued message. Errors escalate to ssip_error(). The data message
 * is freed in all cases.
 */
static void ssip_tx_data_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *cmsg;

	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&cl->device, "TX data error\n");
		ssip_error(cl);
		goto out;
	}
	spin_lock_bh(&ssi->lock);
	if (list_empty(&ssi->txqueue)) {
		ssip_set_txstate(ssi, SENDING_SWBREAK);
		spin_unlock_bh(&ssi->lock);
		cmsg = ssip_claim_cmd(ssi);
		ssip_set_cmd(cmsg, SSIP_SWBREAK_CMD);
		cmsg->complete = ssip_swbreak_complete;
		dev_dbg(&cl->device, "Send SWBREAK\n");
		hsi_async_write(cl, cmsg);
	} else {
		spin_unlock_bh(&ssi->lock);
		ssip_xmit(cl);
	}
out:
	ssip_free_data(msg);
}
  807. static void ssip_port_event(struct hsi_client *cl, unsigned long event)
  808. {
  809. switch (event) {
  810. case HSI_EVENT_START_RX:
  811. ssip_start_rx(cl);
  812. break;
  813. case HSI_EVENT_STOP_RX:
  814. ssip_stop_rx(cl);
  815. break;
  816. default:
  817. return;
  818. }
  819. }
  820. static int ssip_pn_open(struct net_device *dev)
  821. {
  822. struct hsi_client *cl = to_hsi_client(dev->dev.parent);
  823. struct ssi_protocol *ssi = hsi_client_drvdata(cl);
  824. int err;
  825. err = hsi_claim_port(cl, 1);
  826. if (err < 0) {
  827. dev_err(&cl->device, "SSI port already claimed\n");
  828. return err;
  829. }
  830. err = hsi_register_port_event(cl, ssip_port_event);
  831. if (err < 0) {
  832. dev_err(&cl->device, "Register HSI port event failed (%d)\n",
  833. err);
  834. return err;
  835. }
  836. dev_dbg(&cl->device, "Configuring SSI port\n");
  837. hsi_setup(cl);
  838. if (!test_and_set_bit(SSIP_WAKETEST_FLAG, &ssi->flags))
  839. ssi_waketest(cl, 1); /* FIXME: To be removed */
  840. spin_lock_bh(&ssi->lock);
  841. ssi->main_state = HANDSHAKE;
  842. spin_unlock_bh(&ssi->lock);
  843. ssip_send_bootinfo_req_cmd(cl);
  844. return 0;
  845. }
/*
 * net_device stop (ifdown) handler: reset the protocol state machine,
 * then undo what ssip_pn_open() did (event registration, port claim).
 * Teardown order matters: reset before dropping the port.
 */
static int ssip_pn_stop(struct net_device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev->dev.parent);

	ssip_reset(cl);
	hsi_unregister_port_event(cl);
	hsi_release_port(cl);

	return 0;
}
  854. static void ssip_xmit_work(struct work_struct *work)
  855. {
  856. struct ssi_protocol *ssi =
  857. container_of(work, struct ssi_protocol, work);
  858. struct hsi_client *cl = ssi->cl;
  859. ssip_xmit(cl);
  860. }
  861. static int ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev)
  862. {
  863. struct hsi_client *cl = to_hsi_client(dev->dev.parent);
  864. struct ssi_protocol *ssi = hsi_client_drvdata(cl);
  865. struct hsi_msg *msg;
  866. if ((skb->protocol != htons(ETH_P_PHONET)) ||
  867. (skb->len < SSIP_MIN_PN_HDR))
  868. goto drop;
  869. /* Pad to 32-bits - FIXME: Revisit*/
  870. if ((skb->len & 3) && skb_pad(skb, 4 - (skb->len & 3)))
  871. goto inc_dropped;
  872. /*
  873. * Modem sends Phonet messages over SSI with its own endianness.
  874. * Assume that modem has the same endianness as we do.
  875. */
  876. if (skb_cow_head(skb, 0))
  877. goto drop;
  878. /* length field is exchanged in network byte order */
  879. ((u16 *)skb->data)[2] = htons(((u16 *)skb->data)[2]);
  880. msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC);
  881. if (!msg) {
  882. dev_dbg(&cl->device, "Dropping tx data: No memory\n");
  883. goto drop;
  884. }
  885. msg->complete = ssip_tx_data_complete;
  886. spin_lock_bh(&ssi->lock);
  887. if (unlikely(ssi->main_state != ACTIVE)) {
  888. spin_unlock_bh(&ssi->lock);
  889. dev_dbg(&cl->device, "Dropping tx data: CMT is OFFLINE\n");
  890. goto drop2;
  891. }
  892. list_add_tail(&msg->link, &ssi->txqueue);
  893. ssi->txqueue_len++;
  894. if (dev->tx_queue_len < ssi->txqueue_len) {
  895. dev_info(&cl->device, "TX queue full %d\n", ssi->txqueue_len);
  896. netif_stop_queue(dev);
  897. }
  898. if (ssi->send_state == SEND_IDLE) {
  899. ssip_set_txstate(ssi, WAIT4READY);
  900. spin_unlock_bh(&ssi->lock);
  901. dev_dbg(&cl->device, "Start TX qlen %d\n", ssi->txqueue_len);
  902. hsi_start_tx(cl);
  903. } else if (ssi->send_state == SEND_READY) {
  904. /* Needed for cmt-speech workaround */
  905. dev_dbg(&cl->device, "Start TX on SEND READY qlen %d\n",
  906. ssi->txqueue_len);
  907. spin_unlock_bh(&ssi->lock);
  908. schedule_work(&ssi->work);
  909. } else {
  910. spin_unlock_bh(&ssi->lock);
  911. }
  912. dev->stats.tx_packets++;
  913. dev->stats.tx_bytes += skb->len;
  914. return 0;
  915. drop2:
  916. hsi_free_msg(msg);
  917. drop:
  918. dev_kfree_skb(skb);
  919. inc_dropped:
  920. dev->stats.tx_dropped++;
  921. return 0;
  922. }
/*
 * CMT reset event handler: invoked by the SSI master driver when a
 * modem reset is detected; drives the client into the error path.
 */
void ssip_reset_event(struct hsi_client *master)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(master);

	dev_err(&ssi->cl->device, "CMT reset detected!\n");
	ssip_error(ssi->cl);
}
EXPORT_SYMBOL_GPL(ssip_reset_event);
/* Phonet-over-SSI net_device operations */
static const struct net_device_ops ssip_pn_ops = {
	.ndo_open	= ssip_pn_open,
	.ndo_stop	= ssip_pn_stop,
	.ndo_start_xmit	= ssip_pn_xmit,
};
/*
 * alloc_netdev() setup callback: initialize the Phonet point-to-point
 * interface (no ARP, 1-byte media address, McSAAB queue length/MTU).
 */
static void ssip_pn_setup(struct net_device *dev)
{
	dev->features		= 0;
	dev->netdev_ops		= &ssip_pn_ops;
	dev->type		= ARPHRD_PHONET;
	dev->flags		= IFF_POINTOPOINT | IFF_NOARP;
	dev->mtu		= SSIP_DEFAULT_MTU;
	dev->hard_header_len	= 1;
	dev->dev_addr[0]	= PN_MEDIA_SOS;
	dev->addr_len		= 1;
	dev->tx_queue_len	= SSIP_TXQUEUE_LEN;

	/* Core frees the netdev on unregister */
	dev->needs_free_netdev	= true;
	dev->header_ops		= &phonet_header_ops;
}
/*
 * Bind an HSI client to the SSI protocol: allocate per-link state,
 * resolve the control/data channel ids, pre-allocate command buffers
 * and register a Phonet network device.  Failures unwind everything
 * allocated so far via the goto chain (reverse order of acquisition).
 */
static int ssi_protocol_probe(struct device *dev)
{
	static const char ifname[] = "phonet%d";
	struct hsi_client *cl = to_hsi_client(dev);
	struct ssi_protocol *ssi;
	int err;

	ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
	if (!ssi)
		return -ENOMEM;

	spin_lock_init(&ssi->lock);
	/* Deferrable watchdogs: firing slightly late is acceptable */
	timer_setup(&ssi->rx_wd, ssip_rx_wd, TIMER_DEFERRABLE);
	timer_setup(&ssi->tx_wd, ssip_tx_wd, TIMER_DEFERRABLE);
	timer_setup(&ssi->keep_alive, ssip_keep_alive, 0);
	INIT_LIST_HEAD(&ssi->txqueue);
	INIT_LIST_HEAD(&ssi->cmdqueue);
	atomic_set(&ssi->tx_usecnt, 0);
	hsi_client_set_drvdata(cl, ssi);
	ssi->cl = cl;
	INIT_WORK(&ssi->work, ssip_xmit_work);

	ssi->channel_id_cmd = hsi_get_channel_id_by_name(cl, "mcsaab-control");
	if (ssi->channel_id_cmd < 0) {
		err = ssi->channel_id_cmd;
		dev_err(dev, "Could not get cmd channel (%d)\n", err);
		goto out;
	}

	ssi->channel_id_data = hsi_get_channel_id_by_name(cl, "mcsaab-data");
	if (ssi->channel_id_data < 0) {
		err = ssi->channel_id_data;
		dev_err(dev, "Could not get data channel (%d)\n", err);
		goto out;
	}

	err = ssip_alloc_cmds(ssi);
	if (err < 0) {
		dev_err(dev, "No memory for commands\n");
		goto out;
	}

	ssi->netdev = alloc_netdev(0, ifname, NET_NAME_UNKNOWN, ssip_pn_setup);
	if (!ssi->netdev) {
		dev_err(dev, "No memory for netdev\n");
		err = -ENOMEM;
		goto out1;
	}

	/* MTU range: 6 - 65535 */
	ssi->netdev->min_mtu = PHONET_MIN_MTU;
	ssi->netdev->max_mtu = SSIP_MAX_MTU;

	SET_NETDEV_DEV(ssi->netdev, dev);
	netif_carrier_off(ssi->netdev);
	err = register_netdev(ssi->netdev);
	if (err < 0) {
		dev_err(dev, "Register netdev failed (%d)\n", err);
		goto out2;
	}

	list_add(&ssi->link, &ssip_list);

	dev_dbg(dev, "channel configuration: cmd=%d, data=%d\n",
		ssi->channel_id_cmd, ssi->channel_id_data);

	return 0;
out2:
	free_netdev(ssi->netdev);
out1:
	ssip_free_cmds(ssi);
out:
	kfree(ssi);

	return err;
}
/*
 * Unbind: tear down in reverse order of ssi_protocol_probe() —
 * drop from the global list, unregister the netdev, free the command
 * pool, clear the drvdata pointer, then free the state itself.
 */
static int ssi_protocol_remove(struct device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev);
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	list_del(&ssi->link);
	unregister_netdev(ssi->netdev);
	ssip_free_cmds(ssi);
	hsi_client_set_drvdata(cl, NULL);
	kfree(ssi);

	return 0;
}
/* HSI client driver binding for the "ssi-protocol" client */
static struct hsi_client_driver ssip_driver = {
	.driver = {
		.name	= "ssi-protocol",
		.owner	= THIS_MODULE,
		.probe	= ssi_protocol_probe,
		.remove	= ssi_protocol_remove,
	},
};
  1033. static int __init ssip_init(void)
  1034. {
  1035. pr_info("SSI protocol aka McSAAB added\n");
  1036. return hsi_register_client_driver(&ssip_driver);
  1037. }
  1038. module_init(ssip_init);
/* Module exit: unregister the driver before announcing removal. */
static void __exit ssip_exit(void)
{
	hsi_unregister_client_driver(&ssip_driver);
	pr_info("SSI protocol driver removed\n");
}
module_exit(ssip_exit);
  1045. MODULE_ALIAS("hsi:ssi-protocol");
  1046. MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>");
  1047. MODULE_AUTHOR("Remi Denis-Courmont <remi.denis-courmont@nokia.com>");
  1048. MODULE_DESCRIPTION("SSI protocol improved aka McSAAB");
  1049. MODULE_LICENSE("GPL");