client.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975
  1. /*
  2. *
  3. * Intel Management Engine Interface (Intel MEI) Linux driver
  4. * Copyright (c) 2003-2012, Intel Corporation.
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms and conditions of the GNU General Public License,
  8. * version 2, as published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope it will be useful, but WITHOUT
  11. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  13. * more details.
  14. *
  15. */
  16. #include <linux/pci.h>
  17. #include <linux/sched.h>
  18. #include <linux/wait.h>
  19. #include <linux/delay.h>
  20. #include <linux/mei.h>
  21. #include "mei_dev.h"
  22. #include "hbm.h"
  23. #include "client.h"
  24. /**
  25. * mei_me_cl_by_uuid - locate index of me client
  26. *
  27. * @dev: mei device
  28. *
  29. * Locking: called under "dev->device_lock" lock
  30. *
  31. * returns me client index or -ENOENT if not found
  32. */
  33. int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid)
  34. {
  35. int i;
  36. for (i = 0; i < dev->me_clients_num; ++i)
  37. if (uuid_le_cmp(*uuid,
  38. dev->me_clients[i].props.protocol_name) == 0)
  39. return i;
  40. return -ENOENT;
  41. }
  42. /**
  43. * mei_me_cl_by_id return index to me_clients for client_id
  44. *
  45. * @dev: the device structure
  46. * @client_id: me client id
  47. *
  48. * Locking: called under "dev->device_lock" lock
  49. *
  50. * returns index on success, -ENOENT on failure.
  51. */
  52. int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
  53. {
  54. int i;
  55. for (i = 0; i < dev->me_clients_num; i++)
  56. if (dev->me_clients[i].client_id == client_id)
  57. return i;
  58. return -ENOENT;
  59. }
  60. /**
  61. * mei_io_list_flush - removes list entry belonging to cl.
  62. *
  63. * @list: An instance of our list structure
  64. * @cl: host client
  65. */
  66. void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
  67. {
  68. struct mei_cl_cb *cb;
  69. struct mei_cl_cb *next;
  70. list_for_each_entry_safe(cb, next, &list->list, list) {
  71. if (cb->cl && mei_cl_cmp_id(cl, cb->cl))
  72. list_del(&cb->list);
  73. }
  74. }
  75. /**
  76. * mei_io_cb_free - free mei_cb_private related memory
  77. *
  78. * @cb: mei callback struct
  79. */
  80. void mei_io_cb_free(struct mei_cl_cb *cb)
  81. {
  82. if (cb == NULL)
  83. return;
  84. kfree(cb->request_buffer.data);
  85. kfree(cb->response_buffer.data);
  86. kfree(cb);
  87. }
  88. /**
  89. * mei_io_cb_init - allocate and initialize io callback
  90. *
  91. * @cl - mei client
  92. * @fp: pointer to file structure
  93. *
  94. * returns mei_cl_cb pointer or NULL;
  95. */
  96. struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
  97. {
  98. struct mei_cl_cb *cb;
  99. cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
  100. if (!cb)
  101. return NULL;
  102. mei_io_list_init(cb);
  103. cb->file_object = fp;
  104. cb->cl = cl;
  105. cb->buf_idx = 0;
  106. return cb;
  107. }
  108. /**
  109. * mei_io_cb_alloc_req_buf - allocate request buffer
  110. *
  111. * @cb: io callback structure
  112. * @length: size of the buffer
  113. *
  114. * returns 0 on success
  115. * -EINVAL if cb is NULL
  116. * -ENOMEM if allocation failed
  117. */
  118. int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
  119. {
  120. if (!cb)
  121. return -EINVAL;
  122. if (length == 0)
  123. return 0;
  124. cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
  125. if (!cb->request_buffer.data)
  126. return -ENOMEM;
  127. cb->request_buffer.size = length;
  128. return 0;
  129. }
  130. /**
  131. * mei_io_cb_alloc_resp_buf - allocate response buffer
  132. *
  133. * @cb: io callback structure
  134. * @length: size of the buffer
  135. *
  136. * returns 0 on success
  137. * -EINVAL if cb is NULL
  138. * -ENOMEM if allocation failed
  139. */
  140. int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
  141. {
  142. if (!cb)
  143. return -EINVAL;
  144. if (length == 0)
  145. return 0;
  146. cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
  147. if (!cb->response_buffer.data)
  148. return -ENOMEM;
  149. cb->response_buffer.size = length;
  150. return 0;
  151. }
  152. /**
  153. * mei_cl_flush_queues - flushes queue lists belonging to cl.
  154. *
  155. * @cl: host client
  156. */
  157. int mei_cl_flush_queues(struct mei_cl *cl)
  158. {
  159. struct mei_device *dev;
  160. if (WARN_ON(!cl || !cl->dev))
  161. return -EINVAL;
  162. dev = cl->dev;
  163. cl_dbg(dev, cl, "remove list entry belonging to cl\n");
  164. mei_io_list_flush(&cl->dev->read_list, cl);
  165. mei_io_list_flush(&cl->dev->write_list, cl);
  166. mei_io_list_flush(&cl->dev->write_waiting_list, cl);
  167. mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
  168. mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
  169. mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
  170. mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
  171. return 0;
  172. }
  173. /**
  174. * mei_cl_init - initializes cl.
  175. *
  176. * @cl: host client to be initialized
  177. * @dev: mei device
  178. */
  179. void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
  180. {
  181. memset(cl, 0, sizeof(struct mei_cl));
  182. init_waitqueue_head(&cl->wait);
  183. init_waitqueue_head(&cl->rx_wait);
  184. init_waitqueue_head(&cl->tx_wait);
  185. INIT_LIST_HEAD(&cl->link);
  186. INIT_LIST_HEAD(&cl->device_link);
  187. cl->reading_state = MEI_IDLE;
  188. cl->writing_state = MEI_IDLE;
  189. cl->dev = dev;
  190. }
  191. /**
  192. * mei_cl_allocate - allocates cl structure and sets it up.
  193. *
  194. * @dev: mei device
  195. * returns The allocated file or NULL on failure
  196. */
  197. struct mei_cl *mei_cl_allocate(struct mei_device *dev)
  198. {
  199. struct mei_cl *cl;
  200. cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
  201. if (!cl)
  202. return NULL;
  203. mei_cl_init(cl, dev);
  204. return cl;
  205. }
  206. /**
  207. * mei_cl_find_read_cb - find this cl's callback in the read list
  208. *
  209. * @cl: host client
  210. *
  211. * returns cb on success, NULL on error
  212. */
  213. struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
  214. {
  215. struct mei_device *dev = cl->dev;
  216. struct mei_cl_cb *cb;
  217. list_for_each_entry(cb, &dev->read_list.list, list)
  218. if (mei_cl_cmp_id(cl, cb->cl))
  219. return cb;
  220. return NULL;
  221. }
/**
 * mei_cl_link - allocate host id in the host map and link the client
 *
 * @cl: host client
 * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) to pick a free one
 *
 * returns 0 on success
 *	-EINVAL on incorrect values
 *	-EMFILE if no free host id is available or the open handle
 *	limit has been reached
 */
int mei_cl_link(struct mei_cl *cl, int id)
{
	struct mei_device *dev;
	long open_handle_count;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	/* If Id is not assigned get one*/
	if (id == MEI_HOST_CLIENT_ID_ANY)
		id = find_first_zero_bit(dev->host_clients_map,
					 MEI_CLIENTS_MAX);

	/* find_first_zero_bit returns MEI_CLIENTS_MAX when the map is full */
	if (id >= MEI_CLIENTS_MAX) {
		dev_err(&dev->pdev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
		return -EMFILE;
	}

	/* amthif handles are counted separately; enforce the combined limit */
	open_handle_count = dev->open_handle_count + dev->iamthif_open_count;
	if (open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(&dev->pdev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		return -EMFILE;
	}

	dev->open_handle_count++;

	cl->host_client_id = id;
	list_add_tail(&cl->link, &dev->file_list);

	set_bit(id, dev->host_clients_map);

	cl->state = MEI_FILE_INITIALIZING;

	cl_dbg(dev, cl, "link cl\n");
	return 0;
}
  260. /**
  261. * mei_cl_unlink - remove me_cl from the list
  262. *
  263. * @cl: host client
  264. */
  265. int mei_cl_unlink(struct mei_cl *cl)
  266. {
  267. struct mei_device *dev;
  268. /* don't shout on error exit path */
  269. if (!cl)
  270. return 0;
  271. /* wd and amthif might not be initialized */
  272. if (!cl->dev)
  273. return 0;
  274. dev = cl->dev;
  275. cl_dbg(dev, cl, "unlink client");
  276. if (dev->open_handle_count > 0)
  277. dev->open_handle_count--;
  278. /* never clear the 0 bit */
  279. if (cl->host_client_id)
  280. clear_bit(cl->host_client_id, dev->host_clients_map);
  281. list_del_init(&cl->link);
  282. cl->state = MEI_FILE_INITIALIZING;
  283. return 0;
  284. }
  285. void mei_host_client_init(struct work_struct *work)
  286. {
  287. struct mei_device *dev = container_of(work,
  288. struct mei_device, init_work);
  289. struct mei_client_properties *client_props;
  290. int i;
  291. mutex_lock(&dev->device_lock);
  292. for (i = 0; i < dev->me_clients_num; i++) {
  293. client_props = &dev->me_clients[i].props;
  294. if (!uuid_le_cmp(client_props->protocol_name, mei_amthif_guid))
  295. mei_amthif_host_init(dev);
  296. else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid))
  297. mei_wd_host_init(dev);
  298. else if (!uuid_le_cmp(client_props->protocol_name, mei_nfc_guid))
  299. mei_nfc_host_init(dev);
  300. }
  301. dev->dev_state = MEI_DEV_ENABLED;
  302. dev->reset_count = 0;
  303. mutex_unlock(&dev->device_lock);
  304. }
  305. /**
  306. * mei_hbuf_acquire: try to acquire host buffer
  307. *
  308. * @dev: the device structure
  309. * returns true if host buffer was acquired
  310. */
  311. bool mei_hbuf_acquire(struct mei_device *dev)
  312. {
  313. if (!dev->hbuf_is_ready) {
  314. dev_dbg(&dev->pdev->dev, "hbuf is not ready\n");
  315. return false;
  316. }
  317. dev->hbuf_is_ready = false;
  318. return true;
  319. }
  320. /**
  321. * mei_cl_disconnect - disconnect host client from the me one
  322. *
  323. * @cl: host client
  324. *
  325. * Locking: called under "dev->device_lock" lock
  326. *
  327. * returns 0 on success, <0 on failure.
  328. */
  329. int mei_cl_disconnect(struct mei_cl *cl)
  330. {
  331. struct mei_device *dev;
  332. struct mei_cl_cb *cb;
  333. int rets, err;
  334. if (WARN_ON(!cl || !cl->dev))
  335. return -ENODEV;
  336. dev = cl->dev;
  337. cl_dbg(dev, cl, "disconnecting");
  338. if (cl->state != MEI_FILE_DISCONNECTING)
  339. return 0;
  340. cb = mei_io_cb_init(cl, NULL);
  341. if (!cb)
  342. return -ENOMEM;
  343. cb->fop_type = MEI_FOP_CLOSE;
  344. if (mei_hbuf_acquire(dev)) {
  345. if (mei_hbm_cl_disconnect_req(dev, cl)) {
  346. rets = -ENODEV;
  347. cl_err(dev, cl, "failed to disconnect.\n");
  348. goto free;
  349. }
  350. mdelay(10); /* Wait for hardware disconnection ready */
  351. list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
  352. } else {
  353. cl_dbg(dev, cl, "add disconnect cb to control write list\n");
  354. list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
  355. }
  356. mutex_unlock(&dev->device_lock);
  357. err = wait_event_timeout(dev->wait_recvd_msg,
  358. MEI_FILE_DISCONNECTED == cl->state,
  359. mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
  360. mutex_lock(&dev->device_lock);
  361. if (MEI_FILE_DISCONNECTED == cl->state) {
  362. rets = 0;
  363. cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
  364. } else {
  365. rets = -ENODEV;
  366. if (MEI_FILE_DISCONNECTED != cl->state)
  367. cl_err(dev, cl, "wrong status client disconnect.\n");
  368. if (err)
  369. cl_dbg(dev, cl, "wait failed disconnect err=%08x\n",
  370. err);
  371. cl_err(dev, cl, "failed to disconnect from FW client.\n");
  372. }
  373. mei_io_list_flush(&dev->ctrl_rd_list, cl);
  374. mei_io_list_flush(&dev->ctrl_wr_list, cl);
  375. free:
  376. mei_io_cb_free(cb);
  377. return rets;
  378. }
  379. /**
  380. * mei_cl_is_other_connecting - checks if other
  381. * client with the same me client id is connecting
  382. *
  383. * @cl: private data of the file object
  384. *
  385. * returns true if other client is connected, false - otherwise.
  386. */
  387. bool mei_cl_is_other_connecting(struct mei_cl *cl)
  388. {
  389. struct mei_device *dev;
  390. struct mei_cl *ocl; /* the other client */
  391. if (WARN_ON(!cl || !cl->dev))
  392. return false;
  393. dev = cl->dev;
  394. list_for_each_entry(ocl, &dev->file_list, link) {
  395. if (ocl->state == MEI_FILE_CONNECTING &&
  396. ocl != cl &&
  397. cl->me_client_id == ocl->me_client_id)
  398. return true;
  399. }
  400. return false;
  401. }
/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @file: pointer to file structure the connection belongs to
 *
 * Locking: called under "dev->device_lock" lock; the lock is dropped
 * around the wait for the firmware response and reacquired afterwards.
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct file *file)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cb = mei_io_cb_init(cl, file);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	cb->fop_type = MEI_FOP_CONNECT;

	/* run hbuf acquire last so we don't have to undo */
	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
		/* buffer available: send the connect request now */
		if (mei_hbm_cl_connect_req(dev, cl)) {
			rets = -ENODEV;
			goto out;
		}
		cl->timer_count = MEI_CONNECT_TIMEOUT;
		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
	} else {
		/* defer: request is issued later from the ctrl write list */
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_recvd_msg,
			(cl->state == MEI_FILE_CONNECTED ||
			 cl->state == MEI_FILE_DISCONNECTED),
			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (cl->state != MEI_FILE_CONNECTED) {
		/* something went really wrong */
		if (!cl->status)
			cl->status = -EFAULT;

		/* drop this client's stale control entries */
		mei_io_list_flush(&dev->ctrl_rd_list, cl);
		mei_io_list_flush(&dev->ctrl_wr_list, cl);
	}

	rets = cl->status;

out:
	mei_io_cb_free(cb);
	return rets;
}
  454. /**
  455. * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
  456. *
  457. * @cl: private data of the file object
  458. *
  459. * returns 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
  460. * -ENOENT if mei_cl is not present
  461. * -EINVAL if single_recv_buf == 0
  462. */
  463. int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
  464. {
  465. struct mei_device *dev;
  466. struct mei_me_client *me_cl;
  467. int id;
  468. if (WARN_ON(!cl || !cl->dev))
  469. return -EINVAL;
  470. dev = cl->dev;
  471. if (!dev->me_clients_num)
  472. return 0;
  473. if (cl->mei_flow_ctrl_creds > 0)
  474. return 1;
  475. id = mei_me_cl_by_id(dev, cl->me_client_id);
  476. if (id < 0) {
  477. cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
  478. return id;
  479. }
  480. me_cl = &dev->me_clients[id];
  481. if (me_cl->mei_flow_ctrl_creds) {
  482. if (WARN_ON(me_cl->props.single_recv_buf == 0))
  483. return -EINVAL;
  484. return 1;
  485. }
  486. return 0;
  487. }
  488. /**
  489. * mei_cl_flow_ctrl_reduce - reduces flow_control.
  490. *
  491. * @cl: private data of the file object
  492. *
  493. * @returns
  494. * 0 on success
  495. * -ENOENT when me client is not found
  496. * -EINVAL when ctrl credits are <= 0
  497. */
  498. int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
  499. {
  500. struct mei_device *dev;
  501. struct mei_me_client *me_cl;
  502. int id;
  503. if (WARN_ON(!cl || !cl->dev))
  504. return -EINVAL;
  505. dev = cl->dev;
  506. id = mei_me_cl_by_id(dev, cl->me_client_id);
  507. if (id < 0) {
  508. cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
  509. return id;
  510. }
  511. me_cl = &dev->me_clients[id];
  512. if (me_cl->props.single_recv_buf != 0) {
  513. if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
  514. return -EINVAL;
  515. me_cl->mei_flow_ctrl_creds--;
  516. } else {
  517. if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
  518. return -EINVAL;
  519. cl->mei_flow_ctrl_creds--;
  520. }
  521. return 0;
  522. }
/**
 * mei_cl_read_start - the start read client message function.
 *
 * @cl: host client
 * @length: requested read size; rounded up below to the me client's
 *	max message length
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_read_start(struct mei_cl *cl, size_t length)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;
	int i;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	/* only one read may be outstanding per client */
	if (cl->read_cb) {
		cl_dbg(dev, cl, "read is pending.\n");
		return -EBUSY;
	}
	i = mei_me_cl_by_id(dev, cl->me_client_id);
	if (i < 0) {
		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
		return -ENOTTY;
	}

	cb = mei_io_cb_init(cl, NULL);
	if (!cb)
		return -ENOMEM;

	/* always allocate at least client max message */
	length = max_t(size_t, length, dev->me_clients[i].props.max_msg_length);
	rets = mei_io_cb_alloc_resp_buf(cb, length);
	if (rets)
		goto err;

	cb->fop_type = MEI_FOP_READ;
	if (mei_hbuf_acquire(dev)) {
		/* buffer grabbed: grant flow control and queue on read list */
		if (mei_hbm_cl_flow_control_req(dev, cl)) {
			cl_err(dev, cl, "flow control send failed\n");
			rets = -ENODEV;
			goto err;
		}
		list_add_tail(&cb->list, &dev->read_list.list);
	} else {
		/* defer: flow control is sent later from ctrl write list */
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}
	/* rets is 0 here; remember the pending read callback */
	cl->read_cb = cb;
	return rets;
err:
	mei_io_cb_free(cb);
	return rets;
}
  575. /**
  576. * mei_cl_irq_write - write a message to device
  577. * from the interrupt thread context
  578. *
  579. * @cl: client
  580. * @cb: callback block.
  581. * @cmpl_list: complete list.
  582. *
  583. * returns 0, OK; otherwise error.
  584. */
  585. int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
  586. struct mei_cl_cb *cmpl_list)
  587. {
  588. struct mei_device *dev;
  589. struct mei_msg_data *buf;
  590. struct mei_msg_hdr mei_hdr;
  591. size_t len;
  592. u32 msg_slots;
  593. int slots;
  594. int rets;
  595. if (WARN_ON(!cl || !cl->dev))
  596. return -ENODEV;
  597. dev = cl->dev;
  598. buf = &cb->request_buffer;
  599. rets = mei_cl_flow_ctrl_creds(cl);
  600. if (rets < 0)
  601. return rets;
  602. if (rets == 0) {
  603. cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
  604. return 0;
  605. }
  606. slots = mei_hbuf_empty_slots(dev);
  607. len = buf->size - cb->buf_idx;
  608. msg_slots = mei_data2slots(len);
  609. mei_hdr.host_addr = cl->host_client_id;
  610. mei_hdr.me_addr = cl->me_client_id;
  611. mei_hdr.reserved = 0;
  612. mei_hdr.internal = cb->internal;
  613. if (slots >= msg_slots) {
  614. mei_hdr.length = len;
  615. mei_hdr.msg_complete = 1;
  616. /* Split the message only if we can write the whole host buffer */
  617. } else if (slots == dev->hbuf_depth) {
  618. msg_slots = slots;
  619. len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
  620. mei_hdr.length = len;
  621. mei_hdr.msg_complete = 0;
  622. } else {
  623. /* wait for next time the host buffer is empty */
  624. return 0;
  625. }
  626. cl_dbg(dev, cl, "buf: size = %d idx = %lu\n",
  627. cb->request_buffer.size, cb->buf_idx);
  628. rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
  629. if (rets) {
  630. cl->status = rets;
  631. list_move_tail(&cb->list, &cmpl_list->list);
  632. return rets;
  633. }
  634. cl->status = 0;
  635. cl->writing_state = MEI_WRITING;
  636. cb->buf_idx += mei_hdr.length;
  637. if (mei_hdr.msg_complete) {
  638. if (mei_cl_flow_ctrl_reduce(cl))
  639. return -EIO;
  640. list_move_tail(&cb->list, &dev->write_waiting_list.list);
  641. }
  642. return 0;
  643. }
/**
 * mei_cl_write - submit a write cb to mei device
 *	assumes device_lock is locked
 *
 * @cl: host client
 * @cb: write callback with filled data
 * @blocking: block until the write completes
 *
 * Locking: device_lock is dropped while blocking on tx_wait and then
 * reacquired.
 *
 * returns number of bytes sent on success, <0 on failure.
 */
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	if (WARN_ON(!cb))
		return -EINVAL;

	dev = cl->dev;

	buf = &cb->request_buffer;

	cl_dbg(dev, cl, "mei_cl_write %d\n", buf->size);

	cb->fop_type = MEI_FOP_WRITE;
	cb->buf_idx = 0;
	cl->writing_state = MEI_IDLE;

	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;
	mei_hdr.msg_complete = 0;
	mei_hdr.internal = cb->internal;

	rets = mei_cl_flow_ctrl_creds(cl);
	if (rets < 0)
		goto err;

	/*
	 * no credits or no host buffer: queue the cb and, at "out",
	 * fall into the not-complete branch (msg_complete is still 0)
	 */
	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		rets = buf->size;
		goto out;
	}
	if (!mei_hbuf_acquire(dev)) {
		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
		rets = buf->size;
		goto out;
	}

	/* Check for a maximum length */
	if (buf->size > mei_hbuf_max_len(dev)) {
		mei_hdr.length = mei_hbuf_max_len(dev);
		mei_hdr.msg_complete = 0;
	} else {
		mei_hdr.length = buf->size;
		mei_hdr.msg_complete = 1;
	}

	rets = mei_write_message(dev, &mei_hdr, buf->data);
	if (rets)
		goto err;

	cl->writing_state = MEI_WRITING;
	cb->buf_idx = mei_hdr.length;

out:
	if (mei_hdr.msg_complete) {
		/* whole message sent: consume a credit, wait for FW ack */
		rets = mei_cl_flow_ctrl_reduce(cl);
		if (rets < 0)
			goto err;

		list_add_tail(&cb->list, &dev->write_waiting_list.list);
	} else {
		/* remainder is sent later from the interrupt thread */
		list_add_tail(&cb->list, &dev->write_list.list);
	}

	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
				cl->writing_state == MEI_WRITE_COMPLETE);
		mutex_lock(&dev->device_lock);
		/* wait_event_interruptible returns -ERESTARTSYS */
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto err;
		}
	}

	rets = buf->size;
err:
	return rets;
}
  725. /**
  726. * mei_cl_complete - processes completed operation for a client
  727. *
  728. * @cl: private data of the file object.
  729. * @cb: callback block.
  730. */
  731. void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
  732. {
  733. if (cb->fop_type == MEI_FOP_WRITE) {
  734. mei_io_cb_free(cb);
  735. cb = NULL;
  736. cl->writing_state = MEI_WRITE_COMPLETE;
  737. if (waitqueue_active(&cl->tx_wait))
  738. wake_up_interruptible(&cl->tx_wait);
  739. } else if (cb->fop_type == MEI_FOP_READ &&
  740. MEI_READING == cl->reading_state) {
  741. cl->reading_state = MEI_READ_COMPLETE;
  742. if (waitqueue_active(&cl->rx_wait))
  743. wake_up_interruptible(&cl->rx_wait);
  744. else
  745. mei_cl_bus_rx_event(cl);
  746. }
  747. }
  748. /**
  749. * mei_cl_all_disconnect - disconnect forcefully all connected clients
  750. *
  751. * @dev - mei device
  752. */
  753. void mei_cl_all_disconnect(struct mei_device *dev)
  754. {
  755. struct mei_cl *cl;
  756. list_for_each_entry(cl, &dev->file_list, link) {
  757. cl->state = MEI_FILE_DISCONNECTED;
  758. cl->mei_flow_ctrl_creds = 0;
  759. cl->timer_count = 0;
  760. }
  761. }
  762. /**
  763. * mei_cl_all_wakeup - wake up all readers and writers they can be interrupted
  764. *
  765. * @dev - mei device
  766. */
  767. void mei_cl_all_wakeup(struct mei_device *dev)
  768. {
  769. struct mei_cl *cl;
  770. list_for_each_entry(cl, &dev->file_list, link) {
  771. if (waitqueue_active(&cl->rx_wait)) {
  772. cl_dbg(dev, cl, "Waking up reading client!\n");
  773. wake_up_interruptible(&cl->rx_wait);
  774. }
  775. if (waitqueue_active(&cl->tx_wait)) {
  776. cl_dbg(dev, cl, "Waking up writing client!\n");
  777. wake_up_interruptible(&cl->tx_wait);
  778. }
  779. }
  780. }
  781. /**
  782. * mei_cl_all_write_clear - clear all pending writes
  783. * @dev - mei device
  784. */
  785. void mei_cl_all_write_clear(struct mei_device *dev)
  786. {
  787. struct mei_cl_cb *cb, *next;
  788. struct list_head *list;
  789. list = &dev->write_list.list;
  790. list_for_each_entry_safe(cb, next, list, list) {
  791. list_del(&cb->list);
  792. mei_io_cb_free(cb);
  793. }
  794. list = &dev->write_waiting_list.list;
  795. list_for_each_entry_safe(cb, next, list, list) {
  796. list_del(&cb->list);
  797. mei_io_cb_free(cb);
  798. }
  799. }