client.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360
  1. /*
  2. *
  3. * Intel Management Engine Interface (Intel MEI) Linux driver
  4. * Copyright (c) 2003-2012, Intel Corporation.
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms and conditions of the GNU General Public License,
  8. * version 2, as published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope it will be useful, but WITHOUT
  11. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  13. * more details.
  14. *
  15. */
  16. #include <linux/sched.h>
  17. #include <linux/wait.h>
  18. #include <linux/delay.h>
  19. #include <linux/slab.h>
  20. #include <linux/pm_runtime.h>
  21. #include <linux/mei.h>
  22. #include "mei_dev.h"
  23. #include "hbm.h"
  24. #include "client.h"
  25. /**
  26. * mei_me_cl_init - initialize me client
  27. *
  28. * @me_cl: me client
  29. */
  30. void mei_me_cl_init(struct mei_me_client *me_cl)
  31. {
  32. INIT_LIST_HEAD(&me_cl->list);
  33. kref_init(&me_cl->refcnt);
  34. }
  35. /**
  36. * mei_me_cl_get - increases me client refcount
  37. *
  38. * @me_cl: me client
  39. *
  40. * Locking: called under "dev->device_lock" lock
  41. *
  42. * Return: me client or NULL
  43. */
  44. struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
  45. {
  46. if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
  47. return me_cl;
  48. return NULL;
  49. }
  50. /**
  51. * mei_me_cl_release - free me client
  52. *
  53. * Locking: called under "dev->device_lock" lock
  54. *
  55. * @ref: me_client refcount
  56. */
  57. static void mei_me_cl_release(struct kref *ref)
  58. {
  59. struct mei_me_client *me_cl =
  60. container_of(ref, struct mei_me_client, refcnt);
  61. kfree(me_cl);
  62. }
  63. /**
  64. * mei_me_cl_put - decrease me client refcount and free client if necessary
  65. *
  66. * Locking: called under "dev->device_lock" lock
  67. *
  68. * @me_cl: me client
  69. */
  70. void mei_me_cl_put(struct mei_me_client *me_cl)
  71. {
  72. if (me_cl)
  73. kref_put(&me_cl->refcnt, mei_me_cl_release);
  74. }
  75. /**
  76. * __mei_me_cl_del - delete me client form the list and decrease
  77. * reference counter
  78. *
  79. * @dev: mei device
  80. * @me_cl: me client
  81. *
  82. * Locking: dev->me_clients_rwsem
  83. */
  84. static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
  85. {
  86. if (!me_cl)
  87. return;
  88. list_del(&me_cl->list);
  89. mei_me_cl_put(me_cl);
  90. }
/**
 * mei_me_cl_add - add me client to the list
 *
 * @dev: mei device
 * @me_cl: me client
 *
 * The list takes ownership of the caller's reference on @me_cl;
 * it is dropped later by __mei_me_cl_del().
 */
void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	list_add(&me_cl->list, &dev->me_clients);
	up_write(&dev->me_clients_rwsem);
}
  103. /**
  104. * __mei_me_cl_by_uuid - locate me client by uuid
  105. * increases ref count
  106. *
  107. * @dev: mei device
  108. * @uuid: me client uuid
  109. *
  110. * Return: me client or NULL if not found
  111. *
  112. * Locking: dev->me_clients_rwsem
  113. */
  114. static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev,
  115. const uuid_le *uuid)
  116. {
  117. struct mei_me_client *me_cl;
  118. const uuid_le *pn;
  119. WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));
  120. list_for_each_entry(me_cl, &dev->me_clients, list) {
  121. pn = &me_cl->props.protocol_name;
  122. if (uuid_le_cmp(*uuid, *pn) == 0)
  123. return mei_me_cl_get(me_cl);
  124. }
  125. return NULL;
  126. }
  127. /**
  128. * mei_me_cl_by_uuid - locate me client by uuid
  129. * increases ref count
  130. *
  131. * @dev: mei device
  132. * @uuid: me client uuid
  133. *
  134. * Return: me client or NULL if not found
  135. *
  136. * Locking: dev->me_clients_rwsem
  137. */
  138. struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
  139. const uuid_le *uuid)
  140. {
  141. struct mei_me_client *me_cl;
  142. down_read(&dev->me_clients_rwsem);
  143. me_cl = __mei_me_cl_by_uuid(dev, uuid);
  144. up_read(&dev->me_clients_rwsem);
  145. return me_cl;
  146. }
  147. /**
  148. * mei_me_cl_by_id - locate me client by client id
  149. * increases ref count
  150. *
  151. * @dev: the device structure
  152. * @client_id: me client id
  153. *
  154. * Return: me client or NULL if not found
  155. *
  156. * Locking: dev->me_clients_rwsem
  157. */
  158. struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
  159. {
  160. struct mei_me_client *__me_cl, *me_cl = NULL;
  161. down_read(&dev->me_clients_rwsem);
  162. list_for_each_entry(__me_cl, &dev->me_clients, list) {
  163. if (__me_cl->client_id == client_id) {
  164. me_cl = mei_me_cl_get(__me_cl);
  165. break;
  166. }
  167. }
  168. up_read(&dev->me_clients_rwsem);
  169. return me_cl;
  170. }
  171. /**
  172. * __mei_me_cl_by_uuid_id - locate me client by client id and uuid
  173. * increases ref count
  174. *
  175. * @dev: the device structure
  176. * @uuid: me client uuid
  177. * @client_id: me client id
  178. *
  179. * Return: me client or null if not found
  180. *
  181. * Locking: dev->me_clients_rwsem
  182. */
  183. static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev,
  184. const uuid_le *uuid, u8 client_id)
  185. {
  186. struct mei_me_client *me_cl;
  187. const uuid_le *pn;
  188. WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));
  189. list_for_each_entry(me_cl, &dev->me_clients, list) {
  190. pn = &me_cl->props.protocol_name;
  191. if (uuid_le_cmp(*uuid, *pn) == 0 &&
  192. me_cl->client_id == client_id)
  193. return mei_me_cl_get(me_cl);
  194. }
  195. return NULL;
  196. }
  197. /**
  198. * mei_me_cl_by_uuid_id - locate me client by client id and uuid
  199. * increases ref count
  200. *
  201. * @dev: the device structure
  202. * @uuid: me client uuid
  203. * @client_id: me client id
  204. *
  205. * Return: me client or null if not found
  206. */
  207. struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
  208. const uuid_le *uuid, u8 client_id)
  209. {
  210. struct mei_me_client *me_cl;
  211. down_read(&dev->me_clients_rwsem);
  212. me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id);
  213. up_read(&dev->me_clients_rwsem);
  214. return me_cl;
  215. }
  216. /**
  217. * mei_me_cl_rm_by_uuid - remove all me clients matching uuid
  218. *
  219. * @dev: the device structure
  220. * @uuid: me client uuid
  221. *
  222. * Locking: called under "dev->device_lock" lock
  223. */
  224. void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
  225. {
  226. struct mei_me_client *me_cl;
  227. dev_dbg(dev->dev, "remove %pUl\n", uuid);
  228. down_write(&dev->me_clients_rwsem);
  229. me_cl = __mei_me_cl_by_uuid(dev, uuid);
  230. __mei_me_cl_del(dev, me_cl);
  231. up_write(&dev->me_clients_rwsem);
  232. }
  233. /**
  234. * mei_me_cl_rm_by_uuid_id - remove all me clients matching client id
  235. *
  236. * @dev: the device structure
  237. * @uuid: me client uuid
  238. * @id: me client id
  239. *
  240. * Locking: called under "dev->device_lock" lock
  241. */
  242. void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
  243. {
  244. struct mei_me_client *me_cl;
  245. dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);
  246. down_write(&dev->me_clients_rwsem);
  247. me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
  248. __mei_me_cl_del(dev, me_cl);
  249. up_write(&dev->me_clients_rwsem);
  250. }
/**
 * mei_me_cl_rm_all - remove all me clients
 *
 * @dev: the device structure
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_all(struct mei_device *dev)
{
	struct mei_me_client *me_cl, *next;

	down_write(&dev->me_clients_rwsem);
	/* no lookup reference is taken here, so the single put inside
	 * __mei_me_cl_del() releases the list's reference for each entry
	 */
	list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
		__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}
  266. /**
  267. * mei_cl_cmp_id - tells if the clients are the same
  268. *
  269. * @cl1: host client 1
  270. * @cl2: host client 2
  271. *
  272. * Return: true - if the clients has same host and me ids
  273. * false - otherwise
  274. */
  275. static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
  276. const struct mei_cl *cl2)
  277. {
  278. return cl1 && cl2 &&
  279. (cl1->host_client_id == cl2->host_client_id) &&
  280. (cl1->me_client_id == cl2->me_client_id);
  281. }
  282. /**
  283. * mei_io_cb_free - free mei_cb_private related memory
  284. *
  285. * @cb: mei callback struct
  286. */
  287. void mei_io_cb_free(struct mei_cl_cb *cb)
  288. {
  289. if (cb == NULL)
  290. return;
  291. list_del(&cb->list);
  292. kfree(cb->buf.data);
  293. kfree(cb);
  294. }
  295. /**
  296. * mei_io_cb_init - allocate and initialize io callback
  297. *
  298. * @cl: mei client
  299. * @type: operation type
  300. * @fp: pointer to file structure
  301. *
  302. * Return: mei_cl_cb pointer or NULL;
  303. */
  304. struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type,
  305. struct file *fp)
  306. {
  307. struct mei_cl_cb *cb;
  308. cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
  309. if (!cb)
  310. return NULL;
  311. INIT_LIST_HEAD(&cb->list);
  312. cb->file_object = fp;
  313. cb->cl = cl;
  314. cb->buf_idx = 0;
  315. cb->fop_type = type;
  316. return cb;
  317. }
  318. /**
  319. * __mei_io_list_flush - removes and frees cbs belonging to cl.
  320. *
  321. * @list: an instance of our list structure
  322. * @cl: host client, can be NULL for flushing the whole list
  323. * @free: whether to free the cbs
  324. */
  325. static void __mei_io_list_flush(struct mei_cl_cb *list,
  326. struct mei_cl *cl, bool free)
  327. {
  328. struct mei_cl_cb *cb, *next;
  329. /* enable removing everything if no cl is specified */
  330. list_for_each_entry_safe(cb, next, &list->list, list) {
  331. if (!cl || mei_cl_cmp_id(cl, cb->cl)) {
  332. list_del_init(&cb->list);
  333. if (free)
  334. mei_io_cb_free(cb);
  335. }
  336. }
  337. }
/**
 * mei_io_list_flush - removes list entry belonging to cl.
 *
 * @list: An instance of our list structure
 * @cl: host client
 *
 * Entries are only unlinked, not freed.
 */
void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
{
	__mei_io_list_flush(list, cl, false);
}
/**
 * mei_io_list_free - removes cb belonging to cl and free them
 *
 * @list: An instance of our list structure
 * @cl: host client
 *
 * Unlike mei_io_list_flush(), the removed cbs are also freed.
 */
static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl)
{
	__mei_io_list_flush(list, cl, true);
}
  358. /**
  359. * mei_io_cb_alloc_buf - allocate callback buffer
  360. *
  361. * @cb: io callback structure
  362. * @length: size of the buffer
  363. *
  364. * Return: 0 on success
  365. * -EINVAL if cb is NULL
  366. * -ENOMEM if allocation failed
  367. */
  368. int mei_io_cb_alloc_buf(struct mei_cl_cb *cb, size_t length)
  369. {
  370. if (!cb)
  371. return -EINVAL;
  372. if (length == 0)
  373. return 0;
  374. cb->buf.data = kmalloc(length, GFP_KERNEL);
  375. if (!cb->buf.data)
  376. return -ENOMEM;
  377. cb->buf.size = length;
  378. return 0;
  379. }
  380. /**
  381. * mei_cl_alloc_cb - a convenient wrapper for allocating read cb
  382. *
  383. * @cl: host client
  384. * @length: size of the buffer
  385. * @type: operation type
  386. * @fp: associated file pointer (might be NULL)
  387. *
  388. * Return: cb on success and NULL on failure
  389. */
  390. struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
  391. enum mei_cb_file_ops type, struct file *fp)
  392. {
  393. struct mei_cl_cb *cb;
  394. cb = mei_io_cb_init(cl, type, fp);
  395. if (!cb)
  396. return NULL;
  397. if (mei_io_cb_alloc_buf(cb, length)) {
  398. mei_io_cb_free(cb);
  399. return NULL;
  400. }
  401. return cb;
  402. }
  403. /**
  404. * mei_cl_read_cb - find this cl's callback in the read list
  405. * for a specific file
  406. *
  407. * @cl: host client
  408. * @fp: file pointer (matching cb file object), may be NULL
  409. *
  410. * Return: cb on success, NULL if cb is not found
  411. */
  412. struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, const struct file *fp)
  413. {
  414. struct mei_cl_cb *cb;
  415. list_for_each_entry(cb, &cl->rd_completed, list)
  416. if (!fp || fp == cb->file_object)
  417. return cb;
  418. return NULL;
  419. }
  420. /**
  421. * mei_cl_read_cb_flush - free client's read pending and completed cbs
  422. * for a specific file
  423. *
  424. * @cl: host client
  425. * @fp: file pointer (matching cb file object), may be NULL
  426. */
  427. void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp)
  428. {
  429. struct mei_cl_cb *cb, *next;
  430. list_for_each_entry_safe(cb, next, &cl->rd_completed, list)
  431. if (!fp || fp == cb->file_object)
  432. mei_io_cb_free(cb);
  433. list_for_each_entry_safe(cb, next, &cl->rd_pending, list)
  434. if (!fp || fp == cb->file_object)
  435. mei_io_cb_free(cb);
  436. }
  437. /**
  438. * mei_cl_flush_queues - flushes queue lists belonging to cl.
  439. *
  440. * @cl: host client
  441. * @fp: file pointer (matching cb file object), may be NULL
  442. *
  443. * Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
  444. */
  445. int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
  446. {
  447. struct mei_device *dev;
  448. if (WARN_ON(!cl || !cl->dev))
  449. return -EINVAL;
  450. dev = cl->dev;
  451. cl_dbg(dev, cl, "remove list entry belonging to cl\n");
  452. mei_io_list_free(&cl->dev->write_list, cl);
  453. mei_io_list_free(&cl->dev->write_waiting_list, cl);
  454. mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
  455. mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
  456. mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
  457. mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
  458. mei_cl_read_cb_flush(cl, fp);
  459. return 0;
  460. }
  461. /**
  462. * mei_cl_init - initializes cl.
  463. *
  464. * @cl: host client to be initialized
  465. * @dev: mei device
  466. */
  467. void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
  468. {
  469. memset(cl, 0, sizeof(struct mei_cl));
  470. init_waitqueue_head(&cl->wait);
  471. init_waitqueue_head(&cl->rx_wait);
  472. init_waitqueue_head(&cl->tx_wait);
  473. INIT_LIST_HEAD(&cl->rd_completed);
  474. INIT_LIST_HEAD(&cl->rd_pending);
  475. INIT_LIST_HEAD(&cl->link);
  476. INIT_LIST_HEAD(&cl->device_link);
  477. cl->writing_state = MEI_IDLE;
  478. cl->dev = dev;
  479. }
  480. /**
  481. * mei_cl_allocate - allocates cl structure and sets it up.
  482. *
  483. * @dev: mei device
  484. * Return: The allocated file or NULL on failure
  485. */
  486. struct mei_cl *mei_cl_allocate(struct mei_device *dev)
  487. {
  488. struct mei_cl *cl;
  489. cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
  490. if (!cl)
  491. return NULL;
  492. mei_cl_init(cl, dev);
  493. return cl;
  494. }
/**
 * mei_cl_link - allocate host id in the host map
 *
 * @cl: host client
 * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generic one
 *
 * Return: 0 on success
 *	-EINVAL on incorrect values
 *	-EMFILE if open count exceeded.
 */
int mei_cl_link(struct mei_cl *cl, int id)
{
	struct mei_device *dev;
	long open_handle_count;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	/* If Id is not assigned get one*/
	if (id == MEI_HOST_CLIENT_ID_ANY)
		id = find_first_zero_bit(dev->host_clients_map,
					MEI_CLIENTS_MAX);

	/* find_first_zero_bit() returns MEI_CLIENTS_MAX when the map is full */
	if (id >= MEI_CLIENTS_MAX) {
		dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
		return -EMFILE;
	}

	/* amthif handles are counted separately but share the same cap */
	open_handle_count = dev->open_handle_count + dev->iamthif_open_count;
	if (open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(dev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		return -EMFILE;
	}

	dev->open_handle_count++;

	cl->host_client_id = id;
	list_add_tail(&cl->link, &dev->file_list);

	/* reserve the id so it is not handed out again until unlink */
	set_bit(id, dev->host_clients_map);

	cl->state = MEI_FILE_INITIALIZING;

	cl_dbg(dev, cl, "link cl\n");
	return 0;
}
/**
 * mei_cl_unlink - remove me_cl from the list
 *
 * @cl: host client
 *
 * Return: always 0
 */
int mei_cl_unlink(struct mei_cl *cl)
{
	struct mei_device *dev;

	/* don't shout on error exit path */
	if (!cl)
		return 0;

	/* wd and amthif might not be initialized */
	if (!cl->dev)
		return 0;

	dev = cl->dev;

	cl_dbg(dev, cl, "unlink client");

	/* balance the increment done in mei_cl_link() */
	if (dev->open_handle_count > 0)
		dev->open_handle_count--;

	/* never clear the 0 bit; host id 0 is permanently reserved
	 * (presumably for the HBM itself — see host_clients_map usage)
	 */
	if (cl->host_client_id)
		clear_bit(cl->host_client_id, dev->host_clients_map);

	list_del_init(&cl->link);

	cl->state = MEI_FILE_INITIALIZING;

	return 0;
}
/**
 * mei_host_client_init - work function that starts the protocol clients
 *	(amthif, watchdog, nfc) and marks the device enabled
 *
 * @work: init_work item embedded in the mei device structure
 */
void mei_host_client_init(struct work_struct *work)
{
	struct mei_device *dev =
		container_of(work, struct mei_device, init_work);
	struct mei_me_client *me_cl;

	mutex_lock(&dev->device_lock);

	/* each protocol host client is initialized only when the firmware
	 * exposes a matching me client; the lookup reference is dropped
	 * right after the check (mei_me_cl_put tolerates NULL)
	 */
	me_cl = mei_me_cl_by_uuid(dev, &mei_amthif_guid);
	if (me_cl)
		mei_amthif_host_init(dev);
	mei_me_cl_put(me_cl);

	me_cl = mei_me_cl_by_uuid(dev, &mei_wd_guid);
	if (me_cl)
		mei_wd_host_init(dev);
	mei_me_cl_put(me_cl);

	me_cl = mei_me_cl_by_uuid(dev, &mei_nfc_guid);
	if (me_cl)
		mei_nfc_host_init(dev);
	mei_me_cl_put(me_cl);

	dev->dev_state = MEI_DEV_ENABLED;
	dev->reset_count = 0;
	mutex_unlock(&dev->device_lock);

	/* allow the device to runtime-suspend again */
	pm_runtime_mark_last_busy(dev->dev);
	dev_dbg(dev->dev, "rpm: autosuspend\n");
	pm_runtime_autosuspend(dev->dev);
}
  586. /**
  587. * mei_hbuf_acquire - try to acquire host buffer
  588. *
  589. * @dev: the device structure
  590. * Return: true if host buffer was acquired
  591. */
  592. bool mei_hbuf_acquire(struct mei_device *dev)
  593. {
  594. if (mei_pg_state(dev) == MEI_PG_ON ||
  595. dev->pg_event == MEI_PG_EVENT_WAIT) {
  596. dev_dbg(dev->dev, "device is in pg\n");
  597. return false;
  598. }
  599. if (!dev->hbuf_is_ready) {
  600. dev_dbg(dev->dev, "hbuf is not ready\n");
  601. return false;
  602. }
  603. dev->hbuf_is_ready = false;
  604. return true;
  605. }
/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cl_dbg(dev, cl, "disconnecting");

	/* nothing to do unless a disconnect is actually in progress */
	if (cl->state != MEI_FILE_DISCONNECTING)
		return 0;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT, NULL);
	rets = cb ? 0 : -ENOMEM;
	if (rets)
		goto free;

	if (mei_hbuf_acquire(dev)) {
		/* host buffer available: send the request now and park the
		 * cb on the control read list to await the FW response
		 */
		if (mei_hbm_cl_disconnect_req(dev, cl)) {
			rets = -ENODEV;
			cl_err(dev, cl, "failed to disconnect.\n");
			goto free;
		}
		cl->timer_count = MEI_CONNECT_TIMEOUT;
		mdelay(10); /* Wait for hardware disconnection ready */
		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
	} else {
		/* no buffer: defer sending to the interrupt thread */
		cl_dbg(dev, cl, "add disconnect cb to control write list\n");
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}

	/* drop the device lock while sleeping for the FW reply */
	mutex_unlock(&dev->device_lock);

	wait_event_timeout(cl->wait,
			MEI_FILE_DISCONNECTED == cl->state,
			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));

	mutex_lock(&dev->device_lock);

	if (MEI_FILE_DISCONNECTED == cl->state) {
		rets = 0;
		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
	} else {
		cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
		rets = -ETIME;
	}

	/* discard any control cbs still queued for this client */
	mei_io_list_flush(&dev->ctrl_rd_list, cl);
	mei_io_list_flush(&dev->ctrl_wr_list, cl);
free:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}
  670. /**
  671. * mei_cl_is_other_connecting - checks if other
  672. * client with the same me client id is connecting
  673. *
  674. * @cl: private data of the file object
  675. *
  676. * Return: true if other client is connected, false - otherwise.
  677. */
  678. bool mei_cl_is_other_connecting(struct mei_cl *cl)
  679. {
  680. struct mei_device *dev;
  681. struct mei_cl *ocl; /* the other client */
  682. if (WARN_ON(!cl || !cl->dev))
  683. return false;
  684. dev = cl->dev;
  685. list_for_each_entry(ocl, &dev->file_list, link) {
  686. if (ocl->state == MEI_FILE_CONNECTING &&
  687. ocl != cl &&
  688. cl->me_client_id == ocl->me_client_id)
  689. return true;
  690. }
  691. return false;
  692. }
/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @file: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct file *file)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_io_cb_init(cl, MEI_FOP_CONNECT, file);
	rets = cb ? 0 : -ENOMEM;
	if (rets)
		goto out;

	/* run hbuf acquire last so we don't have to undo */
	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
		/* buffer available and no competing connect: send now and
		 * wait for the FW response on the control read list
		 */
		cl->state = MEI_FILE_CONNECTING;
		if (mei_hbm_cl_connect_req(dev, cl)) {
			rets = -ENODEV;
			goto out;
		}
		cl->timer_count = MEI_CONNECT_TIMEOUT;
		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
	} else {
		/* defer the request to the interrupt thread */
		cl->state = MEI_FILE_INITIALIZING;
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}

	/* drop the device lock while sleeping for the FW reply */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			(cl->state == MEI_FILE_CONNECTED ||
			 cl->state == MEI_FILE_DISCONNECTED),
			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (!mei_cl_is_connected(cl)) {
		cl->state = MEI_FILE_DISCONNECTED;
		/* something went really wrong */
		if (!cl->status)
			cl->status = -EFAULT;

		/* discard any control cbs still queued for this client */
		mei_io_list_flush(&dev->ctrl_rd_list, cl);
		mei_io_list_flush(&dev->ctrl_wr_list, cl);
	}

	rets = cl->status;
out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}
  756. /**
  757. * mei_cl_alloc_linked - allocate and link host client
  758. *
  759. * @dev: the device structure
  760. * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generic one
  761. *
  762. * Return: cl on success ERR_PTR on failure
  763. */
  764. struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev, int id)
  765. {
  766. struct mei_cl *cl;
  767. int ret;
  768. cl = mei_cl_allocate(dev);
  769. if (!cl) {
  770. ret = -ENOMEM;
  771. goto err;
  772. }
  773. ret = mei_cl_link(cl, id);
  774. if (ret)
  775. goto err;
  776. return cl;
  777. err:
  778. kfree(cl);
  779. return ERR_PTR(ret);
  780. }
/**
 * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
 *
 * @cl: private data of the file object
 *
 * Return: 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
 *	-ENOENT if mei_cl is not present
 *	-EINVAL if single_recv_buf == 0
 */
int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_me_client *me_cl;
	int rets = 0;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	/* per-host-client credits take precedence */
	if (cl->mei_flow_ctrl_creds > 0)
		return 1;

	me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id);
	if (!me_cl) {
		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
		return -ENOENT;
	}

	/* me-client-wide credits are only valid for clients with a
	 * single shared receive buffer
	 */
	if (me_cl->mei_flow_ctrl_creds > 0) {
		rets = 1;
		if (WARN_ON(me_cl->props.single_recv_buf == 0))
			rets = -EINVAL;
	}
	mei_me_cl_put(me_cl);
	return rets;
}
/**
 * mei_cl_flow_ctrl_reduce - reduces flow_control.
 *
 * @cl: private data of the file object
 *
 * Return:
 *	0 on success
 *	-ENOENT when me client is not found
 *	-EINVAL when ctrl credits are <= 0
 */
int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_me_client *me_cl;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id);
	if (!me_cl) {
		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
		return -ENOENT;
	}

	/* mirror of mei_cl_flow_ctrl_creds(): credits live on the me
	 * client for single-buffer clients, on the host client otherwise
	 */
	if (me_cl->props.single_recv_buf) {
		if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0)) {
			rets = -EINVAL;
			goto out;
		}
		me_cl->mei_flow_ctrl_creds--;
	} else {
		if (WARN_ON(cl->mei_flow_ctrl_creds <= 0)) {
			rets = -EINVAL;
			goto out;
		}
		cl->mei_flow_ctrl_creds--;
	}
	rets = 0;
out:
	mei_me_cl_put(me_cl);
	return rets;
}
/**
 * mei_cl_read_start - the start read client message function.
 *
 * @cl: host client
 * @length: number of bytes to read
 * @fp: pointer to file structure
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	struct mei_me_client *me_cl;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	/* HW currently supports only one pending read */
	if (!list_empty(&cl->rd_pending))
		return -EBUSY;

	me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id);
	if (!me_cl) {
		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
		return -ENOTTY;
	}
	/* always allocate at least client max message */
	length = max_t(size_t, length, me_cl->props.max_msg_length);
	mei_me_cl_put(me_cl);

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_READ, fp);
	rets = cb ? 0 : -ENOMEM;
	if (rets)
		goto out;

	if (mei_hbuf_acquire(dev)) {
		/* buffer available: issue the flow-control credit to the FW
		 * and park the cb on the pending read list
		 */
		rets = mei_hbm_cl_flow_control_req(dev, cl);
		if (rets < 0)
			goto out;

		list_add_tail(&cb->list, &cl->rd_pending);
	} else {
		/* defer the flow-control request to the interrupt thread */
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	/* the cb is only freed on failure; on success a list owns it */
	if (rets)
		mei_io_cb_free(cb);

	return rets;
}
  911. /**
  912. * mei_cl_irq_write - write a message to device
  913. * from the interrupt thread context
  914. *
  915. * @cl: client
  916. * @cb: callback block.
  917. * @cmpl_list: complete list.
  918. *
  919. * Return: 0, OK; otherwise error.
  920. */
  921. int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
  922. struct mei_cl_cb *cmpl_list)
  923. {
  924. struct mei_device *dev;
  925. struct mei_msg_data *buf;
  926. struct mei_msg_hdr mei_hdr;
  927. size_t len;
  928. u32 msg_slots;
  929. int slots;
  930. int rets;
  931. if (WARN_ON(!cl || !cl->dev))
  932. return -ENODEV;
  933. dev = cl->dev;
  934. buf = &cb->buf;
  935. rets = mei_cl_flow_ctrl_creds(cl);
  936. if (rets < 0)
  937. return rets;
  938. if (rets == 0) {
  939. cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
  940. return 0;
  941. }
  942. slots = mei_hbuf_empty_slots(dev);
  943. len = buf->size - cb->buf_idx;
  944. msg_slots = mei_data2slots(len);
  945. mei_hdr.host_addr = cl->host_client_id;
  946. mei_hdr.me_addr = cl->me_client_id;
  947. mei_hdr.reserved = 0;
  948. mei_hdr.internal = cb->internal;
  949. if (slots >= msg_slots) {
  950. mei_hdr.length = len;
  951. mei_hdr.msg_complete = 1;
  952. /* Split the message only if we can write the whole host buffer */
  953. } else if (slots == dev->hbuf_depth) {
  954. msg_slots = slots;
  955. len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
  956. mei_hdr.length = len;
  957. mei_hdr.msg_complete = 0;
  958. } else {
  959. /* wait for next time the host buffer is empty */
  960. return 0;
  961. }
  962. cl_dbg(dev, cl, "buf: size = %d idx = %lu\n",
  963. cb->buf.size, cb->buf_idx);
  964. rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
  965. if (rets) {
  966. cl->status = rets;
  967. list_move_tail(&cb->list, &cmpl_list->list);
  968. return rets;
  969. }
  970. cl->status = 0;
  971. cl->writing_state = MEI_WRITING;
  972. cb->buf_idx += mei_hdr.length;
  973. cb->completed = mei_hdr.msg_complete == 1;
  974. if (mei_hdr.msg_complete) {
  975. if (mei_cl_flow_ctrl_reduce(cl))
  976. return -EIO;
  977. list_move_tail(&cb->list, &dev->write_waiting_list.list);
  978. }
  979. return 0;
  980. }
/**
 * mei_cl_write - submit a write cb to mei device
 *	assumes device_lock is locked
 *
 * @cl: host client
 * @cb: write callback with filled data
 * @blocking: block until completed
 *
 * Return: number of bytes sent on success, <0 on failure.
 */
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	if (WARN_ON(!cb))
		return -EINVAL;

	dev = cl->dev;

	buf = &cb->buf;

	cl_dbg(dev, cl, "size=%d\n", buf->size);

	/* keep the device awake for the duration of the write;
	 * balanced by pm_runtime_put_autosuspend() at err:
	 */
	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb->buf_idx = 0;
	cl->writing_state = MEI_IDLE;

	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;
	mei_hdr.msg_complete = 0;
	mei_hdr.internal = cb->internal;

	rets = mei_cl_flow_ctrl_creds(cl);
	if (rets < 0)
		goto err;

	/* no credits: queue the cb for later and report success with the
	 * full size; msg_complete is still 0 so out: picks write_list
	 */
	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		rets = buf->size;
		goto out;
	}
	if (!mei_hbuf_acquire(dev)) {
		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
		rets = buf->size;
		goto out;
	}

	/* Check for a maximum length */
	if (buf->size > mei_hbuf_max_len(dev)) {
		/* first fragment only; the rest goes out via the irq path */
		mei_hdr.length = mei_hbuf_max_len(dev);
		mei_hdr.msg_complete = 0;
	} else {
		mei_hdr.length = buf->size;
		mei_hdr.msg_complete = 1;
	}

	rets = mei_write_message(dev, &mei_hdr, buf->data);
	if (rets)
		goto err;

	cl->writing_state = MEI_WRITING;
	cb->buf_idx = mei_hdr.length;
	cb->completed = mei_hdr.msg_complete == 1;

out:
	if (mei_hdr.msg_complete) {
		/* whole message sent: consume a credit, wait for hw ack */
		rets = mei_cl_flow_ctrl_reduce(cl);
		if (rets < 0)
			goto err;

		list_add_tail(&cb->list, &dev->write_waiting_list.list);
	} else {
		/* partial or deferred: irq thread continues from write_list */
		list_add_tail(&cb->list, &dev->write_list.list);
	}

	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {

		/* device_lock must be dropped while sleeping; the irq
		 * thread takes it to advance writing_state
		 */
		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
				cl->writing_state == MEI_WRITE_COMPLETE);
		mutex_lock(&dev->device_lock);
		/* wait_event_interruptible returns -ERESTARTSYS */
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto err;
		}
	}

	rets = buf->size;
err:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return rets;
}
  1072. /**
  1073. * mei_cl_complete - processes completed operation for a client
  1074. *
  1075. * @cl: private data of the file object.
  1076. * @cb: callback block.
  1077. */
  1078. void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
  1079. {
  1080. if (cb->fop_type == MEI_FOP_WRITE) {
  1081. mei_io_cb_free(cb);
  1082. cb = NULL;
  1083. cl->writing_state = MEI_WRITE_COMPLETE;
  1084. if (waitqueue_active(&cl->tx_wait))
  1085. wake_up_interruptible(&cl->tx_wait);
  1086. } else if (cb->fop_type == MEI_FOP_READ) {
  1087. list_add_tail(&cb->list, &cl->rd_completed);
  1088. if (waitqueue_active(&cl->rx_wait))
  1089. wake_up_interruptible_all(&cl->rx_wait);
  1090. else
  1091. mei_cl_bus_rx_event(cl);
  1092. }
  1093. }
  1094. /**
  1095. * mei_cl_all_disconnect - disconnect forcefully all connected clients
  1096. *
  1097. * @dev: mei device
  1098. */
  1099. void mei_cl_all_disconnect(struct mei_device *dev)
  1100. {
  1101. struct mei_cl *cl;
  1102. list_for_each_entry(cl, &dev->file_list, link) {
  1103. cl->state = MEI_FILE_DISCONNECTED;
  1104. cl->mei_flow_ctrl_creds = 0;
  1105. cl->timer_count = 0;
  1106. }
  1107. }
  1108. /**
  1109. * mei_cl_all_wakeup - wake up all readers and writers they can be interrupted
  1110. *
  1111. * @dev: mei device
  1112. */
  1113. void mei_cl_all_wakeup(struct mei_device *dev)
  1114. {
  1115. struct mei_cl *cl;
  1116. list_for_each_entry(cl, &dev->file_list, link) {
  1117. if (waitqueue_active(&cl->rx_wait)) {
  1118. cl_dbg(dev, cl, "Waking up reading client!\n");
  1119. wake_up_interruptible(&cl->rx_wait);
  1120. }
  1121. if (waitqueue_active(&cl->tx_wait)) {
  1122. cl_dbg(dev, cl, "Waking up writing client!\n");
  1123. wake_up_interruptible(&cl->tx_wait);
  1124. }
  1125. }
  1126. }
/**
 * mei_cl_all_write_clear - clear all pending writes
 *
 * @dev: mei device
 */
void mei_cl_all_write_clear(struct mei_device *dev)
{
	/* NULL client: free every cb on both write queues, not just one
	 * client's
	 */
	mei_io_list_free(&dev->write_list, NULL);
	mei_io_list_free(&dev->write_waiting_list, NULL);
}