cec-api.c
/*
 * cec-api.c - HDMI Consumer Electronics Control framework - API
 *
 * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
  19. #include <linux/errno.h>
  20. #include <linux/init.h>
  21. #include <linux/module.h>
  22. #include <linux/kernel.h>
  23. #include <linux/kmod.h>
  24. #include <linux/ktime.h>
  25. #include <linux/slab.h>
  26. #include <linux/mm.h>
  27. #include <linux/string.h>
  28. #include <linux/types.h>
  29. #include <linux/uaccess.h>
  30. #include <linux/version.h>
  31. #include <media/cec-pin.h>
  32. #include "cec-priv.h"
  33. #include "cec-pin-priv.h"
  34. static inline struct cec_devnode *cec_devnode_data(struct file *filp)
  35. {
  36. struct cec_fh *fh = filp->private_data;
  37. return &fh->adap->devnode;
  38. }
/* CEC file operations */
  40. static __poll_t cec_poll(struct file *filp,
  41. struct poll_table_struct *poll)
  42. {
  43. struct cec_devnode *devnode = cec_devnode_data(filp);
  44. struct cec_fh *fh = filp->private_data;
  45. struct cec_adapter *adap = fh->adap;
  46. __poll_t res = 0;
  47. if (!devnode->registered)
  48. return POLLERR | POLLHUP;
  49. mutex_lock(&adap->lock);
  50. if (adap->is_configured &&
  51. adap->transmit_queue_sz < CEC_MAX_MSG_TX_QUEUE_SZ)
  52. res |= POLLOUT | POLLWRNORM;
  53. if (fh->queued_msgs)
  54. res |= POLLIN | POLLRDNORM;
  55. if (fh->total_queued_events)
  56. res |= POLLPRI;
  57. poll_wait(filp, &fh->wait, poll);
  58. mutex_unlock(&adap->lock);
  59. return res;
  60. }
  61. static bool cec_is_busy(const struct cec_adapter *adap,
  62. const struct cec_fh *fh)
  63. {
  64. bool valid_initiator = adap->cec_initiator && adap->cec_initiator == fh;
  65. bool valid_follower = adap->cec_follower && adap->cec_follower == fh;
  66. /*
  67. * Exclusive initiators and followers can always access the CEC adapter
  68. */
  69. if (valid_initiator || valid_follower)
  70. return false;
  71. /*
  72. * All others can only access the CEC adapter if there is no
  73. * exclusive initiator and they are in INITIATOR mode.
  74. */
  75. return adap->cec_initiator ||
  76. fh->mode_initiator == CEC_MODE_NO_INITIATOR;
  77. }
  78. static long cec_adap_g_caps(struct cec_adapter *adap,
  79. struct cec_caps __user *parg)
  80. {
  81. struct cec_caps caps = {};
  82. strlcpy(caps.driver, adap->devnode.dev.parent->driver->name,
  83. sizeof(caps.driver));
  84. strlcpy(caps.name, adap->name, sizeof(caps.name));
  85. caps.available_log_addrs = adap->available_log_addrs;
  86. caps.capabilities = adap->capabilities;
  87. caps.version = LINUX_VERSION_CODE;
  88. if (copy_to_user(parg, &caps, sizeof(caps)))
  89. return -EFAULT;
  90. return 0;
  91. }
  92. static long cec_adap_g_phys_addr(struct cec_adapter *adap,
  93. __u16 __user *parg)
  94. {
  95. u16 phys_addr;
  96. mutex_lock(&adap->lock);
  97. phys_addr = adap->phys_addr;
  98. mutex_unlock(&adap->lock);
  99. if (copy_to_user(parg, &phys_addr, sizeof(phys_addr)))
  100. return -EFAULT;
  101. return 0;
  102. }
  103. static long cec_adap_s_phys_addr(struct cec_adapter *adap, struct cec_fh *fh,
  104. bool block, __u16 __user *parg)
  105. {
  106. u16 phys_addr;
  107. long err;
  108. if (!(adap->capabilities & CEC_CAP_PHYS_ADDR))
  109. return -ENOTTY;
  110. if (copy_from_user(&phys_addr, parg, sizeof(phys_addr)))
  111. return -EFAULT;
  112. err = cec_phys_addr_validate(phys_addr, NULL, NULL);
  113. if (err)
  114. return err;
  115. mutex_lock(&adap->lock);
  116. if (cec_is_busy(adap, fh))
  117. err = -EBUSY;
  118. else
  119. __cec_s_phys_addr(adap, phys_addr, block);
  120. mutex_unlock(&adap->lock);
  121. return err;
  122. }
  123. static long cec_adap_g_log_addrs(struct cec_adapter *adap,
  124. struct cec_log_addrs __user *parg)
  125. {
  126. struct cec_log_addrs log_addrs;
  127. mutex_lock(&adap->lock);
  128. log_addrs = adap->log_addrs;
  129. if (!adap->is_configured)
  130. memset(log_addrs.log_addr, CEC_LOG_ADDR_INVALID,
  131. sizeof(log_addrs.log_addr));
  132. mutex_unlock(&adap->lock);
  133. if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
  134. return -EFAULT;
  135. return 0;
  136. }
  137. static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
  138. bool block, struct cec_log_addrs __user *parg)
  139. {
  140. struct cec_log_addrs log_addrs;
  141. long err = -EBUSY;
  142. if (!(adap->capabilities & CEC_CAP_LOG_ADDRS))
  143. return -ENOTTY;
  144. if (copy_from_user(&log_addrs, parg, sizeof(log_addrs)))
  145. return -EFAULT;
  146. log_addrs.flags &= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK |
  147. CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU |
  148. CEC_LOG_ADDRS_FL_CDC_ONLY;
  149. mutex_lock(&adap->lock);
  150. if (!adap->is_configuring &&
  151. (!log_addrs.num_log_addrs || !adap->is_configured) &&
  152. !cec_is_busy(adap, fh)) {
  153. err = __cec_s_log_addrs(adap, &log_addrs, block);
  154. if (!err)
  155. log_addrs = adap->log_addrs;
  156. }
  157. mutex_unlock(&adap->lock);
  158. if (err)
  159. return err;
  160. if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
  161. return -EFAULT;
  162. return 0;
  163. }
  164. static long cec_transmit(struct cec_adapter *adap, struct cec_fh *fh,
  165. bool block, struct cec_msg __user *parg)
  166. {
  167. struct cec_msg msg = {};
  168. long err = 0;
  169. if (!(adap->capabilities & CEC_CAP_TRANSMIT))
  170. return -ENOTTY;
  171. if (copy_from_user(&msg, parg, sizeof(msg)))
  172. return -EFAULT;
  173. /* A CDC-Only device can only send CDC messages */
  174. if ((adap->log_addrs.flags & CEC_LOG_ADDRS_FL_CDC_ONLY) &&
  175. (msg.len == 1 || msg.msg[1] != CEC_MSG_CDC_MESSAGE))
  176. return -EINVAL;
  177. mutex_lock(&adap->lock);
  178. if (adap->log_addrs.num_log_addrs == 0)
  179. err = -EPERM;
  180. else if (adap->is_configuring)
  181. err = -ENONET;
  182. else if (!adap->is_configured &&
  183. (adap->needs_hpd || msg.msg[0] != 0xf0))
  184. err = -ENONET;
  185. else if (cec_is_busy(adap, fh))
  186. err = -EBUSY;
  187. else
  188. err = cec_transmit_msg_fh(adap, &msg, fh, block);
  189. mutex_unlock(&adap->lock);
  190. if (err)
  191. return err;
  192. if (copy_to_user(parg, &msg, sizeof(msg)))
  193. return -EFAULT;
  194. return 0;
  195. }
  196. /* Called by CEC_RECEIVE: wait for a message to arrive */
  197. static int cec_receive_msg(struct cec_fh *fh, struct cec_msg *msg, bool block)
  198. {
  199. u32 timeout = msg->timeout;
  200. int res;
  201. do {
  202. mutex_lock(&fh->lock);
  203. /* Are there received messages queued up? */
  204. if (fh->queued_msgs) {
  205. /* Yes, return the first one */
  206. struct cec_msg_entry *entry =
  207. list_first_entry(&fh->msgs,
  208. struct cec_msg_entry, list);
  209. list_del(&entry->list);
  210. *msg = entry->msg;
  211. kfree(entry);
  212. fh->queued_msgs--;
  213. mutex_unlock(&fh->lock);
  214. /* restore original timeout value */
  215. msg->timeout = timeout;
  216. return 0;
  217. }
  218. /* No, return EAGAIN in non-blocking mode or wait */
  219. mutex_unlock(&fh->lock);
  220. /* Return when in non-blocking mode */
  221. if (!block)
  222. return -EAGAIN;
  223. if (msg->timeout) {
  224. /* The user specified a timeout */
  225. res = wait_event_interruptible_timeout(fh->wait,
  226. fh->queued_msgs,
  227. msecs_to_jiffies(msg->timeout));
  228. if (res == 0)
  229. res = -ETIMEDOUT;
  230. else if (res > 0)
  231. res = 0;
  232. } else {
  233. /* Wait indefinitely */
  234. res = wait_event_interruptible(fh->wait,
  235. fh->queued_msgs);
  236. }
  237. /* Exit on error, otherwise loop to get the new message */
  238. } while (!res);
  239. return res;
  240. }
  241. static long cec_receive(struct cec_adapter *adap, struct cec_fh *fh,
  242. bool block, struct cec_msg __user *parg)
  243. {
  244. struct cec_msg msg = {};
  245. long err;
  246. if (copy_from_user(&msg, parg, sizeof(msg)))
  247. return -EFAULT;
  248. err = cec_receive_msg(fh, &msg, block);
  249. if (err)
  250. return err;
  251. msg.flags = 0;
  252. if (copy_to_user(parg, &msg, sizeof(msg)))
  253. return -EFAULT;
  254. return 0;
  255. }
  256. static long cec_dqevent(struct cec_adapter *adap, struct cec_fh *fh,
  257. bool block, struct cec_event __user *parg)
  258. {
  259. struct cec_event_entry *ev = NULL;
  260. u64 ts = ~0ULL;
  261. unsigned int i;
  262. unsigned int ev_idx;
  263. long err = 0;
  264. mutex_lock(&fh->lock);
  265. while (!fh->total_queued_events && block) {
  266. mutex_unlock(&fh->lock);
  267. err = wait_event_interruptible(fh->wait,
  268. fh->total_queued_events);
  269. if (err)
  270. return err;
  271. mutex_lock(&fh->lock);
  272. }
  273. /* Find the oldest event */
  274. for (i = 0; i < CEC_NUM_EVENTS; i++) {
  275. struct cec_event_entry *entry =
  276. list_first_entry_or_null(&fh->events[i],
  277. struct cec_event_entry, list);
  278. if (entry && entry->ev.ts <= ts) {
  279. ev = entry;
  280. ev_idx = i;
  281. ts = ev->ev.ts;
  282. }
  283. }
  284. if (!ev) {
  285. err = -EAGAIN;
  286. goto unlock;
  287. }
  288. list_del(&ev->list);
  289. if (copy_to_user(parg, &ev->ev, sizeof(ev->ev)))
  290. err = -EFAULT;
  291. if (ev_idx >= CEC_NUM_CORE_EVENTS)
  292. kfree(ev);
  293. fh->queued_events[ev_idx]--;
  294. fh->total_queued_events--;
  295. unlock:
  296. mutex_unlock(&fh->lock);
  297. return err;
  298. }
  299. static long cec_g_mode(struct cec_adapter *adap, struct cec_fh *fh,
  300. u32 __user *parg)
  301. {
  302. u32 mode = fh->mode_initiator | fh->mode_follower;
  303. if (copy_to_user(parg, &mode, sizeof(mode)))
  304. return -EFAULT;
  305. return 0;
  306. }
/*
 * Implement CEC_S_MODE: change this filehandle's initiator and follower
 * modes after validating capabilities, permissions and exclusivity, then
 * update the adapter bookkeeping (follower/monitor counters, exclusive
 * initiator/follower pointers, passthrough flag).
 *
 * Returns 0 on success, -EFAULT/-EINVAL/-EPERM/-EBUSY on failure.
 */
static long cec_s_mode(struct cec_adapter *adap, struct cec_fh *fh,
		       u32 __user *parg)
{
	u32 mode;
	u8 mode_initiator;
	u8 mode_follower;
	long err = 0;

	if (copy_from_user(&mode, parg, sizeof(mode)))
		return -EFAULT;
	/* Reject bits outside the initiator and follower masks */
	if (mode & ~(CEC_MODE_INITIATOR_MSK | CEC_MODE_FOLLOWER_MSK)) {
		dprintk(1, "%s: invalid mode bits set\n", __func__);
		return -EINVAL;
	}
	mode_initiator = mode & CEC_MODE_INITIATOR_MSK;
	mode_follower = mode & CEC_MODE_FOLLOWER_MSK;
	if (mode_initiator > CEC_MODE_EXCL_INITIATOR ||
	    mode_follower > CEC_MODE_MONITOR_ALL) {
		dprintk(1, "%s: unknown mode\n", __func__);
		return -EINVAL;
	}
	/* Monitoring modes are only valid if the adapter supports them */
	if (mode_follower == CEC_MODE_MONITOR_ALL &&
	    !(adap->capabilities & CEC_CAP_MONITOR_ALL)) {
		dprintk(1, "%s: MONITOR_ALL not supported\n", __func__);
		return -EINVAL;
	}
	if (mode_follower == CEC_MODE_MONITOR_PIN &&
	    !(adap->capabilities & CEC_CAP_MONITOR_PIN)) {
		dprintk(1, "%s: MONITOR_PIN not supported\n", __func__);
		return -EINVAL;
	}
	/* Follower modes should always be able to send CEC messages */
	if ((mode_initiator == CEC_MODE_NO_INITIATOR ||
	     !(adap->capabilities & CEC_CAP_TRANSMIT)) &&
	    mode_follower >= CEC_MODE_FOLLOWER &&
	    mode_follower <= CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
		dprintk(1, "%s: cannot transmit\n", __func__);
		return -EINVAL;
	}
	/* Monitor modes require CEC_MODE_NO_INITIATOR */
	if (mode_initiator && mode_follower >= CEC_MODE_MONITOR_PIN) {
		dprintk(1, "%s: monitor modes require NO_INITIATOR\n",
			__func__);
		return -EINVAL;
	}
	/* Monitor modes require CAP_NET_ADMIN */
	if (mode_follower >= CEC_MODE_MONITOR_PIN && !capable(CAP_NET_ADMIN))
		return -EPERM;

	mutex_lock(&adap->lock);
	/*
	 * You can't become exclusive follower if someone else already
	 * has that job.
	 */
	if ((mode_follower == CEC_MODE_EXCL_FOLLOWER ||
	     mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) &&
	    adap->cec_follower && adap->cec_follower != fh)
		err = -EBUSY;
	/*
	 * You can't become exclusive initiator if someone else already
	 * has that job.
	 */
	if (mode_initiator == CEC_MODE_EXCL_INITIATOR &&
	    adap->cec_initiator && adap->cec_initiator != fh)
		err = -EBUSY;

	if (!err) {
		/* Keep the adapter-wide monitor-all refcount in sync */
		bool old_mon_all = fh->mode_follower == CEC_MODE_MONITOR_ALL;
		bool new_mon_all = mode_follower == CEC_MODE_MONITOR_ALL;

		if (old_mon_all != new_mon_all) {
			if (new_mon_all)
				err = cec_monitor_all_cnt_inc(adap);
			else
				cec_monitor_all_cnt_dec(adap);
		}
	}

	if (err) {
		mutex_unlock(&adap->lock);
		return err;
	}

	/* Leaving the old follower mode: drop its counters */
	if (fh->mode_follower == CEC_MODE_FOLLOWER)
		adap->follower_cnt--;
	if (fh->mode_follower == CEC_MODE_MONITOR_PIN)
		adap->monitor_pin_cnt--;
	/* Entering the new follower mode: bump counters, queue events */
	if (mode_follower == CEC_MODE_FOLLOWER)
		adap->follower_cnt++;
	if (mode_follower == CEC_MODE_MONITOR_PIN) {
		/* Send an initial event reflecting the current pin level */
		struct cec_event ev = {
			.flags = CEC_EVENT_FL_INITIAL_STATE,
		};

		ev.event = adap->cec_pin_is_high ? CEC_EVENT_PIN_CEC_HIGH :
						   CEC_EVENT_PIN_CEC_LOW;
		cec_queue_event_fh(fh, &ev, 0);
		adap->monitor_pin_cnt++;
	}
	/* Update (or release) the exclusive follower/initiator roles */
	if (mode_follower == CEC_MODE_EXCL_FOLLOWER ||
	    mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
		adap->passthrough =
			mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU;
		adap->cec_follower = fh;
	} else if (adap->cec_follower == fh) {
		adap->passthrough = false;
		adap->cec_follower = NULL;
	}
	if (mode_initiator == CEC_MODE_EXCL_INITIATOR)
		adap->cec_initiator = fh;
	else if (adap->cec_initiator == fh)
		adap->cec_initiator = NULL;
	fh->mode_initiator = mode_initiator;
	fh->mode_follower = mode_follower;
	mutex_unlock(&adap->lock);
	return 0;
}
  417. static long cec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
  418. {
  419. struct cec_devnode *devnode = cec_devnode_data(filp);
  420. struct cec_fh *fh = filp->private_data;
  421. struct cec_adapter *adap = fh->adap;
  422. bool block = !(filp->f_flags & O_NONBLOCK);
  423. void __user *parg = (void __user *)arg;
  424. if (!devnode->registered)
  425. return -ENODEV;
  426. switch (cmd) {
  427. case CEC_ADAP_G_CAPS:
  428. return cec_adap_g_caps(adap, parg);
  429. case CEC_ADAP_G_PHYS_ADDR:
  430. return cec_adap_g_phys_addr(adap, parg);
  431. case CEC_ADAP_S_PHYS_ADDR:
  432. return cec_adap_s_phys_addr(adap, fh, block, parg);
  433. case CEC_ADAP_G_LOG_ADDRS:
  434. return cec_adap_g_log_addrs(adap, parg);
  435. case CEC_ADAP_S_LOG_ADDRS:
  436. return cec_adap_s_log_addrs(adap, fh, block, parg);
  437. case CEC_TRANSMIT:
  438. return cec_transmit(adap, fh, block, parg);
  439. case CEC_RECEIVE:
  440. return cec_receive(adap, fh, block, parg);
  441. case CEC_DQEVENT:
  442. return cec_dqevent(adap, fh, block, parg);
  443. case CEC_G_MODE:
  444. return cec_g_mode(adap, fh, parg);
  445. case CEC_S_MODE:
  446. return cec_s_mode(adap, fh, parg);
  447. default:
  448. return -ENOTTY;
  449. }
  450. }
  451. static int cec_open(struct inode *inode, struct file *filp)
  452. {
  453. struct cec_devnode *devnode =
  454. container_of(inode->i_cdev, struct cec_devnode, cdev);
  455. struct cec_adapter *adap = to_cec_adapter(devnode);
  456. struct cec_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);
  457. /*
  458. * Initial events that are automatically sent when the cec device is
  459. * opened.
  460. */
  461. struct cec_event ev = {
  462. .event = CEC_EVENT_STATE_CHANGE,
  463. .flags = CEC_EVENT_FL_INITIAL_STATE,
  464. };
  465. unsigned int i;
  466. int err;
  467. if (!fh)
  468. return -ENOMEM;
  469. INIT_LIST_HEAD(&fh->msgs);
  470. INIT_LIST_HEAD(&fh->xfer_list);
  471. for (i = 0; i < CEC_NUM_EVENTS; i++)
  472. INIT_LIST_HEAD(&fh->events[i]);
  473. mutex_init(&fh->lock);
  474. init_waitqueue_head(&fh->wait);
  475. fh->mode_initiator = CEC_MODE_INITIATOR;
  476. fh->adap = adap;
  477. err = cec_get_device(devnode);
  478. if (err) {
  479. kfree(fh);
  480. return err;
  481. }
  482. mutex_lock(&devnode->lock);
  483. if (list_empty(&devnode->fhs) &&
  484. !adap->needs_hpd &&
  485. adap->phys_addr == CEC_PHYS_ADDR_INVALID) {
  486. err = adap->ops->adap_enable(adap, true);
  487. if (err) {
  488. mutex_unlock(&devnode->lock);
  489. kfree(fh);
  490. return err;
  491. }
  492. }
  493. filp->private_data = fh;
  494. /* Queue up initial state events */
  495. ev.state_change.phys_addr = adap->phys_addr;
  496. ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
  497. cec_queue_event_fh(fh, &ev, 0);
  498. #ifdef CONFIG_CEC_PIN
  499. if (adap->pin && adap->pin->ops->read_hpd) {
  500. err = adap->pin->ops->read_hpd(adap);
  501. if (err >= 0) {
  502. ev.event = err ? CEC_EVENT_PIN_HPD_HIGH :
  503. CEC_EVENT_PIN_HPD_LOW;
  504. cec_queue_event_fh(fh, &ev, 0);
  505. }
  506. }
  507. #endif
  508. list_add(&fh->list, &devnode->fhs);
  509. mutex_unlock(&devnode->lock);
  510. return 0;
  511. }
/*
 * Implement the .release file operation: drop any exclusive roles held
 * by this filehandle, disable the adapter on last close (mirroring
 * cec_open()), detach pending transmits and free all queued messages
 * and events before releasing the fh and the device reference.
 */
static int cec_release(struct inode *inode, struct file *filp)
{
	struct cec_devnode *devnode = cec_devnode_data(filp);
	struct cec_adapter *adap = to_cec_adapter(devnode);
	struct cec_fh *fh = filp->private_data;
	unsigned int i;

	mutex_lock(&adap->lock);
	/* Release the exclusive initiator/follower roles, if held */
	if (adap->cec_initiator == fh)
		adap->cec_initiator = NULL;
	if (adap->cec_follower == fh) {
		adap->cec_follower = NULL;
		adap->passthrough = false;
	}
	/* Undo the counters this fh's follower mode contributed */
	if (fh->mode_follower == CEC_MODE_FOLLOWER)
		adap->follower_cnt--;
	if (fh->mode_follower == CEC_MODE_MONITOR_PIN)
		adap->monitor_pin_cnt--;
	if (fh->mode_follower == CEC_MODE_MONITOR_ALL)
		cec_monitor_all_cnt_dec(adap);
	mutex_unlock(&adap->lock);

	mutex_lock(&devnode->lock);
	list_del(&fh->list);
	/* Last close: disable the adapter again (see cec_open()) */
	if (list_empty(&devnode->fhs) &&
	    !adap->needs_hpd &&
	    adap->phys_addr == CEC_PHYS_ADDR_INVALID) {
		WARN_ON(adap->ops->adap_enable(adap, false));
	}
	mutex_unlock(&devnode->lock);

	/* Unhook pending transmits from this filehandle. */
	mutex_lock(&adap->lock);
	while (!list_empty(&fh->xfer_list)) {
		struct cec_data *data =
			list_first_entry(&fh->xfer_list, struct cec_data, xfer_list);

		/* Turn it into an ownerless, non-blocking transmit */
		data->blocking = false;
		data->fh = NULL;
		list_del(&data->xfer_list);
	}
	mutex_unlock(&adap->lock);

	/* Free all queued-but-unread messages */
	while (!list_empty(&fh->msgs)) {
		struct cec_msg_entry *entry =
			list_first_entry(&fh->msgs, struct cec_msg_entry, list);

		list_del(&entry->list);
		kfree(entry);
	}
	/*
	 * Only non-core event entries are heap-allocated (cf. cec_dqevent,
	 * which likewise only kfrees indices >= CEC_NUM_CORE_EVENTS), so
	 * only those queues need freeing here.
	 */
	for (i = CEC_NUM_CORE_EVENTS; i < CEC_NUM_EVENTS; i++) {
		while (!list_empty(&fh->events[i])) {
			struct cec_event_entry *entry =
				list_first_entry(&fh->events[i],
						 struct cec_event_entry, list);

			list_del(&entry->list);
			kfree(entry);
		}
	}
	kfree(fh);

	cec_put_device(devnode);
	filp->private_data = NULL;
	return 0;
}
  571. const struct file_operations cec_devnode_fops = {
  572. .owner = THIS_MODULE,
  573. .open = cec_open,
  574. .unlocked_ioctl = cec_ioctl,
  575. .release = cec_release,
  576. .poll = cec_poll,
  577. .llseek = no_llseek,
  578. };