main.c

/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2018, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched/signal.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "client.h"
/**
 * mei_open - the open function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * Return: 0 on success, <0 on error
 */
static int mei_open(struct inode *inode, struct file *file)
{
        struct mei_device *dev;
        struct mei_cl *cl;
        int err;

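        /*
         * The character device is embedded in struct mei_device;
         * recover the owning device from the inode's cdev.
         */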
        dev = container_of(inode->i_cdev, struct mei_device, cdev);
        if (!dev)
                return -ENODEV;

        mutex_lock(&dev->device_lock);

        if (dev->dev_state != MEI_DEV_ENABLED) {
                dev_dbg(dev->dev, "dev_state != MEI_ENABLED dev_state = %s\n",
                        mei_dev_state_str(dev->dev_state));
                err = -ENODEV;
                goto err_unlock;
        }

        cl = mei_cl_alloc_linked(dev);
        if (IS_ERR(cl)) {
                err = PTR_ERR(cl);
                goto err_unlock;
        }

        cl->fp = file;
        file->private_data = cl;

        mutex_unlock(&dev->device_lock);
        return nonseekable_open(inode, file);

err_unlock:
        mutex_unlock(&dev->device_lock);
        return err;
}
/**
 * mei_release - the release function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * Return: 0 on success, <0 on error
 */
static int mei_release(struct inode *inode, struct file *file)
{
        struct mei_cl *cl = file->private_data;
        struct mei_device *dev;
        int rets;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        mutex_lock(&dev->device_lock);
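        /*
         * Disconnect from the firmware client, drop any callbacks still
         * queued for this file and unlink the host client before freeing it.
         */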
        rets = mei_cl_disconnect(cl);

        mei_cl_flush_queues(cl, file);
        cl_dbg(dev, cl, "removing\n");

        mei_cl_unlink(cl);

        file->private_data = NULL;

        kfree(cl);

        mutex_unlock(&dev->device_lock);
        return rets;
}
/**
 * mei_read - the read function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * Return: >=0 data length on success, <0 on error
 */
static ssize_t mei_read(struct file *file, char __user *ubuf,
                        size_t length, loff_t *offset)
{
        struct mei_cl *cl = file->private_data;
        struct mei_device *dev;
        struct mei_cl_cb *cb = NULL;
        bool nonblock = !!(file->f_flags & O_NONBLOCK);
        ssize_t rets;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        mutex_lock(&dev->device_lock);
        if (dev->dev_state != MEI_DEV_ENABLED) {
                rets = -ENODEV;
                goto out;
        }

        if (length == 0) {
                rets = 0;
                goto out;
        }

        if (ubuf == NULL) {
                rets = -EMSGSIZE;
                goto out;
        }

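        /* a read completed earlier for this file may already be queued */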
        cb = mei_cl_read_cb(cl, file);
        if (cb)
                goto copy_buffer;

        if (*offset > 0)
                *offset = 0;

        rets = mei_cl_read_start(cl, length, file);
        if (rets && rets != -EBUSY) {
                cl_dbg(dev, cl, "mei start read failure status = %zd\n", rets);
                goto out;
        }

        if (nonblock) {
                rets = -EAGAIN;
                goto out;
        }

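        /*
         * Drop device_lock while sleeping so the completion path can take
         * it and move the finished read onto cl->rd_completed.
         */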
        mutex_unlock(&dev->device_lock);
        if (wait_event_interruptible(cl->rx_wait,
                                     !list_empty(&cl->rd_completed) ||
                                     !mei_cl_is_connected(cl))) {
                if (signal_pending(current))
                        return -EINTR;
                return -ERESTARTSYS;
        }
        mutex_lock(&dev->device_lock);

        if (!mei_cl_is_connected(cl)) {
                rets = -ENODEV;
                goto out;
        }

        cb = mei_cl_read_cb(cl, file);
        if (!cb) {
                rets = 0;
                goto out;
        }

copy_buffer:
        /* now copy the data to user space */
        if (cb->status) {
                rets = cb->status;
                cl_dbg(dev, cl, "read operation failed %zd\n", rets);
                goto free;
        }

        cl_dbg(dev, cl, "buf.size = %zu buf.idx = %zu offset = %lld\n",
               cb->buf.size, cb->buf_idx, *offset);
        if (*offset >= cb->buf_idx) {
                rets = 0;
                goto free;
        }

        /* length is being truncated to PAGE_SIZE,
         * however buf_idx may point beyond that */
        length = min_t(size_t, length, cb->buf_idx - *offset);

        if (copy_to_user(ubuf, cb->buf.data + *offset, length)) {
                dev_dbg(dev->dev, "failed to copy data to userland\n");
                rets = -EFAULT;
                goto free;
        }

        rets = length;
        *offset += length;
        /* not all data was read, keep the cb */
        if (*offset < cb->buf_idx)
                goto out;

free:
        mei_io_cb_free(cb);
        *offset = 0;

out:
        cl_dbg(dev, cl, "end mei read rets = %zd\n", rets);
        mutex_unlock(&dev->device_lock);
        return rets;
}
/**
 * mei_write - the write function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * Return: >=0 data length on success, <0 on error
 */
static ssize_t mei_write(struct file *file, const char __user *ubuf,
                         size_t length, loff_t *offset)
{
        struct mei_cl *cl = file->private_data;
        struct mei_cl_cb *cb;
        struct mei_device *dev;
        ssize_t rets;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        mutex_lock(&dev->device_lock);

        if (dev->dev_state != MEI_DEV_ENABLED) {
                rets = -ENODEV;
                goto out;
        }

        if (!mei_cl_is_connected(cl)) {
                cl_err(dev, cl, "is not connected");
                rets = -ENODEV;
                goto out;
        }

        if (!mei_me_cl_is_active(cl->me_cl)) {
                rets = -ENOTTY;
                goto out;
        }

        if (length > mei_cl_mtu(cl)) {
                rets = -EFBIG;
                goto out;
        }

        if (length == 0) {
                rets = 0;
                goto out;
        }

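        /*
         * The per-client TX queue is bounded by dev->tx_queue_limit;
         * block (unless O_NONBLOCK) until a previous write completes
         * or the client gets disconnected.
         */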
        while (cl->tx_cb_queued >= dev->tx_queue_limit) {
                if (file->f_flags & O_NONBLOCK) {
                        rets = -EAGAIN;
                        goto out;
                }
                mutex_unlock(&dev->device_lock);
                rets = wait_event_interruptible(cl->tx_wait,
                                cl->writing_state == MEI_WRITE_COMPLETE ||
                                (!mei_cl_is_connected(cl)));
                mutex_lock(&dev->device_lock);
                if (rets) {
                        if (signal_pending(current))
                                rets = -EINTR;
                        goto out;
                }
                if (!mei_cl_is_connected(cl)) {
                        rets = -ENODEV;
                        goto out;
                }
        }

        cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file);
        if (!cb) {
                rets = -ENOMEM;
                goto out;
        }

        rets = copy_from_user(cb->buf.data, ubuf, length);
        if (rets) {
                dev_dbg(dev->dev, "failed to copy data from userland\n");
                rets = -EFAULT;
                mei_io_cb_free(cb);
                goto out;
        }

        rets = mei_cl_write(cl, cb);
out:
        mutex_unlock(&dev->device_lock);
        return rets;
}
/**
 * mei_ioctl_connect_client - the connect to fw client IOCTL function
 *
 * @file: private data of the file object
 * @data: IOCTL connect data, input and output parameters
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_ioctl_connect_client(struct file *file,
                                    struct mei_connect_client_data *data)
{
        struct mei_device *dev;
        struct mei_client *client;
        struct mei_me_client *me_cl;
        struct mei_cl *cl;
        int rets;

        cl = file->private_data;
        dev = cl->dev;

        if (dev->dev_state != MEI_DEV_ENABLED)
                return -ENODEV;

        if (cl->state != MEI_FILE_INITIALIZING &&
            cl->state != MEI_FILE_DISCONNECTED)
                return -EBUSY;

        /* find ME client we're trying to connect to */
        me_cl = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
        if (!me_cl) {
                dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n",
                        &data->in_client_uuid);
                rets = -ENOTTY;
                goto end;
        }

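        /*
         * Fixed-address firmware clients are only reachable when the
         * allow_fixed_address override permits it or, absent an override,
         * when the firmware advertises fixed-address support.
         */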
        if (me_cl->props.fixed_address) {
                bool forbidden = dev->override_fixed_address ?
                         !dev->allow_fixed_address : !dev->hbm_f_fa_supported;
                if (forbidden) {
                        dev_dbg(dev->dev, "Connection forbidden to FW Client UUID = %pUl\n",
                                &data->in_client_uuid);
                        rets = -ENOTTY;
                        goto end;
                }
        }

        dev_dbg(dev->dev, "Connect to FW Client ID = %d\n",
                me_cl->client_id);
        dev_dbg(dev->dev, "FW Client - Protocol Version = %d\n",
                me_cl->props.protocol_version);
        dev_dbg(dev->dev, "FW Client - Max Msg Len = %d\n",
                me_cl->props.max_msg_length);

        /* prepare the output buffer */
        client = &data->out_client_properties;
        client->max_msg_length = me_cl->props.max_msg_length;
        client->protocol_version = me_cl->props.protocol_version;
        dev_dbg(dev->dev, "Can connect?\n");

        rets = mei_cl_connect(cl, me_cl, file);

end:
        mei_me_cl_put(me_cl);
        return rets;
}
/**
 * mei_ioctl_client_notify_request -
 *     propagate event notification request to client
 *
 * @file: pointer to file structure
 * @request: 0 - disable, 1 - enable
 *
 * Return: 0 on success, <0 on error
 */
static int mei_ioctl_client_notify_request(const struct file *file, u32 request)
{
        struct mei_cl *cl = file->private_data;

        if (request != MEI_HBM_NOTIFICATION_START &&
            request != MEI_HBM_NOTIFICATION_STOP)
                return -EINVAL;

        return mei_cl_notify_request(cl, file, (u8)request);
}

/**
 * mei_ioctl_client_notify_get - wait for notification request
 *
 * @file: pointer to file structure
 * @notify_get: 0 - disable, 1 - enable
 *
 * Return: 0 on success, <0 on error
 */
static int mei_ioctl_client_notify_get(const struct file *file, u32 *notify_get)
{
        struct mei_cl *cl = file->private_data;
        bool notify_ev;
        bool block = (file->f_flags & O_NONBLOCK) == 0;
        int rets;

        rets = mei_cl_notify_get(cl, block, &notify_ev);
        if (rets)
                return rets;

        *notify_get = notify_ev ? 1 : 0;
        return 0;
}
/**
 * mei_ioctl - the IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * Return: 0 on success, <0 on error
 */
static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
        struct mei_device *dev;
        struct mei_cl *cl = file->private_data;
        struct mei_connect_client_data connect_data;
        u32 notify_get, notify_req;
        int rets;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        dev_dbg(dev->dev, "IOCTL cmd = 0x%x", cmd);

        mutex_lock(&dev->device_lock);
        if (dev->dev_state != MEI_DEV_ENABLED) {
                rets = -ENODEV;
                goto out;
        }

        switch (cmd) {
        case IOCTL_MEI_CONNECT_CLIENT:
                dev_dbg(dev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");
                if (copy_from_user(&connect_data, (char __user *)data,
                                   sizeof(struct mei_connect_client_data))) {
                        dev_dbg(dev->dev, "failed to copy data from userland\n");
                        rets = -EFAULT;
                        goto out;
                }

                rets = mei_ioctl_connect_client(file, &connect_data);
                if (rets)
                        goto out;

                /* if all is ok, copying the data back to user. */
                if (copy_to_user((char __user *)data, &connect_data,
                                 sizeof(struct mei_connect_client_data))) {
                        dev_dbg(dev->dev, "failed to copy data to userland\n");
                        rets = -EFAULT;
                        goto out;
                }

                break;

        case IOCTL_MEI_NOTIFY_SET:
                dev_dbg(dev->dev, ": IOCTL_MEI_NOTIFY_SET.\n");
                if (copy_from_user(&notify_req,
                                   (char __user *)data, sizeof(notify_req))) {
                        dev_dbg(dev->dev, "failed to copy data from userland\n");
                        rets = -EFAULT;
                        goto out;
                }
                rets = mei_ioctl_client_notify_request(file, notify_req);
                break;

        case IOCTL_MEI_NOTIFY_GET:
                dev_dbg(dev->dev, ": IOCTL_MEI_NOTIFY_GET.\n");
                rets = mei_ioctl_client_notify_get(file, &notify_get);
                if (rets)
                        goto out;

                dev_dbg(dev->dev, "copy connect data to user\n");
                if (copy_to_user((char __user *)data,
                                 &notify_get, sizeof(notify_get))) {
                        dev_dbg(dev->dev, "failed to copy data to userland\n");
                        rets = -EFAULT;
                        goto out;
                }
                break;

        default:
                rets = -ENOIOCTLCMD;
        }

out:
        mutex_unlock(&dev->device_lock);
        return rets;
}
/**
 * mei_compat_ioctl - the compat IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * Return: 0 on success, <0 on error
 */
#ifdef CONFIG_COMPAT
static long mei_compat_ioctl(struct file *file,
                             unsigned int cmd, unsigned long data)
{
        return mei_ioctl(file, cmd, (unsigned long)compat_ptr(data));
}
#endif
/**
 * mei_poll - the poll function
 *
 * @file: pointer to file structure
 * @wait: pointer to poll_table structure
 *
 * Return: poll mask
 */
static __poll_t mei_poll(struct file *file, poll_table *wait)
{
        __poll_t req_events = poll_requested_events(wait);
        struct mei_cl *cl = file->private_data;
        struct mei_device *dev;
        __poll_t mask = 0;
        bool notify_en;

        if (WARN_ON(!cl || !cl->dev))
                return EPOLLERR;

        dev = cl->dev;

        mutex_lock(&dev->device_lock);

        notify_en = cl->notify_en && (req_events & EPOLLPRI);

        if (dev->dev_state != MEI_DEV_ENABLED ||
            !mei_cl_is_connected(cl)) {
                mask = EPOLLERR;
                goto out;
        }

        if (notify_en) {
                poll_wait(file, &cl->ev_wait, wait);
                if (cl->notify_ev)
                        mask |= EPOLLPRI;
        }

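        /*
         * For read readiness: report already completed reads, otherwise
         * arm a new read request so a later poll can return EPOLLIN.
         */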
        if (req_events & (EPOLLIN | EPOLLRDNORM)) {
                poll_wait(file, &cl->rx_wait, wait);

                if (!list_empty(&cl->rd_completed))
                        mask |= EPOLLIN | EPOLLRDNORM;
                else
                        mei_cl_read_start(cl, mei_cl_mtu(cl), file);
        }

        if (req_events & (EPOLLOUT | EPOLLWRNORM)) {
                poll_wait(file, &cl->tx_wait, wait);
                if (cl->tx_cb_queued < dev->tx_queue_limit)
                        mask |= EPOLLOUT | EPOLLWRNORM;
        }

out:
        mutex_unlock(&dev->device_lock);
        return mask;
}
/**
 * mei_cl_is_write_queued - check if the client has pending writes.
 *
 * @cl: writing host client
 *
 * Return: true if client is writing, false otherwise.
 */
static bool mei_cl_is_write_queued(struct mei_cl *cl)
{
        struct mei_device *dev = cl->dev;
        struct mei_cl_cb *cb;

        list_for_each_entry(cb, &dev->write_list, list)
                if (cb->cl == cl)
                        return true;
        list_for_each_entry(cb, &dev->write_waiting_list, list)
                if (cb->cl == cl)
                        return true;
        return false;
}
/**
 * mei_fsync - the fsync handler
 *
 * @fp: pointer to file structure
 * @start: unused
 * @end: unused
 * @datasync: unused
 *
 * Return: 0 on success, -ENODEV if client is not connected
 */
static int mei_fsync(struct file *fp, loff_t start, loff_t end, int datasync)
{
        struct mei_cl *cl = fp->private_data;
        struct mei_device *dev;
        int rets;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        mutex_lock(&dev->device_lock);

        if (dev->dev_state != MEI_DEV_ENABLED || !mei_cl_is_connected(cl)) {
                rets = -ENODEV;
                goto out;
        }

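        /*
         * Flush: sleep (with device_lock dropped) until no write callbacks
         * for this client remain on the write or write-waiting lists.
         */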
        while (mei_cl_is_write_queued(cl)) {
                mutex_unlock(&dev->device_lock);
                rets = wait_event_interruptible(cl->tx_wait,
                                cl->writing_state == MEI_WRITE_COMPLETE ||
                                !mei_cl_is_connected(cl));
                mutex_lock(&dev->device_lock);
                if (rets) {
                        if (signal_pending(current))
                                rets = -EINTR;
                        goto out;
                }
                if (!mei_cl_is_connected(cl)) {
                        rets = -ENODEV;
                        goto out;
                }
        }
        rets = 0;
out:
        mutex_unlock(&dev->device_lock);
        return rets;
}
/**
 * mei_fasync - asynchronous io support
 *
 * @fd: file descriptor
 * @file: pointer to file structure
 * @band: band bitmap
 *
 * Return: negative on error,
 *         0 if it did no changes,
 *         and positive if a process was added or deleted
 */
static int mei_fasync(int fd, struct file *file, int band)
{
        struct mei_cl *cl = file->private_data;

        if (!mei_cl_is_connected(cl))
                return -ENODEV;

        return fasync_helper(fd, file, band, &cl->ev_async);
}
/**
 * fw_status_show - mei device fw_status attribute show method
 *
 * @device: device pointer
 * @attr: attribute pointer
 * @buf: char out buffer
 *
 * Return: number of the bytes printed into buf or error
 */
static ssize_t fw_status_show(struct device *device,
                              struct device_attribute *attr, char *buf)
{
        struct mei_device *dev = dev_get_drvdata(device);
        struct mei_fw_status fw_status;
        int err, i;
        ssize_t cnt = 0;

        mutex_lock(&dev->device_lock);
        err = mei_fw_status(dev, &fw_status);
        mutex_unlock(&dev->device_lock);
        if (err) {
                dev_err(device, "read fw_status error = %d\n", err);
                return err;
        }

        for (i = 0; i < fw_status.count; i++)
                cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%08X\n",
                                 fw_status.status[i]);

        return cnt;
}
static DEVICE_ATTR_RO(fw_status);
/**
 * hbm_ver_show - display HBM protocol version negotiated with FW
 *
 * @device: device pointer
 * @attr: attribute pointer
 * @buf: char out buffer
 *
 * Return: number of the bytes printed into buf or error
 */
static ssize_t hbm_ver_show(struct device *device,
                            struct device_attribute *attr, char *buf)
{
        struct mei_device *dev = dev_get_drvdata(device);
        struct hbm_version ver;

        mutex_lock(&dev->device_lock);
        ver = dev->version;
        mutex_unlock(&dev->device_lock);

        return sprintf(buf, "%u.%u\n", ver.major_version, ver.minor_version);
}
static DEVICE_ATTR_RO(hbm_ver);

/**
 * hbm_ver_drv_show - display HBM protocol version advertised by driver
 *
 * @device: device pointer
 * @attr: attribute pointer
 * @buf: char out buffer
 *
 * Return: number of the bytes printed into buf or error
 */
static ssize_t hbm_ver_drv_show(struct device *device,
                                struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%u.%u\n", HBM_MAJOR_VERSION, HBM_MINOR_VERSION);
}
static DEVICE_ATTR_RO(hbm_ver_drv);
static ssize_t tx_queue_limit_show(struct device *device,
                                   struct device_attribute *attr, char *buf)
{
        struct mei_device *dev = dev_get_drvdata(device);
        u8 size = 0;

        mutex_lock(&dev->device_lock);
        size = dev->tx_queue_limit;
        mutex_unlock(&dev->device_lock);

        return snprintf(buf, PAGE_SIZE, "%u\n", size);
}

static ssize_t tx_queue_limit_store(struct device *device,
                                    struct device_attribute *attr,
                                    const char *buf, size_t count)
{
        struct mei_device *dev = dev_get_drvdata(device);
        u8 limit;
        unsigned int inp;
        int err;

        err = kstrtouint(buf, 10, &inp);
        if (err)
                return err;
        if (inp > MEI_TX_QUEUE_LIMIT_MAX || inp < MEI_TX_QUEUE_LIMIT_MIN)
                return -EINVAL;
        limit = inp;

        mutex_lock(&dev->device_lock);
        dev->tx_queue_limit = limit;
        mutex_unlock(&dev->device_lock);

        return count;
}
static DEVICE_ATTR_RW(tx_queue_limit);
/**
 * fw_ver_show - display ME FW version
 *
 * @device: device pointer
 * @attr: attribute pointer
 * @buf: char out buffer
 *
 * Return: number of the bytes printed into buf or error
 */
static ssize_t fw_ver_show(struct device *device,
                           struct device_attribute *attr, char *buf)
{
        struct mei_device *dev = dev_get_drvdata(device);
        struct mei_fw_version *ver;
        ssize_t cnt = 0;
        int i;

        ver = dev->fw_ver;

        for (i = 0; i < MEI_MAX_FW_VER_BLOCKS; i++)
                cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%u:%u.%u.%u.%u\n",
                                 ver[i].platform, ver[i].major, ver[i].minor,
                                 ver[i].hotfix, ver[i].buildno);
        return cnt;
}
static DEVICE_ATTR_RO(fw_ver);

static struct attribute *mei_attrs[] = {
        &dev_attr_fw_status.attr,
        &dev_attr_hbm_ver.attr,
        &dev_attr_hbm_ver_drv.attr,
        &dev_attr_tx_queue_limit.attr,
        &dev_attr_fw_ver.attr,
        NULL
};
ATTRIBUTE_GROUPS(mei);
/*
 * file operations structure will be used for mei char device.
 */
static const struct file_operations mei_fops = {
        .owner = THIS_MODULE,
        .read = mei_read,
        .unlocked_ioctl = mei_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl = mei_compat_ioctl,
#endif
        .open = mei_open,
        .release = mei_release,
        .write = mei_write,
        .poll = mei_poll,
        .fsync = mei_fsync,
        .fasync = mei_fasync,
        .llseek = no_llseek
};

static struct class *mei_class;
static dev_t mei_devt;
#define MEI_MAX_DEVS MINORMASK
static DEFINE_MUTEX(mei_minor_lock);
static DEFINE_IDR(mei_idr);
/**
 * mei_minor_get - obtain next free device minor number
 *
 * @dev: device pointer
 *
 * Return: allocated minor, or -ENOSPC if no free minor left
 */
static int mei_minor_get(struct mei_device *dev)
{
        int ret;

        mutex_lock(&mei_minor_lock);
        ret = idr_alloc(&mei_idr, dev, 0, MEI_MAX_DEVS, GFP_KERNEL);
        if (ret >= 0)
                dev->minor = ret;
        else if (ret == -ENOSPC)
                dev_err(dev->dev, "too many mei devices\n");

        mutex_unlock(&mei_minor_lock);
        return ret;
}

/**
 * mei_minor_free - mark device minor number as free
 *
 * @dev: device pointer
 */
static void mei_minor_free(struct mei_device *dev)
{
        mutex_lock(&mei_minor_lock);
        idr_remove(&mei_idr, dev->minor);
        mutex_unlock(&mei_minor_lock);
}
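/*
 * mei_register: expose the device to user space - allocate a minor,
 * add the character device, create the sysfs class device and register
 * the debugfs entries; on failure the steps are unwound in reverse order.
 */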
int mei_register(struct mei_device *dev, struct device *parent)
{
        struct device *clsdev; /* class device */
        int ret, devno;

        ret = mei_minor_get(dev);
        if (ret < 0)
                return ret;

        /* Fill in the data structures */
        devno = MKDEV(MAJOR(mei_devt), dev->minor);
        cdev_init(&dev->cdev, &mei_fops);
        dev->cdev.owner = parent->driver->owner;

        /* Add the device */
        ret = cdev_add(&dev->cdev, devno, 1);
        if (ret) {
                dev_err(parent, "unable to add device %d:%d\n",
                        MAJOR(mei_devt), dev->minor);
                goto err_dev_add;
        }

        clsdev = device_create_with_groups(mei_class, parent, devno,
                                           dev, mei_groups,
                                           "mei%d", dev->minor);
        if (IS_ERR(clsdev)) {
                dev_err(parent, "unable to create device %d:%d\n",
                        MAJOR(mei_devt), dev->minor);
                ret = PTR_ERR(clsdev);
                goto err_dev_create;
        }

        ret = mei_dbgfs_register(dev, dev_name(clsdev));
        if (ret) {
                dev_err(clsdev, "cannot register debugfs ret = %d\n", ret);
                goto err_dev_dbgfs;
        }

        return 0;

err_dev_dbgfs:
        device_destroy(mei_class, devno);
err_dev_create:
        cdev_del(&dev->cdev);
err_dev_add:
        mei_minor_free(dev);
        return ret;
}
EXPORT_SYMBOL_GPL(mei_register);

void mei_deregister(struct mei_device *dev)
{
        int devno;

        devno = dev->cdev.dev;
        cdev_del(&dev->cdev);

        mei_dbgfs_deregister(dev);

        device_destroy(mei_class, devno);

        mei_minor_free(dev);
}
EXPORT_SYMBOL_GPL(mei_deregister);
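/*
 * Module init: create the "mei" class, reserve a char device region of
 * MEI_MAX_DEVS minors and initialize the MEI client bus; errors unwind
 * in reverse order.
 */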
static int __init mei_init(void)
{
        int ret;

        mei_class = class_create(THIS_MODULE, "mei");
        if (IS_ERR(mei_class)) {
                pr_err("couldn't create class\n");
                ret = PTR_ERR(mei_class);
                goto err;
        }

        ret = alloc_chrdev_region(&mei_devt, 0, MEI_MAX_DEVS, "mei");
        if (ret < 0) {
                pr_err("unable to allocate char dev region\n");
                goto err_class;
        }

        ret = mei_cl_bus_init();
        if (ret < 0) {
                pr_err("unable to initialize bus\n");
                goto err_chrdev;
        }

        return 0;

err_chrdev:
        unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
err_class:
        class_destroy(mei_class);
err:
        return ret;
}

static void __exit mei_exit(void)
{
        unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
        class_destroy(mei_class);
        mei_cl_bus_exit();
}

module_init(mei_init);
module_exit(mei_exit);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");