/*
 * Mediated virtual PCI serial host device driver
 *
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *     Author: Neo Jia <cjia@nvidia.com>
 *             Kirti Wankhede <kwankhede@nvidia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Sample driver that creates mdev device that simulates serial port over PCI
 * card.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/uuid.h>
#include <linux/vfio.h>
#include <linux/iommu.h>
#include <linux/sysfs.h>
#include <linux/ctype.h>
#include <linux/file.h>
#include <linux/mdev.h>
#include <linux/pci.h>
#include <linux/serial.h>
#include <uapi/linux/serial_reg.h>
#include <linux/eventfd.h>
  37. /*
  38. * #defines
  39. */
  40. #define VERSION_STRING "0.1"
  41. #define DRIVER_AUTHOR "NVIDIA Corporation"
  42. #define MTTY_CLASS_NAME "mtty"
  43. #define MTTY_NAME "mtty"
  44. #define MTTY_STRING_LEN 16
  45. #define MTTY_CONFIG_SPACE_SIZE 0xff
  46. #define MTTY_IO_BAR_SIZE 0x8
  47. #define MTTY_MMIO_BAR_SIZE 0x100000
  48. #define STORE_LE16(addr, val) (*(u16 *)addr = val)
  49. #define STORE_LE32(addr, val) (*(u32 *)addr = val)
  50. #define MAX_FIFO_SIZE 16
  51. #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
  52. #define MTTY_VFIO_PCI_OFFSET_SHIFT 40
  53. #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
  54. #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
  55. ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
  56. #define MTTY_VFIO_PCI_OFFSET_MASK \
  57. (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
  58. #define MAX_MTTYS 24
/*
 * Global Structures
 */

/* Host-side character device bookkeeping (one global instance below). */
struct mtty_dev {
	dev_t		vd_devt;	/* allocated char-dev region */
	struct class	*vd_class;	/* "mtty" device class */
	struct cdev	vd_cdev;	/* backing cdev for vd_fops */
	struct idr	vd_idr;		/* id allocator for mdev instances */
	struct device	dev;
} mtty_dev;

/* Per-region mapping info cached from the virtual config-space BARs. */
struct mdev_region_info {
	u64 start;		/* guest base address programmed in the BAR */
	u64 phys_start;
	u32 size;
	u64 vfio_offset;	/* offset of this region in the VFIO fd */
};

#if defined(DEBUG_REGS)
/* Register names indexed by BAR offset, for write-path debug logging. */
const char *wr_reg[] = {
	"TX",
	"IER",
	"FCR",
	"LCR",
	"MCR",
	"LSR",
	"MSR",
	"SCR"
};

/* Register names indexed by BAR offset, for read-path debug logging. */
const char *rd_reg[] = {
	"RX",
	"IER",
	"IIR",
	"LCR",
	"MCR",
	"LSR",
	"MSR",
	"SCR"
};
#endif

/* loop back buffer: bytes written to TX are read back from RX. */
struct rxtx {
	u8 fifo[MAX_FIFO_SIZE];
	u8 head, tail;		/* head = write index, tail = read index */
	u8 count;		/* bytes currently queued */
};

/* Emulated 16550 state for one serial port. */
struct serial_port {
	u8 uart_reg[8];		/* 8 registers */
	struct rxtx rxtx;	/* loop back buffer */
	bool dlab;		/* Divisor Latch Access Bit currently set */
	bool overrun;		/* FIFO overflowed; reported via LSR/IIR */
	u16 divisor;		/* baud divisor latched while DLAB is set */
	u8 fcr;			/* FIFO control register */
	u8 max_fifo_size;	/* 1 when FIFOs disabled, else MAX_FIFO_SIZE */
	u8 intr_trigger_level;	/* interrupt trigger level */
};

/* State of each mdev device */
struct mdev_state {
	int irq_fd;			/* fd backing the active eventfd */
	struct eventfd_ctx *intx_evtfd;	/* INTx trigger, set via SET_IRQS */
	struct eventfd_ctx *msi_evtfd;	/* MSI trigger, set via SET_IRQS */
	int irq_index;			/* active VFIO_PCI_*_IRQ_INDEX, -1 if none */
	u8 *vconfig;			/* virtual PCI config space */
	struct mutex ops_lock;		/* serializes region access / ioctls */
	struct mdev_device *mdev;
	struct mdev_region_info region_info[VFIO_PCI_NUM_REGIONS];
	u32 bar_mask[VFIO_PCI_NUM_REGIONS];	/* BAR sizing masks */
	struct list_head next;		/* link on mdev_devices_list */
	struct serial_port s[2];	/* one or two emulated ports */
	struct mutex rxtx_lock;		/* protects s[].rxtx FIFOs */
	struct vfio_device_info dev_info;
	int nr_ports;			/* 1 or 2, chosen at create time */
};

/* Global registry of all created mdev instances. */
struct mutex mdev_list_lock;
struct list_head mdev_devices_list;

static const struct file_operations vd_fops = {
	.owner          = THIS_MODULE,
};

/* function prototypes */

static int mtty_trigger_interrupt(uuid_le uuid);
/* Helper functions */
  138. static struct mdev_state *find_mdev_state_by_uuid(uuid_le uuid)
  139. {
  140. struct mdev_state *mds;
  141. list_for_each_entry(mds, &mdev_devices_list, next) {
  142. if (uuid_le_cmp(mds->mdev->uuid, uuid) == 0)
  143. return mds;
  144. }
  145. return NULL;
  146. }
  147. void dump_buffer(char *buf, uint32_t count)
  148. {
  149. #if defined(DEBUG)
  150. int i;
  151. pr_info("Buffer:\n");
  152. for (i = 0; i < count; i++) {
  153. pr_info("%2x ", *(buf + i));
  154. if ((i + 1) % 16 == 0)
  155. pr_info("\n");
  156. }
  157. #endif
  158. }
/*
 * mtty_create_config_space() - populate the virtual PCI configuration
 * space for a newly created mdev device: vendor/device ID, class codes
 * (16550-compatible serial controller), one or two I/O BARs depending
 * on nr_ports, and vendor-specific scratch bytes.
 */
static void mtty_create_config_space(struct mdev_state *mdev_state)
{
	/* PCI dev ID */
	STORE_LE32((u32 *) &mdev_state->vconfig[0x0], 0x32534348);

	/* Control: I/O+, Mem-, BusMaster- */
	STORE_LE16((u16 *) &mdev_state->vconfig[0x4], 0x0001);

	/* Status: capabilities list absent */
	STORE_LE16((u16 *) &mdev_state->vconfig[0x6], 0x0200);

	/* Rev ID */
	mdev_state->vconfig[0x8] = 0x10;

	/* programming interface class : 16550-compatible serial controller */
	mdev_state->vconfig[0x9] = 0x02;

	/* Sub class : 00 */
	mdev_state->vconfig[0xa] = 0x00;

	/* Base class : Simple Communication controllers */
	mdev_state->vconfig[0xb] = 0x07;

	/* base address registers */
	/* BAR0: IO space, bar_mask encodes the BAR size for sizing cycles */
	STORE_LE32((u32 *) &mdev_state->vconfig[0x10], 0x000001);
	mdev_state->bar_mask[0] = ~(MTTY_IO_BAR_SIZE) + 1;

	if (mdev_state->nr_ports == 2) {
		/* BAR1: IO space (second serial port) */
		STORE_LE32((u32 *) &mdev_state->vconfig[0x14], 0x000001);
		mdev_state->bar_mask[1] = ~(MTTY_IO_BAR_SIZE) + 1;
	}

	/* Subsystem ID */
	STORE_LE32((u32 *) &mdev_state->vconfig[0x2c], 0x32534348);

	mdev_state->vconfig[0x34] = 0x00;   /* Cap Ptr */
	mdev_state->vconfig[0x3d] = 0x01;   /* interrupt pin (INTA#) */

	/* Vendor specific data */
	mdev_state->vconfig[0x40] = 0x23;
	mdev_state->vconfig[0x43] = 0x80;
	mdev_state->vconfig[0x44] = 0x23;
	mdev_state->vconfig[0x48] = 0x23;
	mdev_state->vconfig[0x4c] = 0x23;

	/* bytes 0x60..0x6e spell the ASCII string "PCI Serial/UART" */
	mdev_state->vconfig[0x60] = 0x50;
	mdev_state->vconfig[0x61] = 0x43;
	mdev_state->vconfig[0x62] = 0x49;
	mdev_state->vconfig[0x63] = 0x20;
	mdev_state->vconfig[0x64] = 0x53;
	mdev_state->vconfig[0x65] = 0x65;
	mdev_state->vconfig[0x66] = 0x72;
	mdev_state->vconfig[0x67] = 0x69;
	mdev_state->vconfig[0x68] = 0x61;
	mdev_state->vconfig[0x69] = 0x6c;
	mdev_state->vconfig[0x6a] = 0x2f;
	mdev_state->vconfig[0x6b] = 0x55;
	mdev_state->vconfig[0x6c] = 0x41;
	mdev_state->vconfig[0x6d] = 0x52;
	mdev_state->vconfig[0x6e] = 0x54;
}
/*
 * handle_pci_cfg_write() - emulate a guest write to PCI config space.
 * @offset: config-space offset being written
 * @buf:    raw bytes from the guest (at most @count valid)
 *
 * Only the interrupt line and BAR0/BAR1 are writable; BAR writes honor
 * the standard BAR-sizing protocol (write 0xffffffff, read back mask).
 * Other registers are read-only or ignored.
 */
static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
				 char *buf, u32 count)
{
	u32 cfg_addr, bar_mask, bar_index = 0;

	switch (offset) {
	case 0x04: /* device control */
	case 0x06: /* device status */
		/* do nothing */
		break;
	case 0x3c:  /* interrupt line */
		mdev_state->vconfig[0x3c] = buf[0];
		break;
	case 0x3d:
		/*
		 * Interrupt Pin is hardwired to INTA.
		 * This field is write protected by hardware
		 */
		break;
	case 0x10:  /* BAR0 */
	case 0x14:  /* BAR1 */
		if (offset == 0x10)
			bar_index = 0;
		else if (offset == 0x14)
			bar_index = 1;

		/* single-port device exposes only BAR0; BAR1 reads as 0 */
		if ((mdev_state->nr_ports == 1) && (bar_index == 1)) {
			STORE_LE32(&mdev_state->vconfig[offset], 0);
			break;
		}

		cfg_addr = *(u32 *)buf;
		pr_info("BAR%d addr 0x%x\n", bar_index, cfg_addr);

		/* sizing write: reflect the BAR size mask back to the guest */
		if (cfg_addr == 0xffffffff) {
			bar_mask = mdev_state->bar_mask[bar_index];
			cfg_addr = (cfg_addr & bar_mask);
		}

		/* preserve the read-only low flag bits (I/O space indicator) */
		cfg_addr |= (mdev_state->vconfig[offset] & 0x3ul);
		STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
		break;
	case 0x18:  /* BAR2 */
	case 0x1c:  /* BAR3 */
	case 0x20:  /* BAR4 */
		/* unimplemented BARs read back as 0 */
		STORE_LE32(&mdev_state->vconfig[offset], 0);
		break;
	default:
		pr_info("PCI config write @0x%x of %d bytes not handled\n",
			offset, count);
		break;
	}
}
/*
 * handle_bar_write() - emulate a guest write to a UART register in
 * BAR @index.  Only the first byte of @buf is consumed (registers are
 * one byte wide).  TX data loops back into the rx/tx FIFO, and virtual
 * interrupts are raised via mtty_trigger_interrupt() when the matching
 * IER bits are set.  FIFO state is protected by rxtx_lock.
 */
static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
				u16 offset, char *buf, u32 count)
{
	u8 data = *buf;

	/* Handle data written by guest */
	switch (offset) {
	case UART_TX:
		/* if DLAB set, data is LSB of divisor */
		if (mdev_state->s[index].dlab) {
			mdev_state->s[index].divisor |= data;
			break;
		}

		mutex_lock(&mdev_state->rxtx_lock);

		/* save in TX buffer */
		if (mdev_state->s[index].rxtx.count <
				mdev_state->s[index].max_fifo_size) {
			mdev_state->s[index].rxtx.fifo[
					mdev_state->s[index].rxtx.head] = data;
			mdev_state->s[index].rxtx.count++;
			CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.head);
			mdev_state->s[index].overrun = false;

			/*
			 * Trigger interrupt if receive data interrupt is
			 * enabled and fifo reached trigger level
			 */
			if ((mdev_state->s[index].uart_reg[UART_IER] &
						UART_IER_RDI) &&
			   (mdev_state->s[index].rxtx.count ==
				    mdev_state->s[index].intr_trigger_level)) {
				/* trigger interrupt */
#if defined(DEBUG_INTR)
				pr_err("Serial port %d: Fifo level trigger\n",
					index);
#endif
				mtty_trigger_interrupt(mdev_state->mdev->uuid);
			}
		} else {
			/* FIFO full: record overrun instead of storing data */
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: Buffer Overflow\n", index);
#endif
			mdev_state->s[index].overrun = true;

			/*
			 * Trigger interrupt if receiver line status interrupt
			 * is enabled
			 */
			if (mdev_state->s[index].uart_reg[UART_IER] &
							UART_IER_RLSI)
				mtty_trigger_interrupt(mdev_state->mdev->uuid);
		}
		mutex_unlock(&mdev_state->rxtx_lock);
		break;

	case UART_IER:
		/* if DLAB set, data is MSB of divisor */
		if (mdev_state->s[index].dlab)
			mdev_state->s[index].divisor |= (u16)data << 8;
		else {
			mdev_state->s[index].uart_reg[offset] = data;
			mutex_lock(&mdev_state->rxtx_lock);
			/* THR-empty interrupt fires at once if FIFO is empty */
			if ((data & UART_IER_THRI) &&
			    (mdev_state->s[index].rxtx.head ==
					mdev_state->s[index].rxtx.tail)) {
#if defined(DEBUG_INTR)
				pr_err("Serial port %d: IER_THRI write\n",
					index);
#endif
				mtty_trigger_interrupt(mdev_state->mdev->uuid);
			}

			mutex_unlock(&mdev_state->rxtx_lock);
		}

		break;

	case UART_FCR:
		mdev_state->s[index].fcr = data;

		mutex_lock(&mdev_state->rxtx_lock);
		if (data & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT)) {
			/* clear loop back FIFO */
			mdev_state->s[index].rxtx.count = 0;
			mdev_state->s[index].rxtx.head = 0;
			mdev_state->s[index].rxtx.tail = 0;
		}
		mutex_unlock(&mdev_state->rxtx_lock);

		switch (data & UART_FCR_TRIGGER_MASK) {
		case UART_FCR_TRIGGER_1:
			mdev_state->s[index].intr_trigger_level = 1;
			break;

		case UART_FCR_TRIGGER_4:
			mdev_state->s[index].intr_trigger_level = 4;
			break;

		case UART_FCR_TRIGGER_8:
			mdev_state->s[index].intr_trigger_level = 8;
			break;

		case UART_FCR_TRIGGER_14:
			mdev_state->s[index].intr_trigger_level = 14;
			break;
		}

		/*
		 * Set trigger level to 1 otherwise or implement timer with
		 * timeout of 4 characters and on expiring that timer set
		 * Recevice data timeout in IIR register
		 */
		/* NOTE(review): this unconditionally overrides the trigger
		 * level chosen in the switch above — intentional per the
		 * comment, but worth confirming. */
		mdev_state->s[index].intr_trigger_level = 1;
		if (data & UART_FCR_ENABLE_FIFO)
			mdev_state->s[index].max_fifo_size = MAX_FIFO_SIZE;
		else {
			mdev_state->s[index].max_fifo_size = 1;
			mdev_state->s[index].intr_trigger_level = 1;
		}

		break;

	case UART_LCR:
		/* DLAB gates divisor-latch access on subsequent TX/IER writes */
		if (data & UART_LCR_DLAB) {
			mdev_state->s[index].dlab = true;
			mdev_state->s[index].divisor = 0;
		} else
			mdev_state->s[index].dlab = false;

		mdev_state->s[index].uart_reg[offset] = data;
		break;

	case UART_MCR:
		mdev_state->s[index].uart_reg[offset] = data;

		/* modem-status interrupt on OUT2 assertion */
		if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
				(data & UART_MCR_OUT2)) {
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: MCR_OUT2 write\n", index);
#endif
			mtty_trigger_interrupt(mdev_state->mdev->uuid);
		}

		/* modem-status interrupt on RTS/DTR change */
		if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
				(data & (UART_MCR_RTS | UART_MCR_DTR))) {
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: MCR RTS/DTR write\n", index);
#endif
			mtty_trigger_interrupt(mdev_state->mdev->uuid);
		}
		break;

	case UART_LSR:
	case UART_MSR:
		/* do nothing: LSR and MSR are read-only status registers */
		break;

	case UART_SCR:
		mdev_state->s[index].uart_reg[offset] = data;
		break;

	default:
		break;
	}
}
/*
 * handle_bar_read() - emulate a guest read from a UART register in
 * BAR @index, storing the one-byte result in *@buf.  RX drains the
 * loop-back FIFO; IIR/LSR/MSR are synthesized from the current FIFO
 * and register state.  FIFO state is protected by rxtx_lock.
 */
static void handle_bar_read(unsigned int index, struct mdev_state *mdev_state,
			    u16 offset, char *buf, u32 count)
{
	/* Handle read requests by guest */
	switch (offset) {
	case UART_RX:
		/* if DLAB set, data is LSB of divisor */
		if (mdev_state->s[index].dlab) {
			*buf = (u8)mdev_state->s[index].divisor;
			break;
		}

		mutex_lock(&mdev_state->rxtx_lock);
		/* return data in tx buffer */
		if (mdev_state->s[index].rxtx.head !=
				 mdev_state->s[index].rxtx.tail) {
			*buf = mdev_state->s[index].rxtx.fifo[
						mdev_state->s[index].rxtx.tail];
			mdev_state->s[index].rxtx.count--;
			CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.tail);
		}

		if (mdev_state->s[index].rxtx.head ==
				mdev_state->s[index].rxtx.tail) {
		/*
		 *  Trigger interrupt if tx buffer empty interrupt is
		 *  enabled and fifo is empty
		 */
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: Buffer Empty\n", index);
#endif
			if (mdev_state->s[index].uart_reg[UART_IER] &
							 UART_IER_THRI)
				mtty_trigger_interrupt(mdev_state->mdev->uuid);
		}
		mutex_unlock(&mdev_state->rxtx_lock);

		break;

	case UART_IER:
		/* if DLAB set, reads return MSB of divisor instead */
		if (mdev_state->s[index].dlab) {
			*buf = (u8)(mdev_state->s[index].divisor >> 8);
			break;
		}
		/* only the low four IER bits are implemented */
		*buf = mdev_state->s[index].uart_reg[offset] & 0x0f;
		break;

	case UART_IIR:
	{
		u8 ier = mdev_state->s[index].uart_reg[UART_IER];
		*buf = 0;

		mutex_lock(&mdev_state->rxtx_lock);
		/* Interrupt priority 1: Parity, overrun, framing or break */
		if ((ier & UART_IER_RLSI) && mdev_state->s[index].overrun)
			*buf |= UART_IIR_RLSI;

		/* Interrupt priority 2: Fifo trigger level reached */
		if ((ier & UART_IER_RDI) &&
		    (mdev_state->s[index].rxtx.count ==
		      mdev_state->s[index].intr_trigger_level))
			*buf |= UART_IIR_RDI;

		/* Interrupt priotiry 3: transmitter holding register empty */
		if ((ier & UART_IER_THRI) &&
		    (mdev_state->s[index].rxtx.head ==
				mdev_state->s[index].rxtx.tail))
			*buf |= UART_IIR_THRI;

		/* Interrupt priotiry 4: Modem status: CTS, DSR, RI or DCD  */
		if ((ier & UART_IER_MSI) &&
		    (mdev_state->s[index].uart_reg[UART_MCR] &
				 (UART_MCR_RTS | UART_MCR_DTR)))
			*buf |= UART_IIR_MSI;

		/* bit0: 0=> interrupt pending, 1=> no interrupt is pending */
		if (*buf == 0)
			*buf = UART_IIR_NO_INT;

		/* set bit 6 & 7 to be 16550 compatible */
		*buf |= 0xC0;
		mutex_unlock(&mdev_state->rxtx_lock);
	}
	break;

	case UART_LCR:
	case UART_MCR:
		*buf = mdev_state->s[index].uart_reg[offset];
		break;

	case UART_LSR:
	{
		u8 lsr = 0;

		mutex_lock(&mdev_state->rxtx_lock);
		/* atleast one char in FIFO */
		if (mdev_state->s[index].rxtx.head !=
				 mdev_state->s[index].rxtx.tail)
			lsr |= UART_LSR_DR;

		/* if FIFO overrun */
		if (mdev_state->s[index].overrun)
			lsr |= UART_LSR_OE;

		/* transmit FIFO empty and tramsitter empty */
		if (mdev_state->s[index].rxtx.head ==
				 mdev_state->s[index].rxtx.tail)
			lsr |= UART_LSR_TEMT | UART_LSR_THRE;

		mutex_unlock(&mdev_state->rxtx_lock);
		*buf = lsr;
		break;
	}
	case UART_MSR:
		*buf = UART_MSR_DSR | UART_MSR_DDSR | UART_MSR_DCD;

		mutex_lock(&mdev_state->rxtx_lock);
		/* if AFE is 1 and FIFO have space, set CTS bit */
		if (mdev_state->s[index].uart_reg[UART_MCR] &
						 UART_MCR_AFE) {
			if (mdev_state->s[index].rxtx.count <
					 mdev_state->s[index].max_fifo_size)
				*buf |= UART_MSR_CTS | UART_MSR_DCTS;
		} else
			*buf |= UART_MSR_CTS | UART_MSR_DCTS;
		mutex_unlock(&mdev_state->rxtx_lock);

		break;

	case UART_SCR:
		*buf = mdev_state->s[index].uart_reg[offset];
		break;

	default:
		break;
	}
}
/*
 * mdev_read_base() - cache the guest-programmed BAR base addresses from
 * the virtual config space into region_info[].start.  64-bit memory
 * BARs consume two dwords; everything else is treated as 32-bit.
 * NOTE(review): PCI_BASE_ADDRESS_MEM_MASK is applied even though this
 * device exposes I/O BARs — confirm this matches the intended decode.
 */
static void mdev_read_base(struct mdev_state *mdev_state)
{
	int index, pos;
	u32 start_lo, start_hi;
	u32 mem_type;

	pos = PCI_BASE_ADDRESS_0;

	for (index = 0; index <= VFIO_PCI_BAR5_REGION_INDEX; index++) {

		if (!mdev_state->region_info[index].size)
			continue;

		start_lo = (*(u32 *)(mdev_state->vconfig + pos)) &
			PCI_BASE_ADDRESS_MEM_MASK;
		mem_type = (*(u32 *)(mdev_state->vconfig + pos)) &
			PCI_BASE_ADDRESS_MEM_TYPE_MASK;

		switch (mem_type) {
		case PCI_BASE_ADDRESS_MEM_TYPE_64:
			/* upper half lives in the next dword */
			start_hi = (*(u32 *)(mdev_state->vconfig + pos + 4));
			pos += 4;
			break;
		case PCI_BASE_ADDRESS_MEM_TYPE_32:
		case PCI_BASE_ADDRESS_MEM_TYPE_1M:
			/* 1M mem BAR treated as 32-bit BAR */
		default:
			/* mem unknown type treated as 32-bit BAR */
			start_hi = 0;
			break;
		}
		pos += 4;
		mdev_state->region_info[index].start = ((u64)start_hi << 32) |
							start_lo;
	}
}
/*
 * mdev_access() - single entry point for all region reads and writes.
 * @pos encodes the region index in its top bits (see the OFFSET macros);
 * the remainder is the offset within that region.  Dispatches to the
 * config-space or BAR handlers under ops_lock.
 *
 * Returns @count on success, -EINVAL on bad arguments, -1 for an
 * unhandled region index.
 */
static ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count,
			   loff_t pos, bool is_write)
{
	struct mdev_state *mdev_state;
	unsigned int index;
	loff_t offset;
	int ret = 0;

	if (!mdev || !buf)
		return -EINVAL;

	mdev_state = mdev_get_drvdata(mdev);
	if (!mdev_state) {
		pr_err("%s mdev_state not found\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&mdev_state->ops_lock);

	/* split the VFIO offset into region index + in-region offset */
	index = MTTY_VFIO_PCI_OFFSET_TO_INDEX(pos);
	offset = pos & MTTY_VFIO_PCI_OFFSET_MASK;
	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:

#if defined(DEBUG)
		pr_info("%s: PCI config space %s at offset 0x%llx\n",
			 __func__, is_write ? "write" : "read", offset);
#endif
		if (is_write) {
			dump_buffer(buf, count);
			handle_pci_cfg_write(mdev_state, offset, buf, count);
		} else {
			/* config reads come straight from the virtual space */
			memcpy(buf, (mdev_state->vconfig + offset), count);
			dump_buffer(buf, count);
		}

		break;

	case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
		/* lazily decode BAR bases on first access */
		if (!mdev_state->region_info[index].start)
			mdev_read_base(mdev_state);

		if (is_write) {
			dump_buffer(buf, count);

#if defined(DEBUG_REGS)
			pr_info("%s: BAR%d  WR @0x%llx %s val:0x%02x dlab:%d\n",
				__func__, index, offset, wr_reg[offset],
				(u8)*buf, mdev_state->s[index].dlab);
#endif
			handle_bar_write(index, mdev_state, offset, buf, count);
		} else {
			handle_bar_read(index, mdev_state, offset, buf, count);
			dump_buffer(buf, count);

#if defined(DEBUG_REGS)
			pr_info("%s: BAR%d  RD @0x%llx %s val:0x%02x dlab:%d\n",
				__func__, index, offset, rd_reg[offset],
				(u8)*buf, mdev_state->s[index].dlab);
#endif
		}
		break;

	default:
		ret = -1;
		goto accessfailed;
	}

	ret = count;


accessfailed:
	mutex_unlock(&mdev_state->ops_lock);

	return ret;
}
/*
 * mtty_create() - mdev "create" callback.  Chooses 1 or 2 ports from the
 * supported-type kobject name ("<driver>-1" or "<driver>-2"), allocates
 * and initializes per-device state plus the virtual config space, and
 * registers the device on the global list.
 *
 * Returns 0 on success, -EINVAL for an unrecognized type, -ENOMEM on
 * allocation failure.
 */
int mtty_create(struct kobject *kobj, struct mdev_device *mdev)
{
	struct mdev_state *mdev_state;
	char name[MTTY_STRING_LEN];
	int nr_ports = 0, i;

	if (!mdev)
		return -EINVAL;

	/* map the type name "<driver>-N" to N ports */
	for (i = 0; i < 2; i++) {
		snprintf(name, MTTY_STRING_LEN, "%s-%d",
			dev_driver_string(mdev_parent_dev(mdev)), i + 1);
		if (!strcmp(kobj->name, name)) {
			nr_ports = i + 1;
			break;
		}
	}

	if (!nr_ports)
		return -EINVAL;

	mdev_state = kzalloc(sizeof(struct mdev_state), GFP_KERNEL);
	if (mdev_state == NULL)
		return -ENOMEM;

	mdev_state->nr_ports = nr_ports;
	mdev_state->irq_index = -1;	/* no IRQ configured yet */
	mdev_state->s[0].max_fifo_size = MAX_FIFO_SIZE;
	mdev_state->s[1].max_fifo_size = MAX_FIFO_SIZE;
	mutex_init(&mdev_state->rxtx_lock);
	mdev_state->vconfig = kzalloc(MTTY_CONFIG_SPACE_SIZE, GFP_KERNEL);

	if (mdev_state->vconfig == NULL) {
		kfree(mdev_state);
		return -ENOMEM;
	}

	mutex_init(&mdev_state->ops_lock);
	mdev_state->mdev = mdev;
	mdev_set_drvdata(mdev, mdev_state);

	mtty_create_config_space(mdev_state);

	mutex_lock(&mdev_list_lock);
	list_add(&mdev_state->next, &mdev_devices_list);
	mutex_unlock(&mdev_list_lock);

	return 0;
}
  648. int mtty_remove(struct mdev_device *mdev)
  649. {
  650. struct mdev_state *mds, *tmp_mds;
  651. struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
  652. int ret = -EINVAL;
  653. mutex_lock(&mdev_list_lock);
  654. list_for_each_entry_safe(mds, tmp_mds, &mdev_devices_list, next) {
  655. if (mdev_state == mds) {
  656. list_del(&mdev_state->next);
  657. mdev_set_drvdata(mdev, NULL);
  658. kfree(mdev_state->vconfig);
  659. kfree(mdev_state);
  660. ret = 0;
  661. break;
  662. }
  663. }
  664. mutex_unlock(&mdev_list_lock);
  665. return ret;
  666. }
  667. int mtty_reset(struct mdev_device *mdev)
  668. {
  669. struct mdev_state *mdev_state;
  670. if (!mdev)
  671. return -EINVAL;
  672. mdev_state = mdev_get_drvdata(mdev);
  673. if (!mdev_state)
  674. return -EINVAL;
  675. pr_info("%s: called\n", __func__);
  676. return 0;
  677. }
/*
 * mtty_read() - VFIO read callback.  Splits the user request into
 * naturally aligned 4/2/1-byte accesses, forwards each to mdev_access(),
 * and copies the result to userspace.  Returns the number of bytes read.
 * NOTE(review): any failure (including mdev_access errors) is collapsed
 * to -EFAULT; the underlying error code is discarded.
 */
ssize_t mtty_read(struct mdev_device *mdev, char __user *buf, size_t count,
		  loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		/* prefer the widest access the alignment allows */
		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			ret =  mdev_access(mdev, (char *)&val, sizeof(val),
					   *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 2;
		} else {
			u8 val;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;

read_err:
	return -EFAULT;
}
/*
 * mtty_write() - VFIO write callback; mirror of mtty_read().  Copies
 * from userspace and forwards naturally aligned 4/2/1-byte chunks to
 * mdev_access() as writes.  Returns the number of bytes written.
 * NOTE(review): as in mtty_read(), all failures are reported as -EFAULT.
 */
ssize_t mtty_write(struct mdev_device *mdev, const char __user *buf,
		   size_t count, loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		/* prefer the widest access the alignment allows */
		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 1;
		}
		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;
write_err:
	return -EFAULT;
}
/*
 * mtty_set_irqs() - handle VFIO_DEVICE_SET_IRQS for this device.
 * Supports INTx and MSI trigger setup via eventfd; MASK/UNMASK are
 * accepted but ignored, and MSI-X/ERR/REQ indices only log.  The chosen
 * eventfd and irq_index are recorded under ops_lock for
 * mtty_trigger_interrupt() to use.
 *
 * NOTE(review): on DATA_NONE disable, the eventfd reference is put but
 * intx_evtfd/msi_evtfd are not cleared to NULL — confirm callers never
 * trigger after disable.
 */
static int mtty_set_irqs(struct mdev_device *mdev, uint32_t flags,
			 unsigned int index, unsigned int start,
			 unsigned int count, void *data)
{
	int ret = 0;
	struct mdev_state *mdev_state;

	if (!mdev)
		return -EINVAL;

	mdev_state = mdev_get_drvdata(mdev);
	if (!mdev_state)
		return -EINVAL;

	mutex_lock(&mdev_state->ops_lock);
	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
		{
			if (flags & VFIO_IRQ_SET_DATA_NONE) {
				pr_info("%s: disable INTx\n", __func__);
				if (mdev_state->intx_evtfd)
					eventfd_ctx_put(mdev_state->intx_evtfd);
				break;
			}

			if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
				int fd = *(int *)data;

				if (fd > 0) {
					struct eventfd_ctx *evt;

					evt = eventfd_ctx_fdget(fd);
					if (IS_ERR(evt)) {
						ret = PTR_ERR(evt);
						break;
					}
					mdev_state->intx_evtfd = evt;
					mdev_state->irq_fd = fd;
					mdev_state->irq_index = index;
					break;
				}
			}
			break;
		}
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			if (flags & VFIO_IRQ_SET_DATA_NONE) {
				if (mdev_state->msi_evtfd)
					eventfd_ctx_put(mdev_state->msi_evtfd);
				pr_info("%s: disable MSI\n", __func__);
				/* fall back to INTx as the active index */
				mdev_state->irq_index = VFIO_PCI_INTX_IRQ_INDEX;
				break;
			}
			if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
				int fd = *(int *)data;
				struct eventfd_ctx *evt;

				if (fd <= 0)
					break;

				/* already configured: ignore a second setup */
				if (mdev_state->msi_evtfd)
					break;

				evt = eventfd_ctx_fdget(fd);
				if (IS_ERR(evt)) {
					ret = PTR_ERR(evt);
					break;
				}
				mdev_state->msi_evtfd = evt;
				mdev_state->irq_fd = fd;
				mdev_state->irq_index = index;
			}
			break;
	}
	break;
	case VFIO_PCI_MSIX_IRQ_INDEX:
		pr_info("%s: MSIX_IRQ\n", __func__);
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		pr_info("%s: ERR_IRQ\n", __func__);
		break;
	case VFIO_PCI_REQ_IRQ_INDEX:
		pr_info("%s: REQ_IRQ\n", __func__);
		break;
	}

	mutex_unlock(&mdev_state->ops_lock);
	return ret;
}
  856. static int mtty_trigger_interrupt(uuid_le uuid)
  857. {
  858. int ret = -1;
  859. struct mdev_state *mdev_state;
  860. mdev_state = find_mdev_state_by_uuid(uuid);
  861. if (!mdev_state) {
  862. pr_info("%s: mdev not found\n", __func__);
  863. return -EINVAL;
  864. }
  865. if ((mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX) &&
  866. (!mdev_state->msi_evtfd))
  867. return -EINVAL;
  868. else if ((mdev_state->irq_index == VFIO_PCI_INTX_IRQ_INDEX) &&
  869. (!mdev_state->intx_evtfd)) {
  870. pr_info("%s: Intr eventfd not found\n", __func__);
  871. return -EINVAL;
  872. }
  873. if (mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX)
  874. ret = eventfd_signal(mdev_state->msi_evtfd, 1);
  875. else
  876. ret = eventfd_signal(mdev_state->intx_evtfd, 1);
  877. #if defined(DEBUG_INTR)
  878. pr_info("Intx triggered\n");
  879. #endif
  880. if (ret != 1)
  881. pr_err("%s: eventfd signal failed (%d)\n", __func__, ret);
  882. return ret;
  883. }
  884. int mtty_get_region_info(struct mdev_device *mdev,
  885. struct vfio_region_info *region_info,
  886. u16 *cap_type_id, void **cap_type)
  887. {
  888. unsigned int size = 0;
  889. struct mdev_state *mdev_state;
  890. int bar_index;
  891. if (!mdev)
  892. return -EINVAL;
  893. mdev_state = mdev_get_drvdata(mdev);
  894. if (!mdev_state)
  895. return -EINVAL;
  896. mutex_lock(&mdev_state->ops_lock);
  897. bar_index = region_info->index;
  898. switch (bar_index) {
  899. case VFIO_PCI_CONFIG_REGION_INDEX:
  900. size = MTTY_CONFIG_SPACE_SIZE;
  901. break;
  902. case VFIO_PCI_BAR0_REGION_INDEX:
  903. size = MTTY_IO_BAR_SIZE;
  904. break;
  905. case VFIO_PCI_BAR1_REGION_INDEX:
  906. if (mdev_state->nr_ports == 2)
  907. size = MTTY_IO_BAR_SIZE;
  908. break;
  909. default:
  910. size = 0;
  911. break;
  912. }
  913. mdev_state->region_info[bar_index].size = size;
  914. mdev_state->region_info[bar_index].vfio_offset =
  915. MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);
  916. region_info->size = size;
  917. region_info->offset = MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);
  918. region_info->flags = VFIO_REGION_INFO_FLAG_READ |
  919. VFIO_REGION_INFO_FLAG_WRITE;
  920. mutex_unlock(&mdev_state->ops_lock);
  921. return 0;
  922. }
  923. int mtty_get_irq_info(struct mdev_device *mdev, struct vfio_irq_info *irq_info)
  924. {
  925. switch (irq_info->index) {
  926. case VFIO_PCI_INTX_IRQ_INDEX:
  927. case VFIO_PCI_MSI_IRQ_INDEX:
  928. case VFIO_PCI_REQ_IRQ_INDEX:
  929. break;
  930. default:
  931. return -EINVAL;
  932. }
  933. irq_info->flags = VFIO_IRQ_INFO_EVENTFD;
  934. irq_info->count = 1;
  935. if (irq_info->index == VFIO_PCI_INTX_IRQ_INDEX)
  936. irq_info->flags |= (VFIO_IRQ_INFO_MASKABLE |
  937. VFIO_IRQ_INFO_AUTOMASKED);
  938. else
  939. irq_info->flags |= VFIO_IRQ_INFO_NORESIZE;
  940. return 0;
  941. }
  942. int mtty_get_device_info(struct mdev_device *mdev,
  943. struct vfio_device_info *dev_info)
  944. {
  945. dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
  946. dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
  947. dev_info->num_irqs = VFIO_PCI_NUM_IRQS;
  948. return 0;
  949. }
  950. static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd,
  951. unsigned long arg)
  952. {
  953. int ret = 0;
  954. unsigned long minsz;
  955. struct mdev_state *mdev_state;
  956. if (!mdev)
  957. return -EINVAL;
  958. mdev_state = mdev_get_drvdata(mdev);
  959. if (!mdev_state)
  960. return -ENODEV;
  961. switch (cmd) {
  962. case VFIO_DEVICE_GET_INFO:
  963. {
  964. struct vfio_device_info info;
  965. minsz = offsetofend(struct vfio_device_info, num_irqs);
  966. if (copy_from_user(&info, (void __user *)arg, minsz))
  967. return -EFAULT;
  968. if (info.argsz < minsz)
  969. return -EINVAL;
  970. ret = mtty_get_device_info(mdev, &info);
  971. if (ret)
  972. return ret;
  973. memcpy(&mdev_state->dev_info, &info, sizeof(info));
  974. return copy_to_user((void __user *)arg, &info, minsz);
  975. }
  976. case VFIO_DEVICE_GET_REGION_INFO:
  977. {
  978. struct vfio_region_info info;
  979. u16 cap_type_id = 0;
  980. void *cap_type = NULL;
  981. minsz = offsetofend(struct vfio_region_info, offset);
  982. if (copy_from_user(&info, (void __user *)arg, minsz))
  983. return -EFAULT;
  984. if (info.argsz < minsz)
  985. return -EINVAL;
  986. ret = mtty_get_region_info(mdev, &info, &cap_type_id,
  987. &cap_type);
  988. if (ret)
  989. return ret;
  990. return copy_to_user((void __user *)arg, &info, minsz);
  991. }
  992. case VFIO_DEVICE_GET_IRQ_INFO:
  993. {
  994. struct vfio_irq_info info;
  995. minsz = offsetofend(struct vfio_irq_info, count);
  996. if (copy_from_user(&info, (void __user *)arg, minsz))
  997. return -EFAULT;
  998. if ((info.argsz < minsz) ||
  999. (info.index >= mdev_state->dev_info.num_irqs))
  1000. return -EINVAL;
  1001. ret = mtty_get_irq_info(mdev, &info);
  1002. if (ret)
  1003. return ret;
  1004. if (info.count == -1)
  1005. return -EINVAL;
  1006. return copy_to_user((void __user *)arg, &info, minsz);
  1007. }
  1008. case VFIO_DEVICE_SET_IRQS:
  1009. {
  1010. struct vfio_irq_set hdr;
  1011. u8 *data = NULL, *ptr = NULL;
  1012. size_t data_size = 0;
  1013. minsz = offsetofend(struct vfio_irq_set, count);
  1014. if (copy_from_user(&hdr, (void __user *)arg, minsz))
  1015. return -EFAULT;
  1016. ret = vfio_set_irqs_validate_and_prepare(&hdr,
  1017. mdev_state->dev_info.num_irqs,
  1018. VFIO_PCI_NUM_IRQS,
  1019. &data_size);
  1020. if (ret)
  1021. return ret;
  1022. if (data_size) {
  1023. ptr = data = memdup_user((void __user *)(arg + minsz),
  1024. data_size);
  1025. if (IS_ERR(data))
  1026. return PTR_ERR(data);
  1027. }
  1028. ret = mtty_set_irqs(mdev, hdr.flags, hdr.index, hdr.start,
  1029. hdr.count, data);
  1030. kfree(ptr);
  1031. return ret;
  1032. }
  1033. case VFIO_DEVICE_RESET:
  1034. return mtty_reset(mdev);
  1035. }
  1036. return -ENOTTY;
  1037. }
  1038. int mtty_open(struct mdev_device *mdev)
  1039. {
  1040. pr_info("%s\n", __func__);
  1041. return 0;
  1042. }
  1043. void mtty_close(struct mdev_device *mdev)
  1044. {
  1045. pr_info("%s\n", __func__);
  1046. }
  1047. static ssize_t
  1048. sample_mtty_dev_show(struct device *dev, struct device_attribute *attr,
  1049. char *buf)
  1050. {
  1051. return sprintf(buf, "This is phy device\n");
  1052. }
/* Read-only sysfs attribute on the parent (physical) device. */
static DEVICE_ATTR_RO(sample_mtty_dev);

static struct attribute *mtty_dev_attrs[] = {
	&dev_attr_sample_mtty_dev.attr,
	NULL,
};

/* Grouped under <sysfs>/.../mtty_dev/ on the parent device. */
static const struct attribute_group mtty_dev_group = {
	.name  = "mtty_dev",
	.attrs = mtty_dev_attrs,
};

/* NULL-terminated group list handed to mdev_parent_ops.dev_attr_groups. */
const struct attribute_group *mtty_dev_groups[] = {
	&mtty_dev_group,
	NULL,
};
  1066. static ssize_t
  1067. sample_mdev_dev_show(struct device *dev, struct device_attribute *attr,
  1068. char *buf)
  1069. {
  1070. struct mdev_device *mdev = to_mdev_device(dev);
  1071. if (mdev)
  1072. return sprintf(buf, "This is MDEV %s\n", dev_name(&mdev->dev));
  1073. return sprintf(buf, "\n");
  1074. }
/* Read-only sysfs attribute on each mediated device. */
static DEVICE_ATTR_RO(sample_mdev_dev);

static struct attribute *mdev_dev_attrs[] = {
	&dev_attr_sample_mdev_dev.attr,
	NULL,
};

/* Grouped under <sysfs>/.../vendor/ on each mdev. */
static const struct attribute_group mdev_dev_group = {
	.name  = "vendor",
	.attrs = mdev_dev_attrs,
};

/* NULL-terminated group list handed to mdev_parent_ops.mdev_attr_groups. */
const struct attribute_group *mdev_dev_groups[] = {
	&mdev_dev_group,
	NULL,
};
  1088. static ssize_t
  1089. name_show(struct kobject *kobj, struct device *dev, char *buf)
  1090. {
  1091. char name[MTTY_STRING_LEN];
  1092. int i;
  1093. const char *name_str[2] = {"Single port serial", "Dual port serial"};
  1094. for (i = 0; i < 2; i++) {
  1095. snprintf(name, MTTY_STRING_LEN, "%s-%d",
  1096. dev_driver_string(dev), i + 1);
  1097. if (!strcmp(kobj->name, name))
  1098. return sprintf(buf, "%s\n", name_str[i]);
  1099. }
  1100. return -EINVAL;
  1101. }
  1102. MDEV_TYPE_ATTR_RO(name);
  1103. static ssize_t
  1104. available_instances_show(struct kobject *kobj, struct device *dev, char *buf)
  1105. {
  1106. char name[MTTY_STRING_LEN];
  1107. int i;
  1108. struct mdev_state *mds;
  1109. int ports = 0, used = 0;
  1110. for (i = 0; i < 2; i++) {
  1111. snprintf(name, MTTY_STRING_LEN, "%s-%d",
  1112. dev_driver_string(dev), i + 1);
  1113. if (!strcmp(kobj->name, name)) {
  1114. ports = i + 1;
  1115. break;
  1116. }
  1117. }
  1118. if (!ports)
  1119. return -EINVAL;
  1120. list_for_each_entry(mds, &mdev_devices_list, next)
  1121. used += mds->nr_ports;
  1122. return sprintf(buf, "%d\n", (MAX_MTTYS - used)/ports);
  1123. }
  1124. MDEV_TYPE_ATTR_RO(available_instances);
  1125. static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
  1126. char *buf)
  1127. {
  1128. return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
  1129. }
  1130. MDEV_TYPE_ATTR_RO(device_api);
/* Per-type attributes shared by both supported mdev types. */
static struct attribute *mdev_types_attrs[] = {
	&mdev_type_attr_name.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_available_instances.attr,
	NULL,
};

/* Type "1": single-port serial mdev. */
static struct attribute_group mdev_type_group1 = {
	.name  = "1",
	.attrs = mdev_types_attrs,
};

/* Type "2": dual-port serial mdev. */
static struct attribute_group mdev_type_group2 = {
	.name  = "2",
	.attrs = mdev_types_attrs,
};

/* NULL-terminated list handed to mdev_parent_ops.supported_type_groups. */
struct attribute_group *mdev_type_groups[] = {
	&mdev_type_group1,
	&mdev_type_group2,
	NULL,
};
/*
 * mdev framework callbacks for the mtty parent device: sysfs attribute
 * groups, supported type descriptions, and the device lifecycle / I/O
 * entry points defined above in this file.
 */
struct mdev_parent_ops mdev_fops = {
	.owner                  = THIS_MODULE,
	.dev_attr_groups        = mtty_dev_groups,
	.mdev_attr_groups       = mdev_dev_groups,
	.supported_type_groups  = mdev_type_groups,
	.create                 = mtty_create,
	.remove			= mtty_remove,
	.open                   = mtty_open,
	.release                = mtty_close,
	.read                   = mtty_read,
	.write                  = mtty_write,
	.ioctl		        = mtty_ioctl,
};
/* Release callback for the statically-embedded parent device; the struct
 * itself is not freed (it lives in mtty_dev), so only log the event.
 */
static void mtty_device_release(struct device *dev)
{
	dev_dbg(dev, "mtty: released\n");
}
  1167. static int __init mtty_dev_init(void)
  1168. {
  1169. int ret = 0;
  1170. pr_info("mtty_dev: %s\n", __func__);
  1171. memset(&mtty_dev, 0, sizeof(mtty_dev));
  1172. idr_init(&mtty_dev.vd_idr);
  1173. ret = alloc_chrdev_region(&mtty_dev.vd_devt, 0, MINORMASK, MTTY_NAME);
  1174. if (ret < 0) {
  1175. pr_err("Error: failed to register mtty_dev, err:%d\n", ret);
  1176. return ret;
  1177. }
  1178. cdev_init(&mtty_dev.vd_cdev, &vd_fops);
  1179. cdev_add(&mtty_dev.vd_cdev, mtty_dev.vd_devt, MINORMASK);
  1180. pr_info("major_number:%d\n", MAJOR(mtty_dev.vd_devt));
  1181. mtty_dev.vd_class = class_create(THIS_MODULE, MTTY_CLASS_NAME);
  1182. if (IS_ERR(mtty_dev.vd_class)) {
  1183. pr_err("Error: failed to register mtty_dev class\n");
  1184. goto failed1;
  1185. }
  1186. mtty_dev.dev.class = mtty_dev.vd_class;
  1187. mtty_dev.dev.release = mtty_device_release;
  1188. dev_set_name(&mtty_dev.dev, "%s", MTTY_NAME);
  1189. ret = device_register(&mtty_dev.dev);
  1190. if (ret)
  1191. goto failed2;
  1192. if (mdev_register_device(&mtty_dev.dev, &mdev_fops) != 0)
  1193. goto failed3;
  1194. mutex_init(&mdev_list_lock);
  1195. INIT_LIST_HEAD(&mdev_devices_list);
  1196. goto all_done;
  1197. failed3:
  1198. device_unregister(&mtty_dev.dev);
  1199. failed2:
  1200. class_destroy(mtty_dev.vd_class);
  1201. failed1:
  1202. cdev_del(&mtty_dev.vd_cdev);
  1203. unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK);
  1204. all_done:
  1205. return ret;
  1206. }
/*
 * Module exit: undo mtty_dev_init() in reverse order — unregister from the
 * mdev core, drop the parent device, then release the idr, cdev, chrdev
 * region and class.
 */
static void __exit mtty_dev_exit(void)
{
	/* detach from any bus before the mdev core unregisters the device */
	mtty_dev.dev.bus = NULL;
	mdev_unregister_device(&mtty_dev.dev);

	device_unregister(&mtty_dev.dev);
	idr_destroy(&mtty_dev.vd_idr);
	cdev_del(&mtty_dev.vd_cdev);
	unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK);
	class_destroy(mtty_dev.vd_class);
	mtty_dev.vd_class = NULL;
	pr_info("mtty_dev: Unloaded!\n");
}
/* Module entry/exit hooks and metadata. */
module_init(mtty_dev_init)
module_exit(mtty_dev_exit)

MODULE_LICENSE("GPL v2");
MODULE_INFO(supported, "Test driver that simulate serial port over PCI");
MODULE_VERSION(VERSION_STRING);
MODULE_AUTHOR(DRIVER_AUTHOR);