goldfish_pipe.c 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628
  1. /*
  2. * Copyright (C) 2011 Google, Inc.
  3. * Copyright (C) 2012 Intel, Inc.
  4. * Copyright (C) 2013 Intel, Inc.
  5. *
  6. * This software is licensed under the terms of the GNU General Public
  7. * License version 2, as published by the Free Software Foundation, and
  8. * may be copied, distributed, and modified under those terms.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. */
  16. /* This source file contains the implementation of a special device driver
  17. * that intends to provide a *very* fast communication channel between the
  18. * guest system and the QEMU emulator.
  19. *
  20. * Usage from the guest is simply the following (error handling simplified):
  21. *
  22. * int fd = open("/dev/qemu_pipe",O_RDWR);
  23. * .... write() or read() through the pipe.
  24. *
  25. * This driver doesn't deal with the exact protocol used during the session.
  26. * It is intended to be as simple as something like:
  27. *
  28. * // do this _just_ after opening the fd to connect to a specific
  29. * // emulator service.
  30. * const char* msg = "<pipename>";
  31. * if (write(fd, msg, strlen(msg)+1) < 0) {
  32. * ... could not connect to <pipename> service
  33. * close(fd);
  34. * }
  35. *
  36. * // after this, simply read() and write() to communicate with the
  37. * // service. Exact protocol details left as an exercise to the reader.
  38. *
  39. * This driver is very fast because it doesn't copy any data through
  40. * intermediate buffers, since the emulator is capable of translating
  41. * guest user addresses into host ones.
  42. *
  43. * Note that we must however ensure that each user page involved in the
  44. * exchange is properly mapped during a transfer.
  45. */
  46. #include <linux/module.h>
  47. #include <linux/interrupt.h>
  48. #include <linux/kernel.h>
  49. #include <linux/spinlock.h>
  50. #include <linux/miscdevice.h>
  51. #include <linux/platform_device.h>
  52. #include <linux/poll.h>
  53. #include <linux/sched.h>
  54. #include <linux/bitops.h>
  55. #include <linux/slab.h>
  56. #include <linux/io.h>
  57. #include <linux/goldfish.h>
/*
 * IMPORTANT: The following constants must match the ones used and defined
 * in external/qemu/hw/goldfish_pipe.c in the Android source tree.
 */

/* pipe device registers (byte offsets from the i/o base) */
#define PIPE_REG_COMMAND		0x00	/* write: value = command */
#define PIPE_REG_STATUS			0x04	/* read */
#define PIPE_REG_CHANNEL		0x08	/* read/write: channel id (low 32 bits) */
#define PIPE_REG_CHANNEL_HIGH		0x30	/* read/write: channel id (high 32 bits) */
#define PIPE_REG_SIZE			0x0c	/* read/write: buffer size */
#define PIPE_REG_ADDRESS		0x10	/* write: physical address (low 32 bits) */
#define PIPE_REG_ADDRESS_HIGH		0x34	/* write: physical address (high 32 bits) */
#define PIPE_REG_WAKES			0x14	/* read: wake flags */
#define PIPE_REG_PARAMS_ADDR_LOW	0x18	/* read/write: batch data address */
#define PIPE_REG_PARAMS_ADDR_HIGH	0x1c	/* read/write: batch data address */
#define PIPE_REG_ACCESS_PARAMS		0x20	/* write: batch access "doorbell" */

/* list of commands for PIPE_REG_COMMAND */
#define CMD_OPEN	1	/* open new channel */
#define CMD_CLOSE	2	/* close channel (from guest) */
#define CMD_POLL	3	/* poll read/write status */

/* List of bitflags returned in status of CMD_POLL command */
#define PIPE_POLL_IN	(1 << 0)
#define PIPE_POLL_OUT	(1 << 1)
#define PIPE_POLL_HUP	(1 << 2)

/* The following commands are related to write operations */
#define CMD_WRITE_BUFFER	4	/* send a user buffer to the emulator */
#define CMD_WAKE_ON_WRITE	5	/* tell the emulator to wake us when writing
					 * is possible */

/* The following commands are related to read operations, they must be
 * listed in the same order than the corresponding write ones, since we
 * will use (CMD_READ_BUFFER - CMD_WRITE_BUFFER) as a special offset
 * in goldfish_pipe_read_write() below.
 */
#define CMD_READ_BUFFER		6	/* receive a user buffer from the emulator */
#define CMD_WAKE_ON_READ	7	/* tell the emulator to wake us when reading
					 * is possible */

/* Possible status values used to signal errors - see goldfish_pipe_error_convert */
#define PIPE_ERROR_INVAL	-1
#define PIPE_ERROR_AGAIN	-2
#define PIPE_ERROR_NOMEM	-3
#define PIPE_ERROR_IO		-4

/* Bit-flags used to signal events from the emulator */
#define PIPE_WAKE_CLOSED	(1 << 0)	/* emulator closed pipe */
#define PIPE_WAKE_READ		(1 << 1)	/* pipe can now be read from */
#define PIPE_WAKE_WRITE		(1 << 2)	/* pipe can now be written to */
/* Batch-mode command block shared with the emulator: the guest fills in the
 * fields, then writes the command to PIPE_REG_ACCESS_PARAMS, and the host
 * reads this structure (via the physical address programmed at probe time)
 * and stores its status into 'result'.
 * NOTE(review): the field layout must match the host-side definition in
 * external/qemu/hw/goldfish_pipe.c — do not reorder or resize fields.
 */
struct access_params {
	unsigned long channel;	/* pipe identifier (guest pointer value) */
	u32 size;		/* number of bytes to transfer */
	unsigned long address;	/* guest physical address of the buffer */
	u32 cmd;		/* CMD_READ_BUFFER or CMD_WRITE_BUFFER */
	u32 result;		/* set by the host: transfer count or PIPE_ERROR_* */
	/* reserved for future extension */
	u32 flags;
};
/* The global driver data. Holds a reference to the i/o page used to
 * communicate with the emulator, and a wake queue for blocked tasks
 * waiting to be awoken.
 */
struct goldfish_pipe_dev {
	spinlock_t lock;		/* serializes all register accesses */
	unsigned char __iomem *base;	/* mapped i/o page; NULL when not probed */
	struct access_params *aps;	/* batch-mode block, NULL if unsupported */
	int irq;			/* interrupt line from the platform resource */
};

/* Single device instance: the goldfish platform has exactly one pipe device. */
static struct goldfish_pipe_dev pipe_dev[1];
/* This data type models a given pipe instance. Its address doubles as the
 * channel identifier sent to the emulator, so the object must stay alive
 * until CMD_CLOSE has been issued.
 */
struct goldfish_pipe {
	struct goldfish_pipe_dev *dev;	/* owning device (always pipe_dev) */
	struct mutex lock;		/* serializes read/write on this pipe */
	unsigned long flags;		/* atomic bit flags, see enum below */
	wait_queue_head_t wake_queue;	/* tasks blocked waiting for a wake irq */
};

/* Bit flags for the 'flags' field */
enum {
	BIT_CLOSED_ON_HOST = 0,	/* pipe closed by host */
	BIT_WAKE_ON_WRITE = 1,	/* want to be woken on writes */
	BIT_WAKE_ON_READ = 2,	/* want to be woken on reads */
};
  136. static u32 goldfish_cmd_status(struct goldfish_pipe *pipe, u32 cmd)
  137. {
  138. unsigned long flags;
  139. u32 status;
  140. struct goldfish_pipe_dev *dev = pipe->dev;
  141. spin_lock_irqsave(&dev->lock, flags);
  142. gf_write64((u64)(unsigned long)pipe, dev->base + PIPE_REG_CHANNEL,
  143. dev->base + PIPE_REG_CHANNEL_HIGH);
  144. writel(cmd, dev->base + PIPE_REG_COMMAND);
  145. status = readl(dev->base + PIPE_REG_STATUS);
  146. spin_unlock_irqrestore(&dev->lock, flags);
  147. return status;
  148. }
  149. static void goldfish_cmd(struct goldfish_pipe *pipe, u32 cmd)
  150. {
  151. unsigned long flags;
  152. struct goldfish_pipe_dev *dev = pipe->dev;
  153. spin_lock_irqsave(&dev->lock, flags);
  154. gf_write64((u64)(unsigned long)pipe, dev->base + PIPE_REG_CHANNEL,
  155. dev->base + PIPE_REG_CHANNEL_HIGH);
  156. writel(cmd, dev->base + PIPE_REG_COMMAND);
  157. spin_unlock_irqrestore(&dev->lock, flags);
  158. }
  159. /* This function converts an error code returned by the emulator through
  160. * the PIPE_REG_STATUS i/o register into a valid negative errno value.
  161. */
  162. static int goldfish_pipe_error_convert(int status)
  163. {
  164. switch (status) {
  165. case PIPE_ERROR_AGAIN:
  166. return -EAGAIN;
  167. case PIPE_ERROR_NOMEM:
  168. return -ENOMEM;
  169. case PIPE_ERROR_IO:
  170. return -EIO;
  171. default:
  172. return -EINVAL;
  173. }
  174. }
  175. /*
  176. * Notice: QEMU will return 0 for un-known register access, indicating
  177. * param_acess is supported or not
  178. */
  179. static int valid_batchbuffer_addr(struct goldfish_pipe_dev *dev,
  180. struct access_params *aps)
  181. {
  182. u32 aph, apl;
  183. u64 paddr;
  184. aph = readl(dev->base + PIPE_REG_PARAMS_ADDR_HIGH);
  185. apl = readl(dev->base + PIPE_REG_PARAMS_ADDR_LOW);
  186. paddr = ((u64)aph << 32) | apl;
  187. if (paddr != (__pa(aps)))
  188. return 0;
  189. return 1;
  190. }
  191. /* 0 on success */
  192. static int setup_access_params_addr(struct platform_device *pdev,
  193. struct goldfish_pipe_dev *dev)
  194. {
  195. u64 paddr;
  196. struct access_params *aps;
  197. aps = devm_kzalloc(&pdev->dev, sizeof(struct access_params), GFP_KERNEL);
  198. if (!aps)
  199. return -1;
  200. /* FIXME */
  201. paddr = __pa(aps);
  202. writel((u32)(paddr >> 32), dev->base + PIPE_REG_PARAMS_ADDR_HIGH);
  203. writel((u32)paddr, dev->base + PIPE_REG_PARAMS_ADDR_LOW);
  204. if (valid_batchbuffer_addr(dev, aps)) {
  205. dev->aps = aps;
  206. return 0;
  207. } else
  208. return -1;
  209. }
/* A value that will not be set by qemu emulator */
#define INITIAL_BATCH_RESULT (0xdeadbeaf)

/* Perform one read/write transfer through the batch-mode parameter block.
 *
 * Fills dev->aps with the channel/buffer description, then writes the
 * command to PIPE_REG_ACCESS_PARAMS, which makes the host read the block
 * and store its status into aps->result synchronously.
 *
 * Returns 0 on success with the host status in *status, or -1 when batch
 * mode is unavailable (no aps block, or the host ignored the doorbell) —
 * the caller then falls back to per-register programming.
 *
 * Must be called with dev->lock held: the aps block is a single shared
 * buffer, not per-pipe.
 */
static int access_with_param(struct goldfish_pipe_dev *dev, const int cmd,
			     unsigned long address, unsigned long avail,
			     struct goldfish_pipe *pipe, int *status)
{
	struct access_params *aps = dev->aps;

	if (aps == NULL)
		return -1;

	/* Sentinel: lets us detect whether the host processed the command. */
	aps->result = INITIAL_BATCH_RESULT;
	aps->channel = (unsigned long)pipe;
	aps->size = avail;
	aps->address = address;
	aps->cmd = cmd;
	/* Doorbell write: the host reads the block during this access. */
	writel(cmd, dev->base + PIPE_REG_ACCESS_PARAMS);
	/*
	 * If the aps->result has not changed, that means
	 * that the batch command failed
	 */
	if (aps->result == INITIAL_BATCH_RESULT)
		return -1;
	*status = aps->result;
	return 0;
}
  234. /* This function is used for both reading from and writing to a given
  235. * pipe.
  236. */
  237. static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
  238. size_t bufflen, int is_write)
  239. {
  240. unsigned long irq_flags;
  241. struct goldfish_pipe *pipe = filp->private_data;
  242. struct goldfish_pipe_dev *dev = pipe->dev;
  243. const int cmd_offset = is_write ? 0
  244. : (CMD_READ_BUFFER - CMD_WRITE_BUFFER);
  245. unsigned long address, address_end;
  246. int ret = 0;
  247. /* If the emulator already closed the pipe, no need to go further */
  248. if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
  249. return -EIO;
  250. /* Null reads or writes succeeds */
  251. if (unlikely(bufflen) == 0)
  252. return 0;
  253. /* Check the buffer range for access */
  254. if (!access_ok(is_write ? VERIFY_WRITE : VERIFY_READ,
  255. buffer, bufflen))
  256. return -EFAULT;
  257. /* Serialize access to the pipe */
  258. if (mutex_lock_interruptible(&pipe->lock))
  259. return -ERESTARTSYS;
  260. address = (unsigned long)(void *)buffer;
  261. address_end = address + bufflen;
  262. while (address < address_end) {
  263. unsigned long page_end = (address & PAGE_MASK) + PAGE_SIZE;
  264. unsigned long next = page_end < address_end ? page_end
  265. : address_end;
  266. unsigned long avail = next - address;
  267. int status, wakeBit;
  268. /* Ensure that the corresponding page is properly mapped */
  269. /* FIXME: this isn't safe or sufficient - use get_user_pages */
  270. if (is_write) {
  271. char c;
  272. /* Ensure that the page is mapped and readable */
  273. if (__get_user(c, (char __user *)address)) {
  274. if (!ret)
  275. ret = -EFAULT;
  276. break;
  277. }
  278. } else {
  279. /* Ensure that the page is mapped and writable */
  280. if (__put_user(0, (char __user *)address)) {
  281. if (!ret)
  282. ret = -EFAULT;
  283. break;
  284. }
  285. }
  286. /* Now, try to transfer the bytes in the current page */
  287. spin_lock_irqsave(&dev->lock, irq_flags);
  288. if (access_with_param(dev, CMD_WRITE_BUFFER + cmd_offset,
  289. address, avail, pipe, &status)) {
  290. gf_write64((u64)(unsigned long)pipe,
  291. dev->base + PIPE_REG_CHANNEL,
  292. dev->base + PIPE_REG_CHANNEL_HIGH);
  293. writel(avail, dev->base + PIPE_REG_SIZE);
  294. gf_write64(address, dev->base + PIPE_REG_ADDRESS,
  295. dev->base + PIPE_REG_ADDRESS_HIGH);
  296. writel(CMD_WRITE_BUFFER + cmd_offset,
  297. dev->base + PIPE_REG_COMMAND);
  298. status = readl(dev->base + PIPE_REG_STATUS);
  299. }
  300. spin_unlock_irqrestore(&dev->lock, irq_flags);
  301. if (status > 0) { /* Correct transfer */
  302. ret += status;
  303. address += status;
  304. continue;
  305. }
  306. if (status == 0) /* EOF */
  307. break;
  308. /* An error occured. If we already transfered stuff, just
  309. * return with its count. We expect the next call to return
  310. * an error code */
  311. if (ret > 0)
  312. break;
  313. /* If the error is not PIPE_ERROR_AGAIN, or if we are not in
  314. * non-blocking mode, just return the error code.
  315. */
  316. if (status != PIPE_ERROR_AGAIN ||
  317. (filp->f_flags & O_NONBLOCK) != 0) {
  318. ret = goldfish_pipe_error_convert(status);
  319. break;
  320. }
  321. /* We will have to wait until more data/space is available.
  322. * First, mark the pipe as waiting for a specific wake signal.
  323. */
  324. wakeBit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;
  325. set_bit(wakeBit, &pipe->flags);
  326. /* Tell the emulator we're going to wait for a wake event */
  327. goldfish_cmd(pipe, CMD_WAKE_ON_WRITE + cmd_offset);
  328. /* Unlock the pipe, then wait for the wake signal */
  329. mutex_unlock(&pipe->lock);
  330. while (test_bit(wakeBit, &pipe->flags)) {
  331. if (wait_event_interruptible(
  332. pipe->wake_queue,
  333. !test_bit(wakeBit, &pipe->flags)))
  334. return -ERESTARTSYS;
  335. if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
  336. return -EIO;
  337. }
  338. /* Try to re-acquire the lock */
  339. if (mutex_lock_interruptible(&pipe->lock))
  340. return -ERESTARTSYS;
  341. /* Try the transfer again */
  342. continue;
  343. }
  344. mutex_unlock(&pipe->lock);
  345. return ret;
  346. }
/* read() file operation: thin wrapper selecting the read direction. */
static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
				  size_t bufflen, loff_t *ppos)
{
	return goldfish_pipe_read_write(filp, buffer, bufflen, 0);
}
/* write() file operation: thin wrapper selecting the write direction.
 * The const cast is safe: the write path only reads from the buffer.
 */
static ssize_t goldfish_pipe_write(struct file *filp,
				   const char __user *buffer, size_t bufflen,
				   loff_t *ppos)
{
	return goldfish_pipe_read_write(filp, (char __user *)buffer,
					bufflen, 1);
}
  359. static unsigned int goldfish_pipe_poll(struct file *filp, poll_table *wait)
  360. {
  361. struct goldfish_pipe *pipe = filp->private_data;
  362. unsigned int mask = 0;
  363. int status;
  364. mutex_lock(&pipe->lock);
  365. poll_wait(filp, &pipe->wake_queue, wait);
  366. status = goldfish_cmd_status(pipe, CMD_POLL);
  367. mutex_unlock(&pipe->lock);
  368. if (status & PIPE_POLL_IN)
  369. mask |= POLLIN | POLLRDNORM;
  370. if (status & PIPE_POLL_OUT)
  371. mask |= POLLOUT | POLLWRNORM;
  372. if (status & PIPE_POLL_HUP)
  373. mask |= POLLHUP;
  374. if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
  375. mask |= POLLERR;
  376. return mask;
  377. }
/* Interrupt handler: drain the host's list of wake events.
 *
 * We're going to read from the emulator a list of (channel,flags)
 * pairs corresponding to the wake events that occurred on each
 * blocked pipe (i.e. channel). A channel value of 0 terminates the list.
 * Returns IRQ_HANDLED if at least one event was consumed (the line is
 * shared), IRQ_NONE otherwise.
 */
static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
{
	struct goldfish_pipe_dev *dev = dev_id;
	unsigned long irq_flags;
	int count = 0;

	/* Register accesses must not interleave with the command paths. */
	spin_lock_irqsave(&dev->lock, irq_flags);
	for (;;) {
		/* First read the channel, 0 means the end of the list */
		struct goldfish_pipe *pipe;
		unsigned long wakes;
		unsigned long channel = 0;

#ifdef CONFIG_64BIT
		/* On 64-bit the channel is a kernel pointer, so its high
		 * word is never 0 for a live pipe; a 0 high word already
		 * signals end-of-list.
		 * NOTE(review): relies on the host emitting both halves —
		 * confirm against external/qemu/hw/goldfish_pipe.c. */
		channel = (u64)readl(dev->base + PIPE_REG_CHANNEL_HIGH) << 32;
		if (channel == 0)
			break;
#endif
		channel |= readl(dev->base + PIPE_REG_CHANNEL);
		if (channel == 0)
			break;

		/* Convert channel to struct pipe pointer + read wake flags */
		wakes = readl(dev->base + PIPE_REG_WAKES);
		pipe = (struct goldfish_pipe *)(ptrdiff_t)channel;

		/* Did the emulator just closed a pipe? */
		if (wakes & PIPE_WAKE_CLOSED) {
			set_bit(BIT_CLOSED_ON_HOST, &pipe->flags);
			/* Closing wakes both directions so all waiters exit. */
			wakes |= PIPE_WAKE_READ | PIPE_WAKE_WRITE;
		}
		/* Clearing these bits is what the waiters in
		 * goldfish_pipe_read_write() sleep on. */
		if (wakes & PIPE_WAKE_READ)
			clear_bit(BIT_WAKE_ON_READ, &pipe->flags);
		if (wakes & PIPE_WAKE_WRITE)
			clear_bit(BIT_WAKE_ON_WRITE, &pipe->flags);

		wake_up_interruptible(&pipe->wake_queue);
		count++;
	}
	spin_unlock_irqrestore(&dev->lock, irq_flags);

	return (count == 0) ? IRQ_NONE : IRQ_HANDLED;
}
  419. /**
  420. * goldfish_pipe_open - open a channel to the AVD
  421. * @inode: inode of device
  422. * @file: file struct of opener
  423. *
  424. * Create a new pipe link between the emulator and the use application.
  425. * Each new request produces a new pipe.
  426. *
  427. * Note: we use the pipe ID as a mux. All goldfish emulations are 32bit
  428. * right now so this is fine. A move to 64bit will need this addressing
  429. */
  430. static int goldfish_pipe_open(struct inode *inode, struct file *file)
  431. {
  432. struct goldfish_pipe *pipe;
  433. struct goldfish_pipe_dev *dev = pipe_dev;
  434. int32_t status;
  435. /* Allocate new pipe kernel object */
  436. pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
  437. if (pipe == NULL)
  438. return -ENOMEM;
  439. pipe->dev = dev;
  440. mutex_init(&pipe->lock);
  441. init_waitqueue_head(&pipe->wake_queue);
  442. /*
  443. * Now, tell the emulator we're opening a new pipe. We use the
  444. * pipe object's address as the channel identifier for simplicity.
  445. */
  446. status = goldfish_cmd_status(pipe, CMD_OPEN);
  447. if (status < 0) {
  448. kfree(pipe);
  449. return status;
  450. }
  451. /* All is done, save the pipe into the file's private data field */
  452. file->private_data = pipe;
  453. return 0;
  454. }
  455. static int goldfish_pipe_release(struct inode *inode, struct file *filp)
  456. {
  457. struct goldfish_pipe *pipe = filp->private_data;
  458. /* The guest is closing the channel, so tell the emulator right now */
  459. goldfish_cmd(pipe, CMD_CLOSE);
  460. kfree(pipe);
  461. filp->private_data = NULL;
  462. return 0;
  463. }
/* File operations for /dev/goldfish_pipe; no llseek — pipes are streams. */
static const struct file_operations goldfish_pipe_fops = {
	.owner = THIS_MODULE,
	.read = goldfish_pipe_read,
	.write = goldfish_pipe_write,
	.poll = goldfish_pipe_poll,
	.open = goldfish_pipe_open,
	.release = goldfish_pipe_release,
};

/* Misc character device; the minor number is assigned dynamically. */
static struct miscdevice goldfish_pipe_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "goldfish_pipe",
	.fops = &goldfish_pipe_fops,
};
/* Platform probe: map the device's i/o page, hook the (shared) interrupt,
 * register the misc device, and opportunistically enable batch mode.
 * I/O mapping and the IRQ use devm_* APIs, so the error path only has to
 * reset dev->base (which open/read/write code treats as "not probed").
 */
static int goldfish_pipe_probe(struct platform_device *pdev)
{
	int err;
	struct resource *r;
	struct goldfish_pipe_dev *dev = pipe_dev;

	/* not thread safe, but this should not happen */
	WARN_ON(dev->base != NULL);

	spin_lock_init(&dev->lock);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL || resource_size(r) < PAGE_SIZE) {
		dev_err(&pdev->dev, "can't allocate i/o page\n");
		return -EINVAL;
	}
	dev->base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
	if (dev->base == NULL) {
		dev_err(&pdev->dev, "ioremap failed\n");
		return -EINVAL;
	}

	r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (r == NULL) {
		err = -EINVAL;
		goto error;
	}
	dev->irq = r->start;

	err = devm_request_irq(&pdev->dev, dev->irq, goldfish_pipe_interrupt,
			       IRQF_SHARED, "goldfish_pipe", dev);
	if (err) {
		dev_err(&pdev->dev, "unable to allocate IRQ\n");
		goto error;
	}

	err = misc_register(&goldfish_pipe_device);
	if (err) {
		dev_err(&pdev->dev, "unable to register device\n");
		goto error;
	}

	/* Batch mode is optional; failure falls back to register access. */
	setup_access_params_addr(pdev, dev);
	return 0;

error:
	dev->base = NULL;
	return err;
}
  518. static int goldfish_pipe_remove(struct platform_device *pdev)
  519. {
  520. struct goldfish_pipe_dev *dev = pipe_dev;
  521. misc_deregister(&goldfish_pipe_device);
  522. dev->base = NULL;
  523. return 0;
  524. }
/* Platform driver glue; matched by name against the goldfish platform bus. */
static struct platform_driver goldfish_pipe = {
	.probe = goldfish_pipe_probe,
	.remove = goldfish_pipe_remove,
	.driver = {
		.name = "goldfish_pipe"
	}
};

module_platform_driver(goldfish_pipe);

MODULE_AUTHOR("David Turner <digit@google.com>");
MODULE_LICENSE("GPL");