/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/aio.h>

#include <asm/uaccess.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVPORT_MINOR	4

static inline unsigned long size_inside_page(unsigned long start,
					     unsigned long size)
{
	unsigned long sz;

	sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

	return min(sz, size);
}

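/*
 * Worked example (illustrative): with 4 KiB pages, a transfer starting at
 * offset 0xFFC has only 4 bytes left in its page, so
 * size_inside_page(0xFFC, 16) returns 4, while size_inside_page(0x1000, 16)
 * starts on a page boundary and returns the full 16.
 */
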
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

#ifdef CONFIG_STRICT_DEVMEM
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
				current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif

void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}

/*
 * This function reads the *physical* memory. The f_pos points directly to
 * the memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t read, sz;
	char *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		unsigned long remaining;

		sz = size_inside_page(p, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, count))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr)
			return -EFAULT;

		remaining = copy_to_user(buf, ptr, sz);
		unxlate_dev_mem_ptr(p, ptr);
		if (remaining)
			return -EFAULT;

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}

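/*
 * Example (illustrative sketch, from userspace; not part of this driver):
 * reading 16 bytes of physical memory at offset 0x1000 through /dev/mem.
 * Opening requires CAP_SYS_RAWIO (see open_port() below), and with
 * CONFIG_STRICT_DEVMEM the range must also pass devmem_is_allowed().
 *
 *	int fd = open("/dev/mem", O_RDONLY);
 *	unsigned char data[16];
 *
 *	if (fd >= 0 && pread(fd, data, sizeof(data), 0x1000) == sizeof(data))
 *		printf("first byte: %02x\n", data[0]);
 */
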
static ssize_t write_mem(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		sz = size_inside_page(p, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr) {
			if (written)
				break;
			return -EFAULT;
		}

		copied = copy_from_user(ptr, buf, sz);
		unxlate_dev_mem_ptr(p, ptr);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

int __weak phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top of memory the kernel knows about,
	 * or through a file pointer that was marked O_DSYNC, will be done
	 * non-cached.
	 */
	if (file->f_flags & O_DSYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	phys_addr_t offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else
#define get_unmapped_area_mem	NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};

static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
						&vma->vm_page_prot))
		return -EINVAL;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	vma->vm_ops = &mmap_mem_ops;

	/* Remap-pfn-range will mark the range VM_IO */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		return -EAGAIN;
	}
	return 0;
}

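/*
 * Example (illustrative sketch, from userspace; not part of this driver):
 * mapping one page of physical memory at 0x1000 through /dev/mem. The mmap
 * file offset is the physical address; remap_pfn_range() above wires up
 * the page tables.
 *
 *	int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *	volatile uint32_t *regs = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *				       MAP_SHARED, fd, 0x1000);
 *
 *	if (regs != MAP_FAILED)
 *		printf("word 0: %08x\n", regs[0]);
 */
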
#ifdef CONFIG_DEVKMEM
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory than
	 * available in mem_map which pfn_valid checks for. Perhaps should add a
	 * new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}
#endif

#ifdef CONFIG_DEVKMEM
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
	int err = 0;

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long)high_memory - p)
			low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			sz = size_inside_page(p, low_count);
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
#endif
		while (low_count > 0) {
			sz = size_inside_page(p, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((char *)p);

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			sz = size_inside_page(p, count);
			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			sz = vread(kbuf, (char *)p, sz);
			if (!sz)
				break;
			if (copy_to_user(buf, kbuf, sz)) {
				err = -EFAULT;
				break;
			}
			count -= sz;
			buf += sz;
			read += sz;
			p += sz;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read ? read : err;
}

static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		char *ptr;

		sz = size_inside_page(p, count);

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_kmem_ptr((char *)p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
	int err = 0;

	if (p < (unsigned long) high_memory) {
		unsigned long to_write = min_t(unsigned long, count,
					       (unsigned long)high_memory - p);
		wrote = do_write_kmem(p, buf, to_write, ppos);
		if (wrote != to_write)
			return wrote;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			unsigned long sz = size_inside_page(p, count);
			unsigned long n;

			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			n = copy_from_user(kbuf, buf, sz);
			if (n) {
				err = -EFAULT;
				break;
			}
			vwrite(kbuf, (char *)p, sz);
			count -= sz;
			buf += sz;
			virtr += sz;
			p += sz;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote ? : err;
}
#endif

#ifdef CONFIG_DEVPORT
static ssize_t read_port(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user *tmp = buf;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;

		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}
#endif

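/*
 * Example (illustrative sketch, from userspace; not part of this driver):
 * with /dev/port the file offset selects the I/O port, so reading one byte
 * at offset 0x61 performs inb(0x61) (the PC system control port). Opening
 * requires CAP_SYS_RAWIO, as with /dev/mem.
 *
 *	int fd = open("/dev/port", O_RDONLY);
 *	unsigned char val;
 *
 *	if (fd >= 0 && pread(fd, &val, 1, 0x61) == 1)
 *		printf("port 0x61: %02x\n", val);
 */
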
static ssize_t read_null(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

static ssize_t aio_read_null(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	return 0;
}

static ssize_t aio_write_null(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	return iov_length(iov, nr_segs);
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static ssize_t read_zero(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	size_t written;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	written = 0;
	while (count) {
		unsigned long unwritten;
		size_t chunk = count;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		unwritten = __clear_user(buf, chunk);
		written += chunk - unwritten;
		if (unwritten)
			break;
		if (signal_pending(current))
			return written ? written : -ERESTARTSYS;
		buf += chunk;
		count -= chunk;
		cond_resched();
	}
	return written ? written : -EFAULT;
}

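/*
 * Example (illustrative, from userspace; not part of this driver): any read
 * from /dev/zero is satisfied with zero bytes, cleared at most a page at a
 * time by the loop above.
 *
 *	int fd = open("/dev/zero", O_RDONLY);
 *	char buf[64];
 *
 *	if (fd >= 0 && read(fd, buf, sizeof(buf)) == sizeof(buf))
 *		assert(buf[0] == 0 && buf[63] == 0);
 */
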
static ssize_t aio_read_zero(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	size_t written = 0;
	unsigned long i;
	ssize_t ret;

	for (i = 0; i < nr_segs; i++) {
		ret = read_zero(iocb->ki_filp, iov[i].iov_base, iov[i].iov_len,
				&pos);
		if (ret < 0)
			break;
		written += ret;
	}

	return written ? written : -EFAULT;
}

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	return 0;
}

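/*
 * Example (illustrative sketch, from userspace; not part of this driver):
 * a shared mapping of /dev/zero behaves like anonymous shared memory,
 * backed by shmem_zero_setup() above; a private mapping is plain
 * demand-zero memory.
 *
 *	int fd = open("/dev/zero", O_RDWR);
 *	char *mem = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *
 *	if (mem != MAP_FAILED)
 *		mem[0] = 42;	(the store survives fork() and is visible
 *				 to the child, unlike with MAP_PRIVATE)
 */
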
static ssize_t write_full(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;

	mutex_lock(&file_inode(file)->i_mutex);
	switch (orig) {
	case SEEK_CUR:
		offset += file->f_pos;
		/* fall through: the adjusted offset is handled as SEEK_SET */
	case SEEK_SET:
		/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
		if (IS_ERR_VALUE((unsigned long long)offset)) {
			ret = -EOVERFLOW;
			break;
		}
		file->f_pos = offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&file_inode(file)->i_mutex);
	return ret;
}

static int open_port(struct inode *inode, struct file *filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define read_full	read_zero
#define aio_write_zero	aio_write_null
#define open_mem	open_port
#define open_kmem	open_mem

static const struct file_operations mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
	.get_unmapped_area = get_unmapped_area_mem,
};

#ifdef CONFIG_DEVKMEM
static const struct file_operations kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
	.get_unmapped_area = get_unmapped_area_mem,
};
#endif

static const struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
	.aio_read	= aio_read_null,
	.aio_write	= aio_write_null,
	.splice_write	= splice_write_null,
};

#ifdef CONFIG_DEVPORT
static const struct file_operations port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};
#endif

static const struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.read		= read_zero,
	.write		= write_zero,
	.aio_read	= aio_read_zero,
	.aio_write	= aio_write_zero,
	.mmap		= mmap_zero,
};

/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 * - no writeback happens
 */
static struct backing_dev_info zero_bdi = {
	.name		= "char/mem",
	.capabilities	= BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
};

static const struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read		= read_full,
	.write		= write_full,
};

static const struct memdev {
	const char *name;
	umode_t mode;
	const struct file_operations *fops;
	struct backing_dev_info *dev_info;
} devlist[] = {
	[1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
#ifdef CONFIG_DEVKMEM
	[2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
#endif
	[3] = { "null", 0666, &null_fops, NULL },
#ifdef CONFIG_DEVPORT
	[4] = { "port", 0, &port_fops, NULL },
#endif
	[5] = { "zero", 0666, &zero_fops, &zero_bdi },
	[7] = { "full", 0666, &full_fops, NULL },
	[8] = { "random", 0666, &random_fops, NULL },
	[9] = { "urandom", 0666, &urandom_fops, NULL },
#ifdef CONFIG_PRINTK
	[11] = { "kmsg", 0644, &kmsg_fops, NULL },
#endif
};

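/*
 * The array index doubles as the minor number under MEM_MAJOR (1), which is
 * why /dev/null is character device 1:3 and /dev/zero is 1:5. Example
 * (illustrative, from userspace): creating a node by hand with mknod(2)
 * instead of relying on the "mem" class device created in chr_dev_init().
 *
 *	#include <sys/stat.h>
 *	#include <sys/sysmacros.h>
 *
 *	mknod("/tmp/myzero", S_IFCHR | 0666, makedev(1, 5));
 */
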
static int memory_open(struct inode *inode, struct file *filp)
{
	int minor;
	const struct memdev *dev;

	minor = iminor(inode);
	if (minor >= ARRAY_SIZE(devlist))
		return -ENXIO;

	dev = &devlist[minor];
	if (!dev->fops)
		return -ENXIO;

	filp->f_op = dev->fops;
	if (dev->dev_info)
		filp->f_mapping->backing_dev_info = dev->dev_info;

	/* Is this /dev/mem or /dev/kmem? */
	if (dev->dev_info == &directly_mappable_cdev_bdi)
		filp->f_mode |= FMODE_UNSIGNED_OFFSET;

	if (dev->fops->open)
		return dev->fops->open(inode, filp);

	return 0;
}

static const struct file_operations memory_fops = {
	.open = memory_open,
	.llseek = noop_llseek,
};

static char *mem_devnode(struct device *dev, umode_t *mode)
{
	if (mode && devlist[MINOR(dev->devt)].mode)
		*mode = devlist[MINOR(dev->devt)].mode;
	return NULL;
}

static struct class *mem_class;

static int __init chr_dev_init(void)
{
	int minor;
	int err;

	err = bdi_init(&zero_bdi);
	if (err)
		return err;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	if (IS_ERR(mem_class))
		return PTR_ERR(mem_class);

	mem_class->devnode = mem_devnode;
	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
		if (!devlist[minor].name)
			continue;

		/*
		 * Create /dev/port?
		 */
		if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
			continue;

		device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
			      NULL, devlist[minor].name);
	}

	return tty_init();
}

fs_initcall(chr_dev_init);