aio.h

#ifndef __LINUX__AIO_H
#define __LINUX__AIO_H

#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/aio_abi.h>
#include <linux/uio.h>
#include <linux/rcupdate.h>

#include <linux/atomic.h>

struct kioctx;
struct kiocb;

#define KIOCB_KEY		0

/*
 * We use ki_cancel == KIOCB_CANCELLED to indicate that a kiocb has been either
 * cancelled or completed (this makes a certain amount of sense because
 * successful cancellation - io_cancel() - does deliver the completion to
 * userspace).
 *
 * And since most things don't implement kiocb cancellation and we'd really like
 * kiocb completion to be lockless when possible, we use ki_cancel to
 * synchronize cancellation and completion - we only set it to KIOCB_CANCELLED
 * with xchg() or cmpxchg(), see batch_complete_aio() and kiocb_cancel().
 */
#define KIOCB_CANCELLED		((void *) (~0ULL))

typedef int (kiocb_cancel_fn)(struct kiocb *);
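
/*
 * Illustrative sketch (not part of this header): roughly how the aio core
 * claims the cancel callback before invoking it, following the scheme
 * described above.  Whichever of cancellation and completion swaps in
 * KIOCB_CANCELLED first "owns" the kiocb, so the callback runs at most once.
 *
 *	static int example_kiocb_cancel(struct kiocb *kiocb)
 *	{
 *		kiocb_cancel_fn *old, *cancel;
 *
 *		cancel = ACCESS_ONCE(kiocb->ki_cancel);
 *		do {
 *			if (!cancel || cancel == KIOCB_CANCELLED)
 *				return -EINVAL;
 *
 *			old = cancel;
 *			cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED);
 *		} while (cancel != old);
 *
 *		return cancel(kiocb);
 *	}
 */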

struct kiocb {
	atomic_t		ki_users;

	struct file		*ki_filp;
	struct kioctx		*ki_ctx;	/* NULL for sync ops */
	kiocb_cancel_fn		*ki_cancel;
	void			(*ki_dtor)(struct kiocb *);

	union {
		void __user		*user;
		struct task_struct	*tsk;
	} ki_obj;

	__u64			ki_user_data;	/* user's data for completion */
	loff_t			ki_pos;

	void			*private;
	/* State that we remember to be able to restart/retry */
	unsigned short		ki_opcode;
	size_t			ki_nbytes;	/* copy of iocb->aio_nbytes */
	char __user		*ki_buf;	/* remaining iocb->aio_buf */
	size_t			ki_left;	/* remaining bytes */
	struct iovec		ki_inline_vec;	/* inline vector */
	struct iovec		*ki_iovec;
	unsigned long		ki_nr_segs;
	unsigned long		ki_cur_seg;

	struct list_head	ki_list;	/* the aio core uses this
						 * for cancellation */

	/*
	 * If the aio_resfd field of the userspace iocb is not zero,
	 * this is the underlying eventfd context to deliver events to.
	 */
	struct eventfd_ctx	*ki_eventfd;
};

static inline bool is_sync_kiocb(struct kiocb *kiocb)
{
	return kiocb->ki_ctx == NULL;
}

static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
{
	*kiocb = (struct kiocb) {
			.ki_users = ATOMIC_INIT(1),
			.ki_ctx = NULL,
			.ki_filp = filp,
			.ki_obj.tsk = current,
		};
}
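
/*
 * Illustrative sketch (not part of this header): a synchronous read path of
 * this era drives the helpers above roughly like do_sync_read() in
 * fs/read_write.c - build an on-stack kiocb, hand it to the ->aio_read()
 * method, and block in wait_on_sync_kiocb() only if the request was queued
 * asynchronously.
 *
 *	static ssize_t example_sync_read(struct file *filp, char __user *buf,
 *					 size_t len, loff_t *ppos)
 *	{
 *		struct iovec iov = { .iov_base = buf, .iov_len = len };
 *		struct kiocb kiocb;
 *		ssize_t ret;
 *
 *		init_sync_kiocb(&kiocb, filp);
 *		kiocb.ki_pos = *ppos;
 *		kiocb.ki_left = len;
 *		kiocb.ki_nbytes = len;
 *
 *		ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
 *		if (ret == -EIOCBQUEUED)
 *			ret = wait_on_sync_kiocb(&kiocb);
 *
 *		*ppos = kiocb.ki_pos;
 *		return ret;
 *	}
 */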

/* prototypes */
#ifdef CONFIG_AIO
extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb);
extern void aio_put_req(struct kiocb *iocb);
extern void aio_complete(struct kiocb *iocb, long res, long res2);
struct mm_struct;
extern void exit_aio(struct mm_struct *mm);
extern long do_io_submit(aio_context_t ctx_id, long nr,
			 struct iocb __user *__user *iocbpp, bool compat);
void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel);
#else
static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; }
static inline void aio_put_req(struct kiocb *iocb) { }
static inline void aio_complete(struct kiocb *iocb, long res, long res2) { }
struct mm_struct;
static inline void exit_aio(struct mm_struct *mm) { }
static inline long do_io_submit(aio_context_t ctx_id, long nr,
				struct iocb __user * __user *iocbpp,
				bool compat) { return 0; }
static inline void kiocb_set_cancel_fn(struct kiocb *req,
				       kiocb_cancel_fn *cancel) { }
#endif /* CONFIG_AIO */
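
/*
 * Illustrative sketch (hypothetical driver, not part of this header): an
 * in-kernel user that completes requests asynchronously registers a cancel
 * callback with kiocb_set_cancel_fn(), returns -EIOCBQUEUED from its
 * ->aio_read()/->aio_write() method, and later reports the result through
 * aio_complete().  my_dev_queue() and my_dev_abort() are made-up helpers.
 *
 *	static int my_dev_cancel(struct kiocb *iocb)
 *	{
 *		return my_dev_abort(iocb->private);
 *	}
 *
 *	static void my_dev_done(struct my_request *req, long nr_done)
 *	{
 *		aio_complete(req->iocb, nr_done, 0);	/+ deliver event to aio core +/
 *	}
 *
 *	static ssize_t my_dev_aio_read(struct kiocb *iocb, const struct iovec *iov,
 *				       unsigned long nr_segs, loff_t pos)
 *	{
 *		struct my_request *req = my_dev_queue(iocb, iov, nr_segs, pos);
 *
 *		if (IS_ERR(req))
 *			return PTR_ERR(req);
 *
 *		iocb->private = req;
 *		kiocb_set_cancel_fn(iocb, my_dev_cancel);
 *		return -EIOCBQUEUED;
 *	}
 */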

static inline struct kiocb *list_kiocb(struct list_head *h)
{
	return list_entry(h, struct kiocb, ki_list);
}

/* for sysctl: */
extern unsigned long aio_nr;		/* current system wide number of aio requests */
extern unsigned long aio_max_nr;	/* system wide maximum number of aio requests */

#endif /* __LINUX__AIO_H */