@@ -795,89 +795,6 @@ out:
 	kfree(aio_work);
 }
 
-/*
- * Write commit request unsafe callback, called to tell us when a
- * request is unsafe (that is, in flight--has been handed to the
- * messenger to send to its target osd).  It is called again when
- * we've received a response message indicating the request is
- * "safe" (its CEPH_OSD_FLAG_ONDISK flag is set), or when a request
- * is completed early (and unsuccessfully) due to a timeout or
- * interrupt.
- *
- * This is used if we requested both an ACK and ONDISK commit reply
- * from the OSD.
- */
-static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe)
-{
-	struct ceph_inode_info *ci = ceph_inode(req->r_inode);
-
-	dout("%s %p tid %llu %ssafe\n", __func__, req, req->r_tid,
-	     unsafe ? "un" : "");
-	if (unsafe) {
-		ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
-		spin_lock(&ci->i_unsafe_lock);
-		list_add_tail(&req->r_unsafe_item,
-			      &ci->i_unsafe_writes);
-		spin_unlock(&ci->i_unsafe_lock);
-
-		complete_all(&req->r_completion);
-	} else {
-		spin_lock(&ci->i_unsafe_lock);
-		list_del_init(&req->r_unsafe_item);
-		spin_unlock(&ci->i_unsafe_lock);
-		ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
-	}
-}
-
-/*
- * Wait on any unsafe replies for the given inode.  First wait on the
- * newest request, and make that the upper bound.  Then, if there are
- * more requests, keep waiting on the oldest as long as it is still older
- * than the original request.
- */
-void ceph_sync_write_wait(struct inode *inode)
-{
-	struct ceph_inode_info *ci = ceph_inode(inode);
-	struct list_head *head = &ci->i_unsafe_writes;
-	struct ceph_osd_request *req;
-	u64 last_tid;
-
-	if (!S_ISREG(inode->i_mode))
-		return;
-
-	spin_lock(&ci->i_unsafe_lock);
-	if (list_empty(head))
-		goto out;
-
-	/* set upper bound as _last_ entry in chain */
-
-	req = list_last_entry(head, struct ceph_osd_request,
-			      r_unsafe_item);
-	last_tid = req->r_tid;
-
-	do {
-		ceph_osdc_get_request(req);
-		spin_unlock(&ci->i_unsafe_lock);
-
-		dout("sync_write_wait on tid %llu (until %llu)\n",
-		     req->r_tid, last_tid);
-		wait_for_completion(&req->r_done_completion);
-		ceph_osdc_put_request(req);
-
-		spin_lock(&ci->i_unsafe_lock);
-		/*
-		 * from here on look at first entry in chain, since we
-		 * only want to wait for anything older than last_tid
-		 */
-		if (list_empty(head))
-			break;
-		req = list_first_entry(head, struct ceph_osd_request,
-				       r_unsafe_item);
-	} while (req->r_tid < last_tid);
-out:
-	spin_unlock(&ci->i_unsafe_lock);
-}
-
 static ssize_t
 ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
 		       struct ceph_snap_context *snapc,
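Context for the hunk above and the two below: ceph_sync_write_unsafe() ran twice per request, once at submission ("unsafe": take a CEPH_CAP_FILE_WR reference and link the request into ci->i_unsafe_writes) and once on the ONDISK commit reply ("safe": unlink it and drop the reference), so that ceph_sync_write_wait() could block until every in-flight write had committed. With CEPH_OSD_FLAG_ACK no longer requested (next hunk), each request completes exactly once, and the caller can simply submit and wait. Roughly, as a sketch against the existing osd_client helpers rather than the literal post-patch tree:

	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!ret)
		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

ceph_osdc_wait_request() blocks on the request's own completion, which after this series is signalled only by the commit reply or an error, so no per-inode unsafe list is needed.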
@@ -1119,8 +1036,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
 
 	flags = CEPH_OSD_FLAG_ORDERSNAP |
 		CEPH_OSD_FLAG_ONDISK |
-		CEPH_OSD_FLAG_WRITE |
-		CEPH_OSD_FLAG_ACK;
+		CEPH_OSD_FLAG_WRITE;
 
 	while ((len = iov_iter_count(from)) > 0) {
 		size_t left;
@@ -1166,8 +1082,6 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
 		goto out;
 	}
 
-	/* get a second commit callback */
-	req->r_unsafe_callback = ceph_sync_write_unsafe;
 	req->r_inode = inode;
 
 	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
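One more note on the flag change: CEPH_OSD_FLAG_ACK asked the OSD for an early reply once the write had been applied in memory, while CEPH_OSD_FLAG_ONDISK asks for a reply once the write is committed to stable storage. Requesting only ONDISK therefore means a single reply, and a single completion, per request. The self-contained userspace model below (illustrative only, not kernel code; all names are invented for the example) shows why one terminal state collapses the old bookkeeping into a plain submit-and-wait:

/* cc -pthread model.c: one "commit" signal per request, one wait. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct request {
	pthread_mutex_t lock;
	pthread_cond_t done;	/* models the request completion */
	int committed;		/* set once, on the "ONDISK" reply */
};

static void *osd_thread(void *arg)
{
	struct request *req = arg;

	usleep(1000);			/* pretend I/O latency */
	pthread_mutex_lock(&req->lock);
	req->committed = 1;		/* the single "safe" transition */
	pthread_cond_signal(&req->done);
	pthread_mutex_unlock(&req->lock);
	return NULL;
}

int main(void)
{
	struct request req = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.done = PTHREAD_COND_INITIALIZER,
	};
	pthread_t osd;

	pthread_create(&osd, NULL, osd_thread, &req);

	/* submit + wait: no intermediate "unsafe" state to track */
	pthread_mutex_lock(&req.lock);
	while (!req.committed)
		pthread_cond_wait(&req.done, &req.lock);
	pthread_mutex_unlock(&req.lock);

	pthread_join(osd, NULL);
	printf("write committed\n");
	return 0;
}

With two reply types, the waiter would instead have to keep a list of requests that had been acked but not yet committed (the role of ci->i_unsafe_writes) and drain it on fsync.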