@@ -33,6 +33,7 @@
 #include "xfs_error.h"
 #include "xfs_trace.h"
 #include "xfs_cksum.h"
+#include "xfs_alloc.h"
 
 /*
  * Cursor allocation zone.
@@ -2323,7 +2324,7 @@ error1:
  * record (to be inserted into parent).
  */
 STATIC int				/* error */
-xfs_btree_split(
+__xfs_btree_split(
	struct xfs_btree_cur	*cur,
	int			level,
	union xfs_btree_ptr	*ptrp,
|
@@ -2503,6 +2504,85 @@ error0:
	return error;
 }
 
|
|
+struct xfs_btree_split_args {
|
|
|
|
+ struct xfs_btree_cur *cur;
|
|
|
|
+ int level;
|
|
|
|
+ union xfs_btree_ptr *ptrp;
|
|
|
|
+ union xfs_btree_key *key;
|
|
|
|
+ struct xfs_btree_cur **curp;
|
|
|
|
+ int *stat; /* success/failure */
|
|
|
|
+ int result;
|
|
|
|
+ bool kswapd; /* allocation in kswapd context */
|
|
|
|
+ struct completion *done;
|
|
|
|
+ struct work_struct work;
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
+/*
|
|
|
|
+ * Stack switching interfaces for allocation
|
|
|
|
+ */
|
|
|
|
+static void
|
|
|
|
+xfs_btree_split_worker(
|
|
|
|
+ struct work_struct *work)
|
|
|
|
+{
|
|
|
|
+ struct xfs_btree_split_args *args = container_of(work,
|
|
|
|
+ struct xfs_btree_split_args, work);
|
|
|
|
+ unsigned long pflags;
|
|
|
|
+ unsigned long new_pflags = PF_FSTRANS;
|
|
|
|
+
|
|
|
|
+ /*
|
|
|
|
+ * we are in a transaction context here, but may also be doing work
|
|
|
|
+ * in kswapd context, and hence we may need to inherit that state
|
|
|
|
+ * temporarily to ensure that we don't block waiting for memory reclaim
|
|
|
|
+ * in any way.
|
|
|
|
+ */
|
|
|
|
+ if (args->kswapd)
|
|
|
|
+ new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
|
|
|
|
+
|
|
|
|
+ current_set_flags_nested(&pflags, new_pflags);
|
|
|
|
+
|
|
|
|
+ args->result = __xfs_btree_split(args->cur, args->level, args->ptrp,
|
|
|
|
+ args->key, args->curp, args->stat);
|
|
|
|
+ complete(args->done);
|
|
|
|
+
|
|
|
|
+ current_restore_flags_nested(&pflags, new_pflags);
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+/*
|
|
|
|
+ * BMBT split requests often come in with little stack to work on. Push
|
|
|
|
+ * them off to a worker thread so there is lots of stack to use. For the other
|
|
|
|
+ * btree types, just call directly to avoid the context switch overhead here.
|
|
|
|
+ */
|
|
|
|
+STATIC int /* error */
|
|
|
|
+xfs_btree_split(
|
|
|
|
+ struct xfs_btree_cur *cur,
|
|
|
|
+ int level,
|
|
|
|
+ union xfs_btree_ptr *ptrp,
|
|
|
|
+ union xfs_btree_key *key,
|
|
|
|
+ struct xfs_btree_cur **curp,
|
|
|
|
+ int *stat) /* success/failure */
|
|
|
|
+{
|
|
|
|
+ struct xfs_btree_split_args args;
|
|
|
|
+ DECLARE_COMPLETION_ONSTACK(done);
|
|
|
|
+
|
|
|
|
+ if (cur->bc_btnum != XFS_BTNUM_BMAP)
|
|
|
|
+ return __xfs_btree_split(cur, level, ptrp, key, curp, stat);
|
|
|
|
+
|
|
|
|
+ args.cur = cur;
|
|
|
|
+ args.level = level;
|
|
|
|
+ args.ptrp = ptrp;
|
|
|
|
+ args.key = key;
|
|
|
|
+ args.curp = curp;
|
|
|
|
+ args.stat = stat;
|
|
|
|
+ args.done = &done;
|
|
|
|
+ args.kswapd = current_is_kswapd();
|
|
|
|
+ INIT_WORK_ONSTACK(&args.work, xfs_btree_split_worker);
|
|
|
|
+ queue_work(xfs_alloc_wq, &args.work);
|
|
|
|
+ wait_for_completion(&done);
|
|
|
|
+ destroy_work_on_stack(&args.work);
|
|
|
|
+ return args.result;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+
|
|
 /*
  * Copy the old inode root contents into a real block and make the
  * broot point to it.