|
@@ -83,6 +83,25 @@ struct rds_ib_mr_pool {
|
|
struct ib_fmr_attr fmr_attr;
|
|
struct ib_fmr_attr fmr_attr;
|
|
};
|
|
};
|
|
|
|
|
|
|
|
/*
 * Dedicated workqueue for FMR pool flushing: delayed flush_worker items are
 * queued here (see rds_ib_free_mr) instead of the system workqueue.
 * Created in rds_ib_fmr_init(), destroyed in rds_ib_fmr_exit().
 */
struct workqueue_struct *rds_ib_fmr_wq;
|
|
|
|
+int rds_ib_fmr_init(void)
|
|
|
|
+{
|
|
|
|
+ rds_ib_fmr_wq = create_workqueue("rds_fmr_flushd");
|
|
|
|
+ if (!rds_ib_fmr_wq)
|
|
|
|
+ return -ENOMEM;
|
|
|
|
+ return 0;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
/* Tear down the FMR flush workqueue created by rds_ib_fmr_init().
 *
 * By the time this is called all the IB devices should have been torn down and
 * had their pools freed. As each pool is freed its work struct is waited on,
 * so the pool flushing work queue should be idle by the time we get here.
 */
void rds_ib_fmr_exit(void)
{
	destroy_workqueue(rds_ib_fmr_wq);
}
|
|
static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all, struct rds_ib_mr **);
|
|
static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all, struct rds_ib_mr **);
|
|
static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
|
|
static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
|
|
static void rds_ib_mr_pool_flush_worker(struct work_struct *work);
|
|
static void rds_ib_mr_pool_flush_worker(struct work_struct *work);
|
|
@@ -719,15 +738,17 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
|
|
/* If we've pinned too many pages, request a flush */
|
|
/* If we've pinned too many pages, request a flush */
|
|
if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
|
|
if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
|
|
atomic_read(&pool->dirty_count) >= pool->max_items / 10)
|
|
atomic_read(&pool->dirty_count) >= pool->max_items / 10)
|
|
- schedule_delayed_work(&pool->flush_worker, 10);
|
|
|
|
|
|
+ queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
|
|
|
|
|
|
if (invalidate) {
|
|
if (invalidate) {
|
|
if (likely(!in_interrupt())) {
|
|
if (likely(!in_interrupt())) {
|
|
rds_ib_flush_mr_pool(pool, 0, NULL);
|
|
rds_ib_flush_mr_pool(pool, 0, NULL);
|
|
} else {
|
|
} else {
|
|
/* We get here if the user created a MR marked
|
|
/* We get here if the user created a MR marked
|
|
- * as use_once and invalidate at the same time. */
|
|
|
|
- schedule_delayed_work(&pool->flush_worker, 10);
|
|
|
|
|
|
+ * as use_once and invalidate at the same time.
|
|
|
|
+ */
|
|
|
|
+ queue_delayed_work(rds_ib_fmr_wq,
|
|
|
|
+ &pool->flush_worker, 10);
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
|