@@ -1112,6 +1112,8 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
 	init_waitqueue_head(&udev->nl_cmd_wq);
 	spin_lock_init(&udev->nl_cmd_lock);
 
+	INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
+
 	return &udev->se_dev;
 }
 
@@ -1280,10 +1282,54 @@ static void tcmu_dev_call_rcu(struct rcu_head *p)
 	kfree(udev);
 }
 
+static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
+{
+	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+		kmem_cache_free(tcmu_cmd_cache, cmd);
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static void tcmu_blocks_release(struct tcmu_dev *udev)
+{
+	int i;
+	struct page *page;
+
+	/* Try to release all block pages */
+	mutex_lock(&udev->cmdr_lock);
+	for (i = 0; i <= udev->dbi_max; i++) {
+		page = radix_tree_delete(&udev->data_blocks, i);
+		if (page) {
+			__free_page(page);
+			atomic_dec(&global_db_count);
+		}
+	}
+	mutex_unlock(&udev->cmdr_lock);
+}
+
 static void tcmu_dev_kref_release(struct kref *kref)
 {
 	struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
 	struct se_device *dev = &udev->se_dev;
+	struct tcmu_cmd *cmd;
+	bool all_expired = true;
+	int i;
+
+	vfree(udev->mb_addr);
+	udev->mb_addr = NULL;
+
+	/* Upper layer should drain all requests before calling this */
+	spin_lock_irq(&udev->commands_lock);
+	idr_for_each_entry(&udev->commands, cmd, i) {
+		if (tcmu_check_and_free_pending_cmd(cmd) != 0)
+			all_expired = false;
+	}
+	idr_destroy(&udev->commands);
+	spin_unlock_irq(&udev->commands_lock);
+	WARN_ON(!all_expired);
+
+	tcmu_blocks_release(udev);
 
 	call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
 }
@@ -1476,8 +1522,6 @@ static int tcmu_configure_device(struct se_device *dev)
 	WARN_ON(udev->data_size % PAGE_SIZE);
 	WARN_ON(udev->data_size % DATA_BLOCK_SIZE);
 
-	INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
-
 	info->version = __stringify(TCMU_MAILBOX_VERSION);
 
 	info->mem[0].name = "tcm-user command & data buffer";
@@ -1527,6 +1571,7 @@ err_netlink:
 	uio_unregister_device(&udev->uio_info);
 err_register:
 	vfree(udev->mb_addr);
+	udev->mb_addr = NULL;
 err_vzalloc:
 	kfree(info->name);
 	info->name = NULL;
@@ -1534,37 +1579,11 @@ err_vzalloc:
 	return ret;
 }
 
-static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
-{
-	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
-		kmem_cache_free(tcmu_cmd_cache, cmd);
-		return 0;
-	}
-	return -EINVAL;
-}
-
 static bool tcmu_dev_configured(struct tcmu_dev *udev)
 {
 	return udev->uio_info.uio_dev ? true : false;
 }
 
-static void tcmu_blocks_release(struct tcmu_dev *udev)
-{
-	int i;
-	struct page *page;
-
-	/* Try to release all block pages */
-	mutex_lock(&udev->cmdr_lock);
-	for (i = 0; i <= udev->dbi_max; i++) {
-		page = radix_tree_delete(&udev->data_blocks, i);
-		if (page) {
-			__free_page(page);
-			atomic_dec(&global_db_count);
-		}
-	}
-	mutex_unlock(&udev->cmdr_lock);
-}
-
 static void tcmu_free_device(struct se_device *dev)
 {
 	struct tcmu_dev *udev = TCMU_DEV(dev);
@@ -1576,9 +1595,6 @@ static void tcmu_free_device(struct se_device *dev)
 static void tcmu_destroy_device(struct se_device *dev)
 {
 	struct tcmu_dev *udev = TCMU_DEV(dev);
-	struct tcmu_cmd *cmd;
-	bool all_expired = true;
-	int i;
 
 	del_timer_sync(&udev->timeout);
 
@@ -1586,20 +1602,6 @@ static void tcmu_destroy_device(struct se_device *dev)
 	list_del(&udev->node);
 	mutex_unlock(&root_udev_mutex);
 
-	vfree(udev->mb_addr);
-
-	/* Upper layer should drain all requests before calling this */
-	spin_lock_irq(&udev->commands_lock);
-	idr_for_each_entry(&udev->commands, cmd, i) {
-		if (tcmu_check_and_free_pending_cmd(cmd) != 0)
-			all_expired = false;
-	}
-	idr_destroy(&udev->commands);
-	spin_unlock_irq(&udev->commands_lock);
-	WARN_ON(!all_expired);
-
-	tcmu_blocks_release(udev);
-
 	tcmu_netlink_event(udev, TCMU_CMD_REMOVED_DEVICE, 0, NULL);
 
 	uio_unregister_device(&udev->uio_info);