@@ -175,18 +175,17 @@ static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
 }
 
 /* requires lun->lock taken */
-static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *rblk)
+static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *new_rblk,
+						struct rrpc_block **cur_rblk)
 {
 	struct rrpc *rrpc = rlun->rrpc;
 
-	BUG_ON(!rblk);
-
-	if (rlun->cur) {
-		spin_lock(&rlun->cur->lock);
-		WARN_ON(!block_is_full(rrpc, rlun->cur));
-		spin_unlock(&rlun->cur->lock);
+	if (*cur_rblk) {
+		spin_lock(&(*cur_rblk)->lock);
+		WARN_ON(!block_is_full(rrpc, *cur_rblk));
+		spin_unlock(&(*cur_rblk)->lock);
 	}
-	rlun->cur = rblk;
+	*cur_rblk = new_rblk;
 }
 
 static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
@@ -577,21 +576,20 @@ out:
 	return addr;
 }
 
-/* Simple round-robin Logical to physical address translation.
- *
- * Retrieve the mapping using the active append point. Then update the ap for
- * the next write to the disk.
+/* Map logical address to a physical page. The mapping implements a round robin
+ * approach and allocates a page from the next lun available.
  *
- * Returns rrpc_addr with the physical address and block. Remember to return to
- * rrpc->addr_cache when request is finished.
+ * Returns rrpc_addr with the physical address and block. Returns NULL if no
+ * blocks in the next rlun are available.
  */
 static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
 								int is_gc)
 {
 	struct rrpc_lun *rlun;
-	struct rrpc_block *rblk;
+	struct rrpc_block *rblk, **cur_rblk;
 	struct nvm_lun *lun;
 	u64 paddr;
+	int gc_force = 0;
 
 	rlun = rrpc_get_lun_rr(rrpc, is_gc);
 	lun = rlun->parent;
@@ -599,41 +597,65 @@ static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
 	if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4)
 		return NULL;
 
-	spin_lock(&rlun->lock);
+	/*
+	 * page allocation steps:
+	 * 1. Try to allocate a new page from the current rblk
+	 * 2a. If it succeeds, proceed to map it in and return
+	 * 2b. If it fails, first try to allocate a new block from the media
+	 *     manager, and then retry step 1. Retry until the normal block
+	 *     pool is exhausted.
+	 * 3. If exhausted, and the garbage collector is requesting the block,
+	 *    go to the reserved block and retry step 1.
+	 *    In case this fails as well, or the request does not come from
+	 *    GC, report that no block could be retrieved and let the
+	 *    caller handle further processing.
+	 */
 
+	spin_lock(&rlun->lock);
+	cur_rblk = &rlun->cur;
 	rblk = rlun->cur;
 retry:
 	paddr = rrpc_alloc_addr(rrpc, rblk);
 
-	if (paddr == ADDR_EMPTY) {
-		rblk = rrpc_get_blk(rrpc, rlun, 0);
-		if (rblk) {
-			rrpc_set_lun_cur(rlun, rblk);
-			goto retry;
-		}
+	if (paddr != ADDR_EMPTY)
+		goto done;
 
-		if (is_gc) {
-			/* retry from emergency gc block */
-			paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
-			if (paddr == ADDR_EMPTY) {
-				rblk = rrpc_get_blk(rrpc, rlun, 1);
-				if (!rblk) {
-					pr_err("rrpc: no more blocks");
-					goto err;
-				}
-
-				rlun->gc_cur = rblk;
-				paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
-			}
-			rblk = rlun->gc_cur;
-		}
+	if (!list_empty(&rlun->wblk_list)) {
+new_blk:
+		rblk = list_first_entry(&rlun->wblk_list, struct rrpc_block,
+								prio);
+		rrpc_set_lun_cur(rlun, rblk, cur_rblk);
+		list_del(&rblk->prio);
+		goto retry;
+	}
+	spin_unlock(&rlun->lock);
+
+	rblk = rrpc_get_blk(rrpc, rlun, gc_force);
+	if (rblk) {
+		spin_lock(&rlun->lock);
+		list_add_tail(&rblk->prio, &rlun->wblk_list);
+		/*
+		 * Another thread might already have added a new block.
+		 * Therefore, make sure that one is used, instead of the
+		 * one just added.
+		 */
+		goto new_blk;
+	}
+
+	if (unlikely(is_gc) && !gc_force) {
+		/* retry from emergency gc block */
+		cur_rblk = &rlun->gc_cur;
+		rblk = rlun->gc_cur;
+		gc_force = 1;
+		spin_lock(&rlun->lock);
+		goto retry;
 	}
 
+	pr_err("rrpc: failed to allocate new block\n");
+	return NULL;
+done:
 	spin_unlock(&rlun->lock);
 	return rrpc_update_map(rrpc, laddr, rblk, paddr);
-err:
-	spin_unlock(&rlun->lock);
-	return NULL;
 }
 
 static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
@@ -1177,6 +1199,7 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
 
 		rlun->rrpc = rrpc;
 		INIT_LIST_HEAD(&rlun->prio_list);
+		INIT_LIST_HEAD(&rlun->wblk_list);
 
 		INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
 		spin_lock_init(&rlun->lock);
@@ -1317,14 +1340,13 @@ static int rrpc_luns_configure(struct rrpc *rrpc)
 		rblk = rrpc_get_blk(rrpc, rlun, 0);
 		if (!rblk)
 			goto err;
-
-		rrpc_set_lun_cur(rlun, rblk);
+		rrpc_set_lun_cur(rlun, rblk, &rlun->cur);
 
 		/* Emergency gc block */
 		rblk = rrpc_get_blk(rrpc, rlun, 1);
 		if (!rblk)
 			goto err;
-		rlun->gc_cur = rblk;
+		rrpc_set_lun_cur(rlun, rblk, &rlun->gc_cur);
 	}
 
 	return 0;
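
The core of the rework above is that rrpc_map_page() now drops rlun->lock before calling into the media manager (rrpc_get_blk() may be slow or contended), re-takes the lock, queues the newly obtained block on rlun->wblk_list, and then always consumes from the head of that list, so writers that raced past the empty check still agree on which block becomes the current one. The sketch below is a minimal user-space illustration of only that hand-over pattern, not part of the patch: all demo_* names are hypothetical, a pthread mutex stands in for the kernel spinlock, and a plain singly linked queue stands in for the wblk_list.

/*
 * Illustrative sketch only. demo_* names are hypothetical.
 */
#include <pthread.h>
#include <stdlib.h>

struct demo_block {
	struct demo_block *next;
};

struct demo_lun {
	pthread_mutex_t lock;
	struct demo_block *wblk_head;	/* candidate write blocks, FIFO */
	struct demo_block *wblk_tail;
	struct demo_block *cur;		/* current write block */
};

/* Stand-in for rrpc_get_blk(): may block, so it is never called locked. */
static struct demo_block *demo_get_blk(void)
{
	return calloc(1, sizeof(struct demo_block));
}

static struct demo_block *demo_next_cur_block(struct demo_lun *lun)
{
	struct demo_block *blk;

	pthread_mutex_lock(&lun->lock);
	if (!lun->wblk_head) {
		/* Allocate outside the lock, as the patch does. */
		pthread_mutex_unlock(&lun->lock);
		blk = demo_get_blk();
		if (!blk)
			return NULL;
		pthread_mutex_lock(&lun->lock);
		/*
		 * Queue at the tail; another thread may have queued a block
		 * while the lock was dropped, and the head entry wins.
		 */
		blk->next = NULL;
		if (lun->wblk_tail)
			lun->wblk_tail->next = blk;
		else
			lun->wblk_head = blk;
		lun->wblk_tail = blk;
	}
	/* Always take the oldest queued block as the new current block. */
	blk = lun->wblk_head;
	lun->wblk_head = blk->next;
	if (!lun->wblk_head)
		lun->wblk_tail = NULL;
	lun->cur = blk;
	pthread_mutex_unlock(&lun->lock);
	return blk;
}

Consuming from the list head rather than from the just-allocated block mirrors the wblk_list handling in the hunk above: if two threads race past the empty check, both allocations end up queued and are used in order, instead of one overwriting the other's current block.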