lightnvm: pblk: remove debug from pblk_[down/up]_page

Remove the debug-only iteration within __pblk_down_page, which then
allows us to reduce the arguments of the functions that call it down
to pblk and the parallel unit, simplifying the callers' logic
considerably.

Also, rename the functions pblk_[down/up]_page to
pblk_[down/up]_chunk to communicate that they manage the write
pointer of the chunk. Note that they also protect the parallel unit
such that at most one chunk is active per parallel unit.
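A minimal sketch of the protection scheme, using only names that
appear in the diff below (the 30-second timeout matches the patched
code; error handling is simplified):

	/* One write semaphore per parallel unit (LUN). Holding it while a
	 * chunk is being written guarantees that at most one chunk is
	 * active per parallel unit until pblk_up_chunk() releases it.
	 */
	static void __pblk_down_chunk(struct pblk *pblk, int pos)
	{
		struct pblk_lun *rlun = &pblk->luns[pos];

		/* wait up to 30 seconds for the in-flight chunk write */
		if (down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000)))
			pblk_err(pblk, "taking lun semaphore timed out\n");
	}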

Signed-off-by: Matias Bjørling <mb@lightnvm.io>
Reviewed-by: Javier González <javier@cnexlabs.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Matias Bjørling, 6 years ago
parent commit 43241cfe47

+ 9 - 25
drivers/lightnvm/pblk-core.c

@@ -1861,8 +1861,7 @@ void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
 	queue_work(wq, &line_ws->ws);
 }
 
-static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
-			     int nr_ppas, int pos)
+static void __pblk_down_chunk(struct pblk *pblk, int pos)
 {
 	struct pblk_lun *rlun = &pblk->luns[pos];
 	int ret;
@@ -1871,13 +1870,6 @@ static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
 	 * Only send one inflight I/O per LUN. Since we map at a page
 	 * granurality, all ppas in the I/O will map to the same LUN
 	 */
-#ifdef CONFIG_NVM_PBLK_DEBUG
-	int i;
-
-	for (i = 1; i < nr_ppas; i++)
-		WARN_ON(ppa_list[0].a.lun != ppa_list[i].a.lun ||
-				ppa_list[0].a.ch != ppa_list[i].a.ch);
-#endif
 
 	ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
 	if (ret == -ETIME || ret == -EINTR)
@@ -1885,21 +1877,21 @@ static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
 				-ret);
 }
 
-void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
+void pblk_down_chunk(struct pblk *pblk, struct ppa_addr ppa)
 {
 	struct nvm_tgt_dev *dev = pblk->dev;
 	struct nvm_geo *geo = &dev->geo;
-	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
+	int pos = pblk_ppa_to_pos(geo, ppa);
 
-	__pblk_down_page(pblk, ppa_list, nr_ppas, pos);
+	__pblk_down_chunk(pblk, pos);
 }
 
-void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
+void pblk_down_rq(struct pblk *pblk, struct ppa_addr ppa,
 		  unsigned long *lun_bitmap)
 {
 	struct nvm_tgt_dev *dev = pblk->dev;
 	struct nvm_geo *geo = &dev->geo;
-	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
+	int pos = pblk_ppa_to_pos(geo, ppa);
 
 	/* If the LUN has been locked for this same request, do no attempt to
 	 * lock it again
@@ -1907,23 +1899,15 @@ void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
 	if (test_and_set_bit(pos, lun_bitmap))
 		return;
 
-	__pblk_down_page(pblk, ppa_list, nr_ppas, pos);
+	__pblk_down_chunk(pblk, pos);
 }
 
-void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
+void pblk_up_chunk(struct pblk *pblk, struct ppa_addr ppa)
 {
 	struct nvm_tgt_dev *dev = pblk->dev;
 	struct nvm_geo *geo = &dev->geo;
 	struct pblk_lun *rlun;
-	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
-	int i;
-
-	for (i = 1; i < nr_ppas; i++)
-		WARN_ON(ppa_list[0].a.lun != ppa_list[i].a.lun ||
-				ppa_list[0].a.ch != ppa_list[i].a.ch);
-#endif
+	int pos = pblk_ppa_to_pos(geo, ppa);
 
 	rlun = &pblk->luns[pos];
 	up(&rlun->wr_sem);

+ 1 - 1
drivers/lightnvm/pblk-map.c

@@ -79,7 +79,7 @@ static int pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
 		}
 	}
 
-	pblk_down_rq(pblk, ppa_list, nr_secs, lun_bitmap);
+	pblk_down_rq(pblk, ppa_list[0], lun_bitmap);
 	return 0;
 }
 
+ 3 - 3
drivers/lightnvm/pblk-recovery.c

@@ -227,7 +227,7 @@ static void pblk_end_io_recov(struct nvm_rq *rqd)
 	struct pblk_pad_rq *pad_rq = rqd->private;
 	struct pblk *pblk = pad_rq->pblk;
 
-	pblk_up_page(pblk, ppa_list, rqd->nr_ppas);
+	pblk_up_chunk(pblk, ppa_list[0]);
 
 	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
 
@@ -339,12 +339,12 @@ next_pad_rq:
 	}
 
 	kref_get(&pad_rq->ref);
-	pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas);
+	pblk_down_chunk(pblk, rqd->ppa_list[0]);
 
 	ret = pblk_submit_io(pblk, rqd);
 	if (ret) {
 		pblk_err(pblk, "I/O submission failed: %d\n", ret);
-		pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
+		pblk_up_chunk(pblk, rqd->ppa_list[0]);
 		goto fail_free_bio;
 	}
 
+ 3 - 3
drivers/lightnvm/pblk-write.c

@@ -270,7 +270,7 @@ static void pblk_end_io_write_meta(struct nvm_rq *rqd)
 	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
 	int sync;
 
-	pblk_up_page(pblk, ppa_list, rqd->nr_ppas);
+	pblk_up_chunk(pblk, ppa_list[0]);
 
 	if (rqd->error) {
 		pblk_log_write_err(pblk, rqd);
@@ -420,7 +420,7 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
 		list_del(&meta_line->list);
 	spin_unlock(&l_mg->close_lock);
 
-	pblk_down_page(pblk, ppa_list, rqd->nr_ppas);
+	pblk_down_chunk(pblk, ppa_list[0]);
 
 	ret = pblk_submit_io(pblk, rqd);
 	if (ret) {
@@ -431,7 +431,7 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
 	return NVM_IO_OK;
 
 fail_rollback:
-	pblk_up_page(pblk, ppa_list, rqd->nr_ppas);
+	pblk_up_chunk(pblk, ppa_list[0]);
 	spin_lock(&l_mg->close_lock);
 	pblk_dealloc_page(pblk, meta_line, rq_ppas);
 	list_add(&meta_line->list, &meta_line->list);

+ 3 - 3
drivers/lightnvm/pblk.h

@@ -823,10 +823,10 @@ u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
 u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
 int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
 		   unsigned long secs_to_flush);
-void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
-void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
+void pblk_down_rq(struct pblk *pblk, struct ppa_addr ppa,
 		  unsigned long *lun_bitmap);
-void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
+void pblk_down_chunk(struct pblk *pblk, struct ppa_addr ppa);
+void pblk_up_chunk(struct pblk *pblk, struct ppa_addr ppa);
 void pblk_up_rq(struct pblk *pblk, unsigned long *lun_bitmap);
 int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
 		       int nr_pages);