lightnvm: pblk: use exact free block counter in RL

Until now, pblk's rate-limiter has used a heuristic to reserve space for
GC I/O given that the over-provision area was fixed.

In preparation for allowing the over-provision area to be defined at
target creation time, add a dedicated counter (free_user_blocks) to the
rate-limiter to track the number of free blocks available for user
data.
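
The intent behind keeping two counters can be summarized with a minimal
sketch (kernel-style C; the struct and function names below are
illustrative only and simplify pblk's own pblk_rl structure and
pblk_rl_free_lines_dec()):

	#include <linux/atomic.h>
	#include <linux/types.h>

	/* Illustrative only: the rate-limiter keeps two counters.
	 * free_blocks counts every free block in the instance, while
	 * free_user_blocks counts only the free blocks available for
	 * user data, which is what the rate limits are derived from.
	 */
	struct rl_counters {
		atomic_t free_blocks;		/* all free blocks */
		atomic_t free_user_blocks;	/* free blocks for user data */
	};

	/* When a line is taken, the global counter always shrinks, but
	 * the user counter shrinks only if the line holds user data.
	 * The returned value is what the rates are recalculated from.
	 */
	static int rl_line_dec(struct rl_counters *rl, int blk_in_line,
			       bool used)
	{
		atomic_sub(blk_in_line, &rl->free_blocks);

		if (used)
			return atomic_sub_return(blk_in_line,
						 &rl->free_user_blocks);

		return atomic_read(&rl->free_user_blocks);
	}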

Signed-off-by: Javier González <javier@cnexlabs.com>
Signed-off-by: Hans Holmberg <hans.holmberg@cnexlabs.com>
Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
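
Note on the new high watermark computed in pblk_rl_init(): it is now
derived from the over-provisioned block budget minus the blocks
consumed by line metadata, rather than from a fixed fraction of
total_blocks. With purely illustrative numbers (not taken from any real
geometry): smeta_sec + emeta_sec[0] = 64 sectors per line,
nr_free_lines = 1000, sec_per_chk = 4096, op_blks = 2048 and
blk_per_line = 16 give:

	sec_meta = 64 * 1000                 = 64000
	blk_meta = DIV_ROUND_UP(64000, 4096) = 16
	rl->high = 2048 - 16 - 16            = 2016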
diff --git a/drivers/lightnvm/pblk-rl.c b/drivers/lightnvm/pblk-rl.c
index dacc719..0d457b1 100644
--- a/drivers/lightnvm/pblk-rl.c
+++ b/drivers/lightnvm/pblk-rl.c
@@ -89,17 +89,15 @@ unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl)
 	return atomic_read(&rl->free_blocks);
 }
 
-/*
- * We check for (i) the number of free blocks in the current LUN and (ii) the
- * total number of free blocks in the pblk instance. This is to even out the
- * number of free blocks on each LUN when GC kicks in.
- *
- * Only the total number of free blocks is used to configure the rate limiter.
- */
-void pblk_rl_update_rates(struct pblk_rl *rl)
+unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl)
+{
+	return atomic_read(&rl->free_user_blocks);
+}
+
+static void __pblk_rl_update_rates(struct pblk_rl *rl,
+				   unsigned long free_blocks)
 {
 	struct pblk *pblk = container_of(rl, struct pblk, rl);
-	unsigned long free_blocks = pblk_rl_nr_free_blks(rl);
 	int max = rl->rb_budget;
 
 	if (free_blocks >= rl->high) {
@@ -132,20 +130,37 @@ void pblk_rl_update_rates(struct pblk_rl *rl)
 		pblk_gc_should_stop(pblk);
 }
 
+void pblk_rl_update_rates(struct pblk_rl *rl)
+{
+	__pblk_rl_update_rates(rl, pblk_rl_nr_user_free_blks(rl));
+}
+
 void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line)
 {
 	int blk_in_line = atomic_read(&line->blk_in_line);
+	int free_blocks;
 
 	atomic_add(blk_in_line, &rl->free_blocks);
-	pblk_rl_update_rates(rl);
+	free_blocks = atomic_add_return(blk_in_line, &rl->free_user_blocks);
+
+	__pblk_rl_update_rates(rl, free_blocks);
 }
 
-void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line)
+void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
+			    bool used)
 {
 	int blk_in_line = atomic_read(&line->blk_in_line);
+	int free_blocks;
 
 	atomic_sub(blk_in_line, &rl->free_blocks);
-	pblk_rl_update_rates(rl);
+
+	if (used)
+		free_blocks = atomic_sub_return(blk_in_line,
+							&rl->free_user_blocks);
+	else
+		free_blocks = atomic_read(&rl->free_user_blocks);
+
+	__pblk_rl_update_rates(rl, free_blocks);
 }
 
 int pblk_rl_high_thrs(struct pblk_rl *rl)
@@ -174,16 +189,21 @@ void pblk_rl_free(struct pblk_rl *rl)
 void pblk_rl_init(struct pblk_rl *rl, int budget)
 {
 	struct pblk *pblk = container_of(rl, struct pblk, rl);
+	struct nvm_tgt_dev *dev = pblk->dev;
+	struct nvm_geo *geo = &dev->geo;
+	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
 	struct pblk_line_meta *lm = &pblk->lm;
 	int min_blocks = lm->blk_per_line * PBLK_GC_RSV_LINE;
+	int sec_meta, blk_meta;
+
 	unsigned int rb_windows;
 
-	rl->high = rl->total_blocks / PBLK_USER_HIGH_THRS;
-	rl->high_pw = get_count_order(rl->high);
+	/* Consider sectors used for metadata */
+	sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
+	blk_meta = DIV_ROUND_UP(sec_meta, geo->sec_per_chk);
 
-	rl->low = rl->total_blocks / PBLK_USER_LOW_THRS;
-	if (rl->low < min_blocks)
-		rl->low = min_blocks;
+	rl->high = pblk->op_blks - blk_meta - lm->blk_per_line;
+	rl->high_pw = get_count_order(rl->high);
 
 	rl->rsv_blocks = min_blocks;