/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *
 * Based upon the circular ringbuffer.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-rb.c - pblk's write buffer
 */

#include <linux/circ_buf.h>

#include "pblk.h"

static DECLARE_RWSEM(pblk_rb_lock);

void pblk_rb_data_free(struct pblk_rb *rb)
{
        struct pblk_rb_pages *p, *t;

        down_write(&pblk_rb_lock);
        list_for_each_entry_safe(p, t, &rb->pages, list) {
                free_pages((unsigned long)page_address(p->pages), p->order);
                list_del(&p->list);
                kfree(p);
        }
        up_write(&pblk_rb_lock);
}

/*
 * Initialize ring buffer. The data and metadata buffers must be previously
 * allocated and their size must be a power of two
 * (Documentation/circular-buffers.txt)
 */
int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
                 unsigned int power_size, unsigned int power_seg_sz)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        unsigned int init_entry = 0;
        unsigned int alloc_order = power_size;
        unsigned int max_order = MAX_ORDER - 1;
        unsigned int order, iter;

        down_write(&pblk_rb_lock);
        rb->entries = rb_entry_base;
        rb->seg_size = (1 << power_seg_sz);
        rb->nr_entries = (1 << power_size);
        rb->mem = rb->subm = rb->sync = rb->l2p_update = 0;
        rb->sync_point = EMPTY_ENTRY;

        spin_lock_init(&rb->w_lock);
        spin_lock_init(&rb->s_lock);

        INIT_LIST_HEAD(&rb->pages);

        if (alloc_order >= max_order) {
                order = max_order;
                iter = (1 << (alloc_order - max_order));
        } else {
                order = alloc_order;
                iter = 1;
        }
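
        /*
         * Worked example (numbers invented for illustration, assuming the
         * common MAX_ORDER of 11 and seg_size equal to the page size, as the
         * loop below implies): with power_size = 12 (a 4096-entry buffer),
         * one allocation cannot back the whole buffer, so we take
         * iter = 1 << (12 - 10) = 4 allocations of order 10 (1024 pages
         * each). With power_size = 7, a single order-7 allocation (128
         * pages) covers the buffer.
         */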
        do {
                struct pblk_rb_entry *entry;
                struct pblk_rb_pages *page_set;
                void *kaddr;
                unsigned long set_size;
                int i;

                page_set = kmalloc(sizeof(struct pblk_rb_pages), GFP_KERNEL);
                if (!page_set) {
                        up_write(&pblk_rb_lock);
                        return -ENOMEM;
                }

                page_set->order = order;
                page_set->pages = alloc_pages(GFP_KERNEL, order);
                if (!page_set->pages) {
                        kfree(page_set);
                        pblk_rb_data_free(rb);
                        up_write(&pblk_rb_lock);
                        return -ENOMEM;
                }
                kaddr = page_address(page_set->pages);

                entry = &rb->entries[init_entry];
                entry->data = kaddr;
                entry->cacheline = pblk_cacheline_to_addr(init_entry++);
                entry->w_ctx.flags = PBLK_WRITABLE_ENTRY;
                bio_list_init(&entry->w_ctx.bios);

                set_size = (1 << order);
                for (i = 1; i < set_size; i++) {
                        entry = &rb->entries[init_entry];
                        entry->cacheline = pblk_cacheline_to_addr(init_entry++);
                        entry->data = kaddr + (i * rb->seg_size);
                        entry->w_ctx.flags = PBLK_WRITABLE_ENTRY;
                        bio_list_init(&entry->w_ctx.bios);
                }

                list_add_tail(&page_set->list, &rb->pages);
                iter--;
        } while (iter > 0);
        up_write(&pblk_rb_lock);

#ifdef CONFIG_NVM_DEBUG
        atomic_set(&rb->inflight_sync_point, 0);
#endif

        /*
         * Initialize rate-limiter, which controls access to the write buffer
         * by user and GC I/O
         */
        pblk_rl_init(&pblk->rl, rb->nr_entries);

        return 0;
}

/*
 * pblk_rb_calculate_size -- calculate the size of the write buffer
 */
unsigned int pblk_rb_calculate_size(unsigned int nr_entries)
{
        /* Alloc a write buffer that can at least fit 128 entries */
        return (1 << max(get_count_order(nr_entries), 7));
}
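
/*
 * Worked example (numbers invented for illustration): for nr_entries = 40,
 * get_count_order(40) = 6, so max(6, 7) = 7 and the buffer holds
 * 1 << 7 = 128 entries; for nr_entries = 300, get_count_order(300) = 9
 * yields 512 entries. The result is always a power of two, as the ring
 * arithmetic requires.
 */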

void *pblk_rb_entries_ref(struct pblk_rb *rb)
{
        return rb->entries;
}

static void clean_wctx(struct pblk_w_ctx *w_ctx)
{
        int flags;

try:
        flags = READ_ONCE(w_ctx->flags);
        if (!(flags & PBLK_SUBMITTED_ENTRY))
                goto try;

        /* Release flags on context. Protect from writes and reads */
        smp_store_release(&w_ctx->flags, PBLK_WRITABLE_ENTRY);
        pblk_ppa_set_empty(&w_ctx->ppa);
        w_ctx->lba = ADDR_EMPTY;
}

#define pblk_rb_ring_count(head, tail, size) CIRC_CNT(head, tail, size)
#define pblk_rb_ring_space(rb, head, tail, size) \
        (CIRC_SPACE(head, tail, size))
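
/*
 * Both macros come from <linux/circ_buf.h> and assume a power-of-two size.
 * Worked example (numbers invented for illustration): with size = 8,
 * head = 2 and tail = 6, CIRC_CNT(2, 6, 8) = (2 - 6) & 7 = 4 entries are in
 * use, while CIRC_SPACE(2, 6, 8) = (6 - 2 - 1) & 7 = 3 entries are free;
 * one slot always stays empty so a full ring can be told apart from an
 * empty one.
 */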

/*
 * Buffer space is calculated with respect to the back pointer signaling
 * synchronized entries to the media.
 */
static unsigned int pblk_rb_space(struct pblk_rb *rb)
{
        unsigned int mem = READ_ONCE(rb->mem);
        unsigned int sync = READ_ONCE(rb->sync);

        return pblk_rb_ring_space(rb, mem, sync, rb->nr_entries);
}

/*
 * Buffer count is calculated with respect to the submission pointer, which
 * signals the entries that are ready to be sent to the media.
 */
unsigned int pblk_rb_read_count(struct pblk_rb *rb)
{
        unsigned int mem = READ_ONCE(rb->mem);
        unsigned int subm = READ_ONCE(rb->subm);

        return pblk_rb_ring_count(mem, subm, rb->nr_entries);
}

unsigned int pblk_rb_sync_count(struct pblk_rb *rb)
{
        unsigned int mem = READ_ONCE(rb->mem);
        unsigned int sync = READ_ONCE(rb->sync);

        return pblk_rb_ring_count(mem, sync, rb->nr_entries);
}

unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int nr_entries)
{
        unsigned int subm;

        subm = READ_ONCE(rb->subm);
        /* Commit read means updating submission pointer */
        smp_store_release(&rb->subm,
                          (subm + nr_entries) & (rb->nr_entries - 1));

        return subm;
}
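
/*
 * To summarize the ring pointers used above and below: mem marks where
 * producers copy data in, subm marks what has been handed to the write
 * thread for submission, sync marks what the device has acknowledged, and
 * l2p_update trails sync, lazily re-pointing L2P entries from cachelines to
 * device addresses before their slots are reused. In ring order:
 * l2p_update <= sync <= subm <= mem.
 */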

static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int *l2p_upd,
                                unsigned int to_update)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct pblk_line *line;
        struct pblk_rb_entry *entry;
        struct pblk_w_ctx *w_ctx;
        unsigned int user_io = 0, gc_io = 0;
        unsigned int i;
        int flags;

        for (i = 0; i < to_update; i++) {
                entry = &rb->entries[*l2p_upd];
                w_ctx = &entry->w_ctx;

                flags = READ_ONCE(entry->w_ctx.flags);
                if (flags & PBLK_IOTYPE_USER)
                        user_io++;
                else if (flags & PBLK_IOTYPE_GC)
                        gc_io++;
                else
                        WARN(1, "pblk: unknown IO type\n");

                pblk_update_map_dev(pblk, w_ctx->lba, w_ctx->ppa,
                                    entry->cacheline);

                line = &pblk->lines[pblk_tgt_ppa_to_line(w_ctx->ppa)];
                kref_put(&line->ref, pblk_line_put);
                clean_wctx(w_ctx);
                *l2p_upd = (*l2p_upd + 1) & (rb->nr_entries - 1);
        }

        pblk_rl_out(&pblk->rl, user_io, gc_io);

        return 0;
}

/*
 * When we move the l2p_update pointer, we update the l2p table - lookups will
 * point to the physical address instead of to the cacheline in the write buffer
 * from this moment on.
 */
static int pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int nr_entries,
                              unsigned int mem, unsigned int sync)
{
        unsigned int space, count;
        int ret = 0;

        lockdep_assert_held(&rb->w_lock);

        /* Update l2p only as buffer entries are being overwritten */
        space = pblk_rb_ring_space(rb, mem, rb->l2p_update, rb->nr_entries);
        if (space > nr_entries)
                goto out;

        count = nr_entries - space;
        /* l2p_update used exclusively under rb->w_lock */
        ret = __pblk_rb_update_l2p(rb, &rb->l2p_update, count);

out:
        return ret;
}

/*
 * Update the l2p entry for all sectors stored on the write buffer. This means
 * that all future lookups to the l2p table will point to a device address, not
 * to the cacheline in the write buffer.
 */
void pblk_rb_sync_l2p(struct pblk_rb *rb)
{
        unsigned int sync;
        unsigned int to_update;

        spin_lock(&rb->w_lock);

        /* Protect from reads and writes */
        sync = smp_load_acquire(&rb->sync);

        to_update = pblk_rb_ring_count(sync, rb->l2p_update, rb->nr_entries);
        __pblk_rb_update_l2p(rb, &rb->l2p_update, to_update);

        spin_unlock(&rb->w_lock);
}
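
/*
 * Until l2p_update passes an entry, a lookup of its lba resolves to a
 * cacheline address and the read is served from the buffer. A sketch of the
 * check the read path performs (the real code lives in pblk-read.c;
 * pblk_addr_in_cache() is assumed from pblk.h):
 *
 *	ppa = pblk_trans_map_get(pblk, lba);
 *	if (pblk_addr_in_cache(ppa))
 *		pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa, ...);
 *	else
 *		// read from the device address in ppa
 */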

/*
 * Write @nr_entries to ring buffer from @data buffer if there is enough space.
 * Typically, 4KB data chunks coming from a bio will be copied to the ring
 * buffer, thus the write will fail if not all incoming data can be copied.
 */
static void __pblk_rb_write_entry(struct pblk_rb *rb, void *data,
                                  struct pblk_w_ctx w_ctx,
                                  struct pblk_rb_entry *entry)
{
        memcpy(entry->data, data, rb->seg_size);

        entry->w_ctx.lba = w_ctx.lba;
        entry->w_ctx.ppa = w_ctx.ppa;
}

void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
                              struct pblk_w_ctx w_ctx, unsigned int ring_pos)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct pblk_rb_entry *entry;
        int flags;

        entry = &rb->entries[ring_pos];
        flags = READ_ONCE(entry->w_ctx.flags);
#ifdef CONFIG_NVM_DEBUG
        /* Caller must guarantee that the entry is free */
        BUG_ON(!(flags & PBLK_WRITABLE_ENTRY));
#endif

        __pblk_rb_write_entry(rb, data, w_ctx, entry);

        pblk_update_map_cache(pblk, w_ctx.lba, entry->cacheline);
        flags = w_ctx.flags | PBLK_WRITTEN_DATA;

        /* Release flags on write context. Protect from writes */
        smp_store_release(&entry->w_ctx.flags, flags);
}
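
/*
 * A sketch of the user write path as seen from this file (the real caller
 * lives in pblk-cache.c; the glue shown here is illustrative, and the real
 * caller fills a fresh w_ctx with the per-sector lba for each entry):
 *
 *	ret = pblk_rb_may_write_user(rb, bio, nr_entries, &pos);
 *	if (ret == NVM_IO_OK || ret == NVM_IO_DONE) {
 *		for (i = 0; i < nr_entries; i++) {
 *			ring_pos = pblk_rb_wrap_pos(rb, pos + i);
 *			pblk_rb_write_entry_user(rb, data + i * rb->seg_size,
 *						 w_ctx, ring_pos);
 *		}
 *	}
 *
 * Space is reserved atomically under rb->w_lock, so the copies can proceed
 * without locks; the write thread busy-waits on PBLK_WRITTEN_DATA instead.
 */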

void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
                            struct pblk_w_ctx w_ctx, struct pblk_line *line,
                            u64 paddr, unsigned int ring_pos)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct pblk_rb_entry *entry;
        int flags;

        entry = &rb->entries[ring_pos];
        flags = READ_ONCE(entry->w_ctx.flags);
#ifdef CONFIG_NVM_DEBUG
        /* Caller must guarantee that the entry is free */
        BUG_ON(!(flags & PBLK_WRITABLE_ENTRY));
#endif

        __pblk_rb_write_entry(rb, data, w_ctx, entry);

        if (!pblk_update_map_gc(pblk, w_ctx.lba, entry->cacheline, line, paddr))
                entry->w_ctx.lba = ADDR_EMPTY;

        flags = w_ctx.flags | PBLK_WRITTEN_DATA;

        /* Release flags on write context. Protect from writes */
        smp_store_release(&entry->w_ctx.flags, flags);
}

static int pblk_rb_sync_point_set(struct pblk_rb *rb, struct bio *bio,
                                  unsigned int pos)
{
        struct pblk_rb_entry *entry;
        unsigned int subm, sync_point;

        subm = READ_ONCE(rb->subm);

#ifdef CONFIG_NVM_DEBUG
        atomic_inc(&rb->inflight_sync_point);
#endif

        if (pos == subm)
                return 0;

        sync_point = (pos == 0) ? (rb->nr_entries - 1) : (pos - 1);
        entry = &rb->entries[sync_point];

        /* Protect syncs */
        smp_store_release(&rb->sync_point, sync_point);

        if (!bio)
                return 0;

        spin_lock_irq(&rb->s_lock);
        bio_list_add(&entry->w_ctx.bios, bio);
        spin_unlock_irq(&rb->s_lock);

        return 1;
}

static int __pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
                               unsigned int *pos)
{
        unsigned int mem;
        unsigned int sync;

        sync = READ_ONCE(rb->sync);
        mem = READ_ONCE(rb->mem);

        if (pblk_rb_ring_space(rb, mem, sync, rb->nr_entries) < nr_entries)
                return 0;

        if (pblk_rb_update_l2p(rb, nr_entries, mem, sync))
                return 0;

        *pos = mem;

        return 1;
}

static int pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
                             unsigned int *pos)
{
        if (!__pblk_rb_may_write(rb, nr_entries, pos))
                return 0;

        /* Protect from read count */
        smp_store_release(&rb->mem, (*pos + nr_entries) & (rb->nr_entries - 1));
        return 1;
}

void pblk_rb_flush(struct pblk_rb *rb)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        unsigned int mem = READ_ONCE(rb->mem);

        if (pblk_rb_sync_point_set(rb, NULL, mem))
                return;

        pblk_write_should_kick(pblk);
}

static int pblk_rb_may_write_flush(struct pblk_rb *rb, unsigned int nr_entries,
                                   unsigned int *pos, struct bio *bio,
                                   int *io_ret)
{
        unsigned int mem;

        if (!__pblk_rb_may_write(rb, nr_entries, pos))
                return 0;

        mem = (*pos + nr_entries) & (rb->nr_entries - 1);
        *io_ret = NVM_IO_DONE;

        if (bio->bi_opf & REQ_PREFLUSH) {
                struct pblk *pblk = container_of(rb, struct pblk, rwb);

#ifdef CONFIG_NVM_DEBUG
                atomic_long_inc(&pblk->nr_flush);
#endif
                if (pblk_rb_sync_point_set(&pblk->rwb, bio, mem))
                        *io_ret = NVM_IO_OK;
        }

        /* Protect from read count */
        smp_store_release(&rb->mem, mem);

        return 1;
}

/*
 * Atomically check that (i) there is space on the write buffer for the
 * incoming I/O, and (ii) the current I/O type has enough budget in the write
 * buffer (rate-limiter).
 */
int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
                           unsigned int nr_entries, unsigned int *pos)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        int io_ret;

        spin_lock(&rb->w_lock);
        io_ret = pblk_rl_user_may_insert(&pblk->rl, nr_entries);
        if (io_ret) {
                spin_unlock(&rb->w_lock);
                return io_ret;
        }

        if (!pblk_rb_may_write_flush(rb, nr_entries, pos, bio, &io_ret)) {
                spin_unlock(&rb->w_lock);
                return NVM_IO_REQUEUE;
        }

        pblk_rl_user_in(&pblk->rl, nr_entries);
        spin_unlock(&rb->w_lock);

        return io_ret;
}

/*
 * See the comment on pblk_rb_may_write_user() above.
 */
int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
                         unsigned int *pos)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);

        spin_lock(&rb->w_lock);
        if (!pblk_rl_gc_may_insert(&pblk->rl, nr_entries)) {
                spin_unlock(&rb->w_lock);
                return 0;
        }

        if (!pblk_rb_may_write(rb, nr_entries, pos)) {
                spin_unlock(&rb->w_lock);
                return 0;
        }

        pblk_rl_gc_in(&pblk->rl, nr_entries);
        spin_unlock(&rb->w_lock);

        return 1;
}

/*
 * The caller of this function must ensure that the backpointer will not
 * overwrite the entries passed on the list.
 */
unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio,
                                      struct list_head *list,
                                      unsigned int max)
{
        struct pblk_rb_entry *entry, *tentry;
        struct page *page;
        unsigned int read = 0;
        int ret;

        list_for_each_entry_safe(entry, tentry, list, index) {
                if (read > max) {
                        pr_err("pblk: too many entries on list\n");
                        goto out;
                }

                page = virt_to_page(entry->data);
                if (!page) {
                        pr_err("pblk: could not allocate write bio page\n");
                        goto out;
                }

                ret = bio_add_page(bio, page, rb->seg_size, 0);
                if (ret != rb->seg_size) {
                        pr_err("pblk: could not add page to write bio\n");
                        goto out;
                }

                list_del(&entry->index);
                read++;
        }

out:
        return read;
}

/*
 * Read available entries on rb and add them to the given bio. To avoid a
 * memory copy, a page reference to the write buffer is added to the bio
 * instead.
 *
 * This function is used by the write thread to form the write bio that will
 * persist data on the write buffer to the media.
 */
unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
                                 struct bio *bio, unsigned int pos,
                                 unsigned int nr_entries, unsigned int count)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct request_queue *q = pblk->dev->q;
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
        struct pblk_rb_entry *entry;
        struct page *page;
        unsigned int pad = 0, to_read = nr_entries;
        unsigned int i;
        int flags;

        if (count < nr_entries) {
                pad = nr_entries - count;
                to_read = count;
        }

        c_ctx->sentry = pos;
        c_ctx->nr_valid = to_read;
        c_ctx->nr_padded = pad;

        for (i = 0; i < to_read; i++) {
                entry = &rb->entries[pos];

                /* A write has been allowed into the buffer, but data is still
                 * being copied to it. It is ok to busy wait.
                 */
try:
                flags = READ_ONCE(entry->w_ctx.flags);
                if (!(flags & PBLK_WRITTEN_DATA)) {
                        io_schedule();
                        goto try;
                }

                page = virt_to_page(entry->data);
                if (!page) {
                        pr_err("pblk: could not allocate write bio page\n");
                        flags &= ~PBLK_WRITTEN_DATA;
                        flags |= PBLK_SUBMITTED_ENTRY;
                        /* Release flags on context. Protect from writes */
                        smp_store_release(&entry->w_ctx.flags, flags);
                        return NVM_IO_ERR;
                }

                if (bio_add_pc_page(q, bio, page, rb->seg_size, 0) !=
                                rb->seg_size) {
                        pr_err("pblk: could not add page to write bio\n");
                        flags &= ~PBLK_WRITTEN_DATA;
                        flags |= PBLK_SUBMITTED_ENTRY;
                        /* Release flags on context. Protect from writes */
                        smp_store_release(&entry->w_ctx.flags, flags);
                        return NVM_IO_ERR;
                }

                if (flags & PBLK_FLUSH_ENTRY) {
                        unsigned int sync_point;

                        sync_point = READ_ONCE(rb->sync_point);
                        if (sync_point == pos) {
                                /* Protect syncs */
                                smp_store_release(&rb->sync_point, EMPTY_ENTRY);
                        }

                        flags &= ~PBLK_FLUSH_ENTRY;
#ifdef CONFIG_NVM_DEBUG
                        atomic_dec(&rb->inflight_sync_point);
#endif
                }

                flags &= ~PBLK_WRITTEN_DATA;
                flags |= PBLK_SUBMITTED_ENTRY;

                /* Release flags on context. Protect from writes */
                smp_store_release(&entry->w_ctx.flags, flags);

                pos = (pos + 1) & (rb->nr_entries - 1);
        }

        if (pad) {
                if (pblk_bio_add_pages(pblk, bio, GFP_KERNEL, pad)) {
                        pr_err("pblk: could not pad page in write bio\n");
                        return NVM_IO_ERR;
                }
        }

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(pad, &((struct pblk *)
                        (container_of(rb, struct pblk, rwb)))->padded_writes);
#endif

        return NVM_IO_OK;
}
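
/*
 * A sketch of how the write thread drives the ring (the real loop lives in
 * pblk-write.c; the glue shown here is illustrative):
 *
 *	secs_avail = pblk_rb_read_count(rb);
 *	secs_to_flush = pblk_rb_sync_point_count(rb);
 *	... pick secs_to_sync, allocate rqd and bio ...
 *	pos = pblk_rb_read_commit(rb, secs_to_sync);
 *	pblk_rb_read_to_bio(rb, rqd, bio, pos, secs_to_sync, secs_avail);
 *	... submit; the completion path advances rb->sync ...
 */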

/*
 * Copy to bio only if the lba matches the one on the given cache entry.
 * Otherwise, it means that the entry has been overwritten, and the bio should
 * be directed to disk.
 */
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
                        struct ppa_addr ppa, int bio_iter, bool advanced_bio)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct pblk_rb_entry *entry;
        struct pblk_w_ctx *w_ctx;
        struct ppa_addr l2p_ppa;
        u64 pos = pblk_addr_to_cacheline(ppa);
        void *data;
        int flags;
        int ret = 1;

#ifdef CONFIG_NVM_DEBUG
        /* Caller must ensure that the access will not cause an overflow */
        BUG_ON(pos >= rb->nr_entries);
#endif
        entry = &rb->entries[pos];
        w_ctx = &entry->w_ctx;
        flags = READ_ONCE(w_ctx->flags);

        spin_lock(&rb->w_lock);
        spin_lock(&pblk->trans_lock);
        l2p_ppa = pblk_trans_map_get(pblk, lba);
        spin_unlock(&pblk->trans_lock);

        /* Check if the entry has been overwritten or is scheduled to be */
        if (!pblk_ppa_comp(l2p_ppa, ppa) || w_ctx->lba != lba ||
                        flags & PBLK_WRITABLE_ENTRY) {
                ret = 0;
                goto out;
        }

        /* Only advance the bio if it hasn't been advanced already. If advanced,
         * this bio is at least a partial bio (i.e., it has partially been
         * filled with data from the cache). If part of the data resides on the
         * media, it will be read later on.
         */
        if (unlikely(!advanced_bio))
                bio_advance(bio, bio_iter * PBLK_EXPOSED_PAGE_SIZE);

        data = bio_data(bio);
        memcpy(data, entry->data, rb->seg_size);

out:
        spin_unlock(&rb->w_lock);
        return ret;
}

struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos)
{
        unsigned int entry = pos & (rb->nr_entries - 1);

        return &rb->entries[entry].w_ctx;
}

unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags)
        __acquires(&rb->s_lock)
{
        if (flags)
                spin_lock_irqsave(&rb->s_lock, *flags);
        else
                spin_lock_irq(&rb->s_lock);

        return rb->sync;
}

void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags)
        __releases(&rb->s_lock)
{
        lockdep_assert_held(&rb->s_lock);

        if (flags)
                spin_unlock_irqrestore(&rb->s_lock, *flags);
        else
                spin_unlock_irq(&rb->s_lock);
}

unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries)
{
        unsigned int sync;
        unsigned int i;

        lockdep_assert_held(&rb->s_lock);

        sync = READ_ONCE(rb->sync);

        for (i = 0; i < nr_entries; i++)
                sync = (sync + 1) & (rb->nr_entries - 1);

        /* Protect from counts */
        smp_store_release(&rb->sync, sync);

        return sync;
}
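
/*
 * A sketch of the completion-side pairing for the three helpers above (the
 * real code lives in the write completion path in pblk-write.c; shown here
 * for illustration):
 *
 *	pos = pblk_rb_sync_init(rb, &flags);	(takes rb->s_lock)
 *	... complete the entries starting at pos ...
 *	pblk_rb_sync_advance(rb, c_ctx->nr_valid);
 *	pblk_rb_sync_end(rb, &flags);		(releases rb->s_lock)
 *
 * Holding rb->s_lock across the advance serializes out-of-order completions
 * against each other.
 */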

unsigned int pblk_rb_sync_point_count(struct pblk_rb *rb)
{
        unsigned int subm, sync_point;
        unsigned int count;

        /* Protect syncs */
        sync_point = smp_load_acquire(&rb->sync_point);
        if (sync_point == EMPTY_ENTRY)
                return 0;

        subm = READ_ONCE(rb->subm);

        /* The sync point itself counts as a sector to sync */
        count = pblk_rb_ring_count(sync_point, subm, rb->nr_entries) + 1;

        return count;
}

/*
 * Scan from the current position of the sync pointer to find the entry that
 * corresponds to the given ppa. This is necessary since write requests can be
 * completed out of order. The assumption is that the ppa is close to the sync
 * pointer thus the search will not take long.
 *
 * The caller of this function must guarantee that the sync pointer will not
 * reach the entry while it is using the metadata associated with it. With this
 * assumption in mind, there is no need to take the sync lock.
 */
struct pblk_rb_entry *pblk_rb_sync_scan_entry(struct pblk_rb *rb,
                                              struct ppa_addr *ppa)
{
        unsigned int sync, subm, count;
        unsigned int i;

        sync = READ_ONCE(rb->sync);
        subm = READ_ONCE(rb->subm);
        count = pblk_rb_ring_count(subm, sync, rb->nr_entries);

        for (i = 0; i < count; i++)
                sync = (sync + 1) & (rb->nr_entries - 1);

        return NULL;
}

int pblk_rb_tear_down_check(struct pblk_rb *rb)
{
        struct pblk_rb_entry *entry;
        int i;
        int ret = 0;

        spin_lock(&rb->w_lock);
        spin_lock_irq(&rb->s_lock);

        if ((rb->mem == rb->subm) && (rb->subm == rb->sync) &&
                        (rb->sync == rb->l2p_update) &&
                        (rb->sync_point == EMPTY_ENTRY)) {
                goto out;
        }

        if (!rb->entries) {
                ret = 1;
                goto out;
        }

        for (i = 0; i < rb->nr_entries; i++) {
                entry = &rb->entries[i];

                if (!entry->data) {
                        ret = 1;
                        goto out;
                }
        }

out:
        spin_unlock(&rb->w_lock);
        spin_unlock_irq(&rb->s_lock);

        return ret;
}

unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos)
{
        return (pos & (rb->nr_entries - 1));
}

int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos)
{
        return (pos >= rb->nr_entries);
}

ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct pblk_c_ctx *c;
        ssize_t offset;
        int queued_entries = 0;

        spin_lock_irq(&rb->s_lock);
        list_for_each_entry(c, &pblk->compl_list, list)
                queued_entries++;
        spin_unlock_irq(&rb->s_lock);

        if (rb->sync_point != EMPTY_ENTRY)
                offset = scnprintf(buf, PAGE_SIZE,
                        "%u\t%u\t%u\t%u\t%u\t%u\t%u - %u/%u/%u - %d\n",
                        rb->nr_entries,
                        rb->mem,
                        rb->subm,
                        rb->sync,
                        rb->l2p_update,
#ifdef CONFIG_NVM_DEBUG
                        atomic_read(&rb->inflight_sync_point),
#else
                        0,
#endif
                        rb->sync_point,
                        pblk_rb_read_count(rb),
                        pblk_rb_space(rb),
                        pblk_rb_sync_point_count(rb),
                        queued_entries);
        else
                offset = scnprintf(buf, PAGE_SIZE,
                        "%u\t%u\t%u\t%u\t%u\t%u\tNULL - %u/%u/%u - %d\n",
                        rb->nr_entries,
                        rb->mem,
                        rb->subm,
                        rb->sync,
                        rb->l2p_update,
#ifdef CONFIG_NVM_DEBUG
                        atomic_read(&rb->inflight_sync_point),
#else
                        0,
#endif
                        pblk_rb_read_count(rb),
                        pblk_rb_space(rb),
                        pblk_rb_sync_point_count(rb),
                        queued_entries);

        return offset;
}