/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: nodemgmt.c,v 1.117 2005/01/25 20:11:11 hammache Exp $
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched.h> /* For cond_resched() */
#include "nodelist.h"

/**
 * jffs2_reserve_space - request physical space to write nodes to flash
 * @c: superblock info
 * @minsize: Minimum acceptable size of allocation
 * @ofs: Returned value of node offset
 * @len: Returned value of allocation length
 * @prio: Allocation type - ALLOC_{NORMAL,DELETION}
 *
 * Requests a block of physical space on the flash. Returns zero for success
 * and puts 'ofs' and 'len' into the appropriate place, or returns -ENOSPC
 * or other error if appropriate.
 *
 * If it returns zero, jffs2_reserve_space() also downs the per-filesystem
 * allocation semaphore, to prevent more than one allocation from being
 * active at any time. The semaphore is later released by jffs2_complete_reservation().
 *
 * jffs2_reserve_space() may trigger garbage collection in order to make room
 * for the requested allocation.
 */
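
/*
 * Illustrative call sequence (a sketch for orientation, not copied from a
 * real caller; 'ri', 'datalen' and 'raw' are hypothetical): a writer
 * reserves space, writes the new node at the returned offset, registers
 * the node reference, and finally releases the reservation:
 *
 *	uint32_t ofs, len;
 *	int ret = jffs2_reserve_space(c, sizeof(ri) + datalen, &ofs, &len, ALLOC_NORMAL);
 *	if (ret)
 *		return ret;
 *	... write at most 'len' bytes of node data to flash at 'ofs' ...
 *	ret = jffs2_add_physical_node_ref(c, raw);
 *	jffs2_complete_reservation(c);
 */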

static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len);

int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, int prio)
{
	int ret = -EAGAIN;
	int blocksneeded = c->resv_blocks_write;
	/* align it */
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
	down(&c->alloc_sem);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));

	spin_lock(&c->erase_completion_lock);

	/* this needs a little more thought (true <tglx> :)) */
	while(ret == -EAGAIN) {
		while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
			int ret;
			uint32_t dirty, avail;

			/* calculate real dirty size
			 * dirty_size contains blocks on erase_pending_list
			 * those blocks are counted in c->nr_erasing_blocks.
			 * If one block is actually erased, it is no longer counted as dirty_space
			 * but it is still counted in c->nr_erasing_blocks, so we add it and subtract it
			 * again with c->nr_erasing_blocks * c->sector_size.
			 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks.
			 * This helps us to force gc and eventually pick a clean block to spread the load.
			 * We add unchecked_size here, as we hopefully will find some space to use.
			 * This will affect the sum only once, as gc first finishes checking
			 * of nodes.
			 */
			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
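			/* Worked example with made-up numbers: if sector_size is 0x10000,
			 * dirty_size 0x30000, erasing_size 0x20000, nr_erasing_blocks 2 and
			 * unchecked_size 0x8000, the two blocks already being erased cancel
			 * out (0x20000 - 2 * 0x10000 == 0), leaving dirty = 0x38000. */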
			if (dirty < c->nospc_dirty_size) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n");
					break;
				}
				D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
					  dirty, c->unchecked_size, c->nospc_dirty_size));

				spin_unlock(&c->erase_completion_lock);
				up(&c->alloc_sem);
				return -ENOSPC;
			}

			/* Calc possibly available space. Possibly available means that we
			 * don't know whether unchecked size contains obsoleted nodes, which could
			 * give us some more usable space. This will affect the sum only once,
			 * as gc first finishes checking of nodes.
			 * Return -ENOSPC, if the maximum possibly available space is less than
			 * or equal to blocksneeded * sector_size.
			 * This prevents endless gc looping on a nearly full filesystem, even if
			 * the check above passes.
			 */
			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
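			/* Worked example with made-up numbers: with sector_size 0x10000 and
			 * blocksneeded 5, free_size 0x20000 + dirty_size 0x18000 +
			 * erasing_size 0x10000 + unchecked_size 0x8000 gives avail 0x50000,
			 * 0x50000 / 0x10000 = 5 <= 5, so we return -ENOSPC rather than
			 * loop in GC forever. */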
			if ( (avail / c->sector_size) <= blocksneeded) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n");
					break;
				}

				D1(printk(KERN_DEBUG "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
					  avail, blocksneeded * c->sector_size));
				spin_unlock(&c->erase_completion_lock);
				up(&c->alloc_sem);
				return -ENOSPC;
			}

			up(&c->alloc_sem);

			D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
				  c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
				  c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_garbage_collect_pass(c);
			if (ret)
				return ret;

			cond_resched();

			if (signal_pending(current))
				return -EINTR;

			down(&c->alloc_sem);
			spin_lock(&c->erase_completion_lock);
		}

		ret = jffs2_do_reserve_space(c, minsize, ofs, len);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (ret)
		up(&c->alloc_sem);
	return ret;
}

int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len)
{
	int ret = -EAGAIN;
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));

	spin_lock(&c->erase_completion_lock);
	while(ret == -EAGAIN) {
		ret = jffs2_do_reserve_space(c, minsize, ofs, len);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	return ret;
}

/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len)
{
	struct jffs2_eraseblock *jeb = c->nextblock;

 restart:
	if (jeb && minsize > jeb->free_size) {
		/* Skip the end of this block and file it as having some dirty space */
		/* If there's a pending write to it, flush now */
		if (jffs2_wbuf_dirty(c)) {
			spin_unlock(&c->erase_completion_lock);
			D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			jeb = c->nextblock;
			goto restart;
		}
		c->wasted_size += jeb->free_size;
		c->free_size -= jeb->free_size;
		jeb->wasted_size += jeb->free_size;
		jeb->free_size = 0;

		/* Check whether the block is now dirty, or whether it was dirty already */
		if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
			c->dirty_size += jeb->wasted_size;
			c->wasted_size -= jeb->wasted_size;
			jeb->dirty_size += jeb->wasted_size;
			jeb->wasted_size = 0;
			if (VERYDIRTY(c, jeb->dirty_size)) {
				D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
					  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
				list_add_tail(&jeb->list, &c->very_dirty_list);
			} else {
				D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
					  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
				list_add_tail(&jeb->list, &c->dirty_list);
			}
		} else {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->clean_list);
		}
		c->nextblock = jeb = NULL;
	}

	if (!jeb) {
		struct list_head *next;
		/* Take the next block off the 'free' list */

		if (list_empty(&c->free_list)) {

			if (!c->nr_erasing_blocks &&
			    !list_empty(&c->erasable_list)) {
				struct jffs2_eraseblock *ejeb;

				ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
				list_del(&ejeb->list);
				list_add_tail(&ejeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_erase_pending_trigger(c);
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Triggering erase of erasable block at 0x%08x\n",
					  ejeb->offset));
			}

			if (!c->nr_erasing_blocks &&
			    !list_empty(&c->erasable_pending_wbuf_list)) {
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
				/* c->nextblock is NULL, no update to c->nextblock allowed */
				spin_unlock(&c->erase_completion_lock);
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				/* Have another go. It'll be on the erasable_list now */
				return -EAGAIN;
			}

			if (!c->nr_erasing_blocks) {
				/* Ouch. We're in GC, or we wouldn't have got here.
				   And there's no space left. At all. */
				printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
				       c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
				       list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
				return -ENOSPC;
			}

			spin_unlock(&c->erase_completion_lock);
			/* Don't wait for it; just erase one right now */
			jffs2_erase_pending_blocks(c, 1);
			spin_lock(&c->erase_completion_lock);

			/* An erase may have failed, decreasing the
			   amount of free space available. So we must
			   restart from the beginning */
			return -EAGAIN;
		}

		next = c->free_list.next;
		list_del(next);
		c->nextblock = jeb = list_entry(next, struct jffs2_eraseblock, list);
		c->nr_free_blocks--;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*ofs = jeb->offset + (c->sector_size - jeb->free_size);
	*len = jeb->free_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* Only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n", *len, *ofs));
	return 0;
}

/**
 * jffs2_add_physical_node_ref - add a physical node reference to the list
 * @c: superblock info
 * @new: new node reference to add
 *
 * Should only be used to report nodes for which space has been allocated
 * by jffs2_reserve_space.
 *
 * Must be called with the alloc_sem held.
 */

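/* Accounting sketch (illustrative, matching the body below): a reference of
 * total length 'len' always comes out of free_size; it is charged to
 * used_size for a live node, or straight to dirty_size if the ref is already
 * obsolete (which can happen after nextblock was refiled in wbuf.c). */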
int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new)
{
	struct jffs2_eraseblock *jeb;
	uint32_t len;

	jeb = &c->blocks[new->flash_offset / c->sector_size];
	len = ref_totlen(c, jeb, new);

	D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", ref_offset(new), ref_flags(new), len));
#if 1
	/* we could get some obsolete nodes after nextblock was refiled
	   in wbuf.c */
	if ((c->nextblock || !ref_obsolete(new))
	    && (jeb != c->nextblock || ref_offset(new) != jeb->offset + (c->sector_size - jeb->free_size))) {
		printk(KERN_WARNING "argh. node added in wrong place\n");
		jffs2_free_raw_node_ref(new);
		return -EINVAL;
	}
#endif
	spin_lock(&c->erase_completion_lock);

	if (!jeb->first_node)
		jeb->first_node = new;
	if (jeb->last_node)
		jeb->last_node->next_phys = new;
	jeb->last_node = new;

	jeb->free_size -= len;
	c->free_size -= len;
	if (ref_obsolete(new)) {
		jeb->dirty_size += len;
		c->dirty_size += len;
	} else {
		jeb->used_size += len;
		c->used_size += len;
	}

	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		if (jffs2_wbuf_dirty(c)) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	ACCT_SANITY_CHECK(c,jeb);
	D1(ACCT_PARANOIA_CHECK(jeb));

	spin_unlock(&c->erase_completion_lock);

	return 0;
}


void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
	jffs2_garbage_collect_trigger(c);
	up(&c->alloc_sem);
}

static inline int on_list(struct list_head *obj, struct list_head *head)
{
	struct list_head *this;

	list_for_each(this, head) {
		if (this == obj) {
			D1(printk("%p is on list at %p\n", obj, head));
			return 1;

		}
	}
	return 0;
}

void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize;
	size_t retlen;

	if(!ref) {
		printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];

	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & JFFS2_SB_FLAG_MOUNTING)) {
		/* Hm. This may confuse static lock analysis. If any of the above
		   three conditions is false, we're going to return from this
		   function without actually obliterating any nodes or freeing
		   any jffs2_raw_node_refs. So we don't need to stop erases from
		   happening, or protect against people holding an obsolete
		   jffs2_raw_node_ref without the erase_completion_lock. */
		down(&c->erase_free_sem);
	}

	spin_lock(&c->erase_completion_lock);

	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < ref_totlen(c, jeb, ref))) {
			printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
			       ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->unchecked_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
		jeb->unchecked_size -= ref_totlen(c, jeb, ref);
		c->unchecked_size -= ref_totlen(c, jeb, ref);
	} else {
		D1(if (unlikely(jeb->used_size < ref_totlen(c, jeb, ref))) {
			printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
			       ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
		jeb->used_size -= ref_totlen(c, jeb, ref);
		c->used_size -= ref_totlen(c, jeb, ref);
	}

	/* Take care that the wasted size is taken into account */
	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + ref_totlen(c, jeb, ref))) && jeb != c->nextblock) {
		D1(printk("Dirtying\n"));
		addedsize = ref_totlen(c, jeb, ref);
		jeb->dirty_size += ref_totlen(c, jeb, ref);
		c->dirty_size += ref_totlen(c, jeb, ref);

		/* Convert wasted space to dirty, if not a bad block */
		if (jeb->wasted_size) {
			if (on_list(&jeb->list, &c->bad_used_list)) {
				D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
					  jeb->offset));
				addedsize = 0; /* To fool the refiling code later */
			} else {
				D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
					  jeb->wasted_size, jeb->offset));
				addedsize += jeb->wasted_size;
				jeb->dirty_size += jeb->wasted_size;
				c->dirty_size += jeb->wasted_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->wasted_size = 0;
			}
		}
	} else {
		D1(printk("Wasting\n"));
		addedsize = 0;
		jeb->wasted_size += ref_totlen(c, jeb, ref);
		c->wasted_size += ref_totlen(c, jeb, ref);
	}
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

	ACCT_SANITY_CHECK(c, jeb);

	D1(ACCT_PARANOIA_CHECK(jeb));

	if (c->flags & JFFS2_SB_FLAG_MOUNTING) {
		/* Mount in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}

	if (jeb == c->nextblock) {
		D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		if (jeb == c->gcblock) {
			D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
			c->gcblock = NULL;
		} else {
			D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
			if (jiffies & 127) {
				/* Most of the time, we just erase it immediately. Otherwise we
				   spend ages scanning it on mount, etc. */
				D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_erase_pending_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		D1(printk(KERN_DEBUG "Done OK\n"));
	} else if (jeb == c->gcblock) {
		D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
	}

	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c)) {
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
	   by jffs2_free_all_node_refs() in erase.c. Which is nice. */

	D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(ref_totlen(c, jeb, ref))) {
		printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), ref_totlen(c, jeb, ref));
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
		goto out_erase_sem;
	}
	/* XXX FIXME: This is ugly now */
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}

	/* Nodes which have been marked obsolete no longer need to be
	   associated with any inode. Remove them from the per-inode list.

	   Note we can't do this for NAND at the moment because we need
	   obsolete dirent nodes to stay on the lists, because of the
	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
	   because we delete the inocache, and on NAND we need that to
	   stay around until all the nodes are actually erased, in order
	   to stop us from giving the same inode number to another newly
	   created inode. */
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		ic = jffs2_raw_ref_to_ic(ref);
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		if (ic->nodes == (void *)ic) {
			D1(printk(KERN_DEBUG "inocache for ino #%u is all gone now. Freeing\n", ic->ino));
			jffs2_del_ino_cache(c, ic);
			jffs2_free_inode_cache(ic);
		}

		spin_unlock(&c->erase_completion_lock);
	}


	/* Merge with the next node in the physical list, if there is one
	   and if it's also obsolete and if it doesn't belong to any inode */
	if (ref->next_phys && ref_obsolete(ref->next_phys) &&
	    !ref->next_phys->next_in_ino) {
		struct jffs2_raw_node_ref *n = ref->next_phys;

		spin_lock(&c->erase_completion_lock);

		ref->__totlen += n->__totlen;
		ref->next_phys = n->next_phys;
		if (jeb->last_node == n) jeb->last_node = ref;
		if (jeb->gc_node == n) {
			/* gc will be happy continuing gc on this node */
			jeb->gc_node = ref;
		}
		spin_unlock(&c->erase_completion_lock);

		jffs2_free_raw_node_ref(n);
	}
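
	/* Illustrative sketch of the merge above: if the physical list was
	 *    A -> ref -> n -> B
	 * and 'n' is obsolete with no inode owner, 'n' is absorbed into 'ref'
	 * (ref->__totlen grows by n->__totlen), leaving  A -> ref -> B. */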

	/* Also merge with the previous node in the list, if there is one
	   and that one is obsolete */
	if (ref != jeb->first_node) {
		struct jffs2_raw_node_ref *p = jeb->first_node;

		spin_lock(&c->erase_completion_lock);

		while (p->next_phys != ref)
			p = p->next_phys;

		if (ref_obsolete(p) && !ref->next_in_ino) {
			p->__totlen += ref->__totlen;
			if (jeb->last_node == ref) {
				jeb->last_node = p;
			}
			if (jeb->gc_node == ref) {
				/* gc will be happy continuing gc on this node */
				jeb->gc_node = p;
			}
			p->next_phys = ref->next_phys;
			jffs2_free_raw_node_ref(ref);
		}
		spin_unlock(&c->erase_completion_lock);
	}
 out_erase_sem:
	up(&c->erase_free_sem);
}

#if CONFIG_JFFS2_FS_DEBUG >= 2
void jffs2_dump_block_lists(struct jffs2_sb_info *c)
{


	printk(KERN_DEBUG "jffs2_dump_block_lists:\n");
	printk(KERN_DEBUG "flash_size: %08x\n", c->flash_size);
	printk(KERN_DEBUG "used_size: %08x\n", c->used_size);
	printk(KERN_DEBUG "dirty_size: %08x\n", c->dirty_size);
	printk(KERN_DEBUG "wasted_size: %08x\n", c->wasted_size);
	printk(KERN_DEBUG "unchecked_size: %08x\n", c->unchecked_size);
	printk(KERN_DEBUG "free_size: %08x\n", c->free_size);
	printk(KERN_DEBUG "erasing_size: %08x\n", c->erasing_size);
	printk(KERN_DEBUG "bad_size: %08x\n", c->bad_size);
	printk(KERN_DEBUG "sector_size: %08x\n", c->sector_size);
	printk(KERN_DEBUG "jffs2_reserved_blocks size: %08x\n", c->sector_size * c->resv_blocks_write);

	if (c->nextblock) {
		printk(KERN_DEBUG "nextblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
		       c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->unchecked_size, c->nextblock->free_size);
	} else {
		printk(KERN_DEBUG "nextblock: NULL\n");
	}
	if (c->gcblock) {
		printk(KERN_DEBUG "gcblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
		       c->gcblock->offset, c->gcblock->used_size, c->gcblock->dirty_size, c->gcblock->wasted_size, c->gcblock->unchecked_size, c->gcblock->free_size);
	} else {
		printk(KERN_DEBUG "gcblock: NULL\n");
	}
	if (list_empty(&c->clean_list)) {
		printk(KERN_DEBUG "clean_list: empty\n");
	} else {
		struct list_head *this;
		int numblocks = 0;
		uint32_t dirty = 0;

		list_for_each(this, &c->clean_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			numblocks ++;
			dirty += jeb->wasted_size;
			printk(KERN_DEBUG "clean_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
		printk (KERN_DEBUG "Contains %d blocks with total wasted size %u, average wasted size: %u\n", numblocks, dirty, dirty / numblocks);
	}
	if (list_empty(&c->very_dirty_list)) {
		printk(KERN_DEBUG "very_dirty_list: empty\n");
	} else {
		struct list_head *this;
		int numblocks = 0;
		uint32_t dirty = 0;

		list_for_each(this, &c->very_dirty_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			numblocks ++;
			dirty += jeb->dirty_size;
			printk(KERN_DEBUG "very_dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
		printk (KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n",
			numblocks, dirty, dirty / numblocks);
	}
	if (list_empty(&c->dirty_list)) {
		printk(KERN_DEBUG "dirty_list: empty\n");
	} else {
		struct list_head *this;
		int numblocks = 0;
		uint32_t dirty = 0;

		list_for_each(this, &c->dirty_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			numblocks ++;
			dirty += jeb->dirty_size;
			printk(KERN_DEBUG "dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
		printk (KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n",
			numblocks, dirty, dirty / numblocks);
	}
	if (list_empty(&c->erasable_list)) {
		printk(KERN_DEBUG "erasable_list: empty\n");
	} else {
		struct list_head *this;

		list_for_each(this, &c->erasable_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			printk(KERN_DEBUG "erasable_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
	}
	if (list_empty(&c->erasing_list)) {
		printk(KERN_DEBUG "erasing_list: empty\n");
	} else {
		struct list_head *this;

		list_for_each(this, &c->erasing_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			printk(KERN_DEBUG "erasing_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
	}
	if (list_empty(&c->erase_pending_list)) {
		printk(KERN_DEBUG "erase_pending_list: empty\n");
	} else {
		struct list_head *this;

		list_for_each(this, &c->erase_pending_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			printk(KERN_DEBUG "erase_pending_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
	}
	if (list_empty(&c->erasable_pending_wbuf_list)) {
		printk(KERN_DEBUG "erasable_pending_wbuf_list: empty\n");
	} else {
		struct list_head *this;

		list_for_each(this, &c->erasable_pending_wbuf_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			printk(KERN_DEBUG "erasable_pending_wbuf_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
	}
	if (list_empty(&c->free_list)) {
		printk(KERN_DEBUG "free_list: empty\n");
	} else {
		struct list_head *this;

		list_for_each(this, &c->free_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			printk(KERN_DEBUG "free_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
	}
	if (list_empty(&c->bad_list)) {
		printk(KERN_DEBUG "bad_list: empty\n");
	} else {
		struct list_head *this;

		list_for_each(this, &c->bad_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			printk(KERN_DEBUG "bad_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
	}
	if (list_empty(&c->bad_used_list)) {
		printk(KERN_DEBUG "bad_used_list: empty\n");
	} else {
		struct list_head *this;

		list_for_each(this, &c->bad_used_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			printk(KERN_DEBUG "bad_used_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
	}
}
#endif /* CONFIG_JFFS2_FS_DEBUG */

int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
	int ret = 0;
	uint32_t dirty;

	if (c->unchecked_size) {
		D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
			  c->unchecked_size, c->checked_ino));
		return 1;
	}

	/* dirty_size contains blocks on erase_pending_list
	 * those blocks are counted in c->nr_erasing_blocks.
	 * If one block is actually erased, it is no longer counted as dirty_space
	 * but it is still counted in c->nr_erasing_blocks, so we add it and subtract it
	 * again with c->nr_erasing_blocks * c->sector_size.
	 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks.
	 * This helps us to force gc and eventually pick a clean block to spread the load.
	 */
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;
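	/* Worked example with made-up numbers: sector_size 0x10000, one full block
	 * on erase_pending_list (counted in dirty_size and nr_erasing_blocks), one
	 * block currently erasing (counted in erasing_size and nr_erasing_blocks),
	 * plus 0x4000 of ordinary dirty bytes elsewhere:
	 * 0x14000 + 0x10000 - 2 * 0x10000 = 0x4000. */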

	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
	    (dirty > c->nospc_dirty_size))
		ret = 1;

	D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n",
		  c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no"));

	return ret;
}