[6ac6a5c8] | 1 | #include "rtems-jffs2-config.h" |
---|
| 2 | |
---|
[0c0f128] | 3 | /* |
---|
| 4 | * JFFS2 -- Journalling Flash File System, Version 2. |
---|
| 5 | * |
---|
| 6 | * Copyright © 2001-2007 Red Hat, Inc. |
---|
| 7 | * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org> |
---|
| 8 | * |
---|
| 9 | * Created by David Woodhouse <dwmw2@infradead.org> |
---|
| 10 | * |
---|
| 11 | * For licensing information, see the file 'LICENCE' in this directory. |
---|
| 12 | * |
---|
| 13 | */ |
---|
| 14 | |
---|
| 15 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
---|
| 16 | |
---|
| 17 | #include <linux/kernel.h> |
---|
| 18 | #include <linux/sched.h> |
---|
| 19 | #include <linux/slab.h> |
---|
| 20 | #include <linux/vmalloc.h> |
---|
| 21 | #include <linux/mtd/mtd.h> |
---|
| 22 | #include "nodelist.h" |
---|
| 23 | |
---|
| 24 | static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *, |
---|
| 25 | struct jffs2_inode_cache *, struct jffs2_full_dirent **); |
---|
| 26 | |
---|
| 27 | static inline struct jffs2_inode_cache * |
---|
| 28 | first_inode_chain(int *i, struct jffs2_sb_info *c) |
---|
| 29 | { |
---|
| 30 | for (; *i < c->inocache_hashsize; (*i)++) { |
---|
| 31 | if (c->inocache_list[*i]) |
---|
| 32 | return c->inocache_list[*i]; |
---|
| 33 | } |
---|
| 34 | return NULL; |
---|
| 35 | } |
---|
| 36 | |
---|
| 37 | static inline struct jffs2_inode_cache * |
---|
| 38 | next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c) |
---|
| 39 | { |
---|
| 40 | /* More in this chain? */ |
---|
| 41 | if (ic->next) |
---|
| 42 | return ic->next; |
---|
| 43 | (*i)++; |
---|
| 44 | return first_inode_chain(i, c); |
---|
| 45 | } |
---|
| 46 | |
---|
/* Iterate over every inocache in the filesystem, walking each hash
   bucket's chain in turn. 'i' is the bucket index cursor and 'ic'
   the current inocache. */
#define for_each_inode(i, c, ic)	\
	for (i = 0, ic = first_inode_chain(&i, (c)); \
	     ic;	\
	     ic = next_inode(&i, ic, (c)))
---|
| 51 | |
---|
| 52 | |
---|
| 53 | static void jffs2_build_inode_pass1(struct jffs2_sb_info *c, |
---|
| 54 | struct jffs2_inode_cache *ic) |
---|
| 55 | { |
---|
| 56 | struct jffs2_full_dirent *fd; |
---|
| 57 | |
---|
| 58 | dbg_fsbuild("building directory inode #%u\n", ic->ino); |
---|
| 59 | |
---|
| 60 | /* For each child, increase nlink */ |
---|
| 61 | for(fd = ic->scan_dents; fd; fd = fd->next) { |
---|
| 62 | struct jffs2_inode_cache *child_ic; |
---|
| 63 | if (!fd->ino) |
---|
| 64 | continue; |
---|
| 65 | |
---|
| 66 | /* we can get high latency here with huge directories */ |
---|
| 67 | |
---|
| 68 | child_ic = jffs2_get_ino_cache(c, fd->ino); |
---|
| 69 | if (!child_ic) { |
---|
| 70 | dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n", |
---|
| 71 | fd->name, fd->ino, ic->ino); |
---|
| 72 | jffs2_mark_node_obsolete(c, fd->raw); |
---|
| 73 | continue; |
---|
| 74 | } |
---|
| 75 | |
---|
| 76 | if (fd->type == DT_DIR) { |
---|
| 77 | if (child_ic->pino_nlink) { |
---|
| 78 | JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n", |
---|
| 79 | fd->name, fd->ino, ic->ino); |
---|
| 80 | /* TODO: What do we do about it? */ |
---|
| 81 | } else { |
---|
| 82 | child_ic->pino_nlink = ic->ino; |
---|
| 83 | } |
---|
| 84 | } else |
---|
| 85 | child_ic->pino_nlink++; |
---|
| 86 | |
---|
| 87 | dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino); |
---|
| 88 | /* Can't free scan_dents so far. We might need them in pass 2 */ |
---|
| 89 | } |
---|
| 90 | } |
---|
| 91 | |
---|
/* Scan plan:
 - Scan physical nodes. Build map of inodes/dirents. Allocate inocaches as we go
 - Scan directory tree from top down, setting nlink in inocaches
 - Scan inocaches for inodes with nlink==0
*/
/*
 * Build the in-core filesystem representation from the raw medium.
 * Returns 0 on success, or the error from jffs2_scan_medium().
 */
static int jffs2_build_filesystem(struct jffs2_sb_info *c)
{
	int ret;
	int i;
	struct jffs2_inode_cache *ic;
	struct jffs2_full_dirent *fd;
	struct jffs2_full_dirent *dead_fds = NULL;

	dbg_fsbuild("build FS data structures\n");

	/* First, scan the medium and build all the inode caches with
	   lists of physical nodes */

	c->flags |= JFFS2_SB_FLAG_SCANNING;
	ret = jffs2_scan_medium(c);
	c->flags &= ~JFFS2_SB_FLAG_SCANNING;
	if (ret)
		goto exit;

	dbg_fsbuild("scanned flash completely\n");
	jffs2_dbg_dump_block_lists_nolock(c);

	dbg_fsbuild("pass 1 starting\n");
	c->flags |= JFFS2_SB_FLAG_BUILDING;
	/* Now scan the directory tree, increasing nlink according to every dirent found. */
	for_each_inode(i, c, ic) {
		if (ic->scan_dents) {
			jffs2_build_inode_pass1(c, ic);
			cond_resched();
		}
	}

	dbg_fsbuild("pass 1 complete\n");

	/* Next, scan for inodes with nlink == 0 and remove them. If
	   they were directories, then decrement the nlink of their
	   children too, and repeat the scan. As that's going to be
	   a fairly uncommon occurrence, it's not so evil to do it this
	   way. Recursion bad. */
	dbg_fsbuild("pass 2 starting\n");

	for_each_inode(i, c, ic) {
		if (ic->pino_nlink)
			continue;

		jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
		cond_resched();
	}

	/* Pass 2a: dead_fds collects dirents of children orphaned by
	   pass 2; removing one may orphan further children, which get
	   pushed back onto the same list, so loop until it drains
	   (iteration in place of recursion). */
	dbg_fsbuild("pass 2a starting\n");

	while (dead_fds) {
		fd = dead_fds;
		dead_fds = fd->next;

		ic = jffs2_get_ino_cache(c, fd->ino);

		if (ic)
			jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
		jffs2_free_full_dirent(fd);
	}

	dbg_fsbuild("pass 2a complete\n");
	dbg_fsbuild("freeing temporary data structures\n");

	/* Finally, we can scan again and free the dirent structs */
	for_each_inode(i, c, ic) {
		while(ic->scan_dents) {
			fd = ic->scan_dents;
			ic->scan_dents = fd->next;
			jffs2_free_full_dirent(fd);
		}
		ic->scan_dents = NULL;
		cond_resched();
	}
	jffs2_build_xattr_subsystem(c);
	c->flags &= ~JFFS2_SB_FLAG_BUILDING;

	dbg_fsbuild("FS build complete\n");

	/* Rotate the lists by some number to ensure wear levelling */
	jffs2_rotate_lists(c);

	ret = 0;

exit:
	if (ret) {
		/* Failure path: free any scan_dents already collected
		   so they are not leaked. */
		for_each_inode(i, c, ic) {
			while(ic->scan_dents) {
				fd = ic->scan_dents;
				ic->scan_dents = fd->next;
				jffs2_free_full_dirent(fd);
			}
		}
		jffs2_clear_xattr_subsystem(c);
	}

	return ret;
}
---|
| 196 | |
---|
/*
 * Remove an inode whose link count has reached zero: obsolete every
 * physical node belonging to it, then dispose of its dirents.  If it
 * was a directory, each live child loses a link; a child that thereby
 * reaches zero links has its dirent queued on *dead_fds for the
 * caller (pass 2a) to process, avoiding recursion on deep trees.
 */
static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c,
					struct jffs2_inode_cache *ic,
					struct jffs2_full_dirent **dead_fds)
{
	struct jffs2_raw_node_ref *raw;
	struct jffs2_full_dirent *fd;

	dbg_fsbuild("removing ino #%u with nlink == zero.\n", ic->ino);

	/* The per-inode node list is terminated by a pointer back to
	   the inocache itself, not by NULL. */
	raw = ic->nodes;
	while (raw != (void *)ic) {
		struct jffs2_raw_node_ref *next = raw->next_in_ino;
		dbg_fsbuild("obsoleting node at 0x%08x\n", ref_offset(raw));
		jffs2_mark_node_obsolete(c, raw);
		raw = next;
	}

	if (ic->scan_dents) {
		int whinged = 0;
		dbg_fsbuild("inode #%u was a directory which may have children...\n", ic->ino);

		while(ic->scan_dents) {
			struct jffs2_inode_cache *child_ic;

			fd = ic->scan_dents;
			ic->scan_dents = fd->next;

			if (!fd->ino) {
				/* It's a deletion dirent. Ignore it */
				dbg_fsbuild("child \"%s\" is a deletion dirent, skipping...\n", fd->name);
				jffs2_free_full_dirent(fd);
				continue;
			}
			/* Remember that at least one real child was found. */
			if (!whinged)
				whinged = 1;

			dbg_fsbuild("removing child \"%s\", ino #%u\n", fd->name, fd->ino);

			child_ic = jffs2_get_ino_cache(c, fd->ino);
			if (!child_ic) {
				dbg_fsbuild("cannot remove child \"%s\", ino #%u, because it doesn't exist\n",
						fd->name, fd->ino);
				jffs2_free_full_dirent(fd);
				continue;
			}

			/* Reduce nlink of the child. If it's now zero, stick it on the
			   dead_fds list to be cleaned up later. Else just free the fd */

			/* A directory's pino_nlink holds its parent's
			   ino (set in pass 1), so losing that one
			   parent zeroes it outright. */
			if (fd->type == DT_DIR)
				child_ic->pino_nlink = 0;
			else
				child_ic->pino_nlink--;

			if (!child_ic->pino_nlink) {
				dbg_fsbuild("inode #%u (\"%s\") now has no links; adding to dead_fds list.\n",
					  fd->ino, fd->name);
				fd->next = *dead_fds;
				*dead_fds = fd;
			} else {
				dbg_fsbuild("inode #%u (\"%s\") has now got nlink %d. Ignoring.\n",
					  fd->ino, fd->name, child_ic->pino_nlink);
				jffs2_free_full_dirent(fd);
			}
		}
	}

	/*
	   We don't delete the inocache from the hash list and free it yet.
	   The erase code will do that, when all the nodes are completely gone.
	*/
}
---|
| 269 | |
---|
| 270 | static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c) |
---|
| 271 | { |
---|
| 272 | uint32_t size; |
---|
| 273 | |
---|
| 274 | /* Deletion should almost _always_ be allowed. We're fairly |
---|
| 275 | buggered once we stop allowing people to delete stuff |
---|
| 276 | because there's not enough free space... */ |
---|
| 277 | c->resv_blocks_deletion = 2; |
---|
| 278 | |
---|
| 279 | /* Be conservative about how much space we need before we allow writes. |
---|
| 280 | On top of that which is required for deletia, require an extra 2% |
---|
| 281 | of the medium to be available, for overhead caused by nodes being |
---|
| 282 | split across blocks, etc. */ |
---|
| 283 | |
---|
| 284 | size = c->flash_size / 50; /* 2% of flash size */ |
---|
| 285 | size += c->nr_blocks * 100; /* And 100 bytes per eraseblock */ |
---|
| 286 | size += c->sector_size - 1; /* ... and round up */ |
---|
| 287 | |
---|
| 288 | c->resv_blocks_write = c->resv_blocks_deletion + (size / c->sector_size); |
---|
| 289 | |
---|
| 290 | /* When do we let the GC thread run in the background */ |
---|
| 291 | |
---|
| 292 | c->resv_blocks_gctrigger = c->resv_blocks_write + 1; |
---|
| 293 | |
---|
| 294 | /* When do we allow garbage collection to merge nodes to make |
---|
| 295 | long-term progress at the expense of short-term space exhaustion? */ |
---|
| 296 | c->resv_blocks_gcmerge = c->resv_blocks_deletion + 1; |
---|
| 297 | |
---|
| 298 | /* When do we allow garbage collection to eat from bad blocks rather |
---|
| 299 | than actually making progress? */ |
---|
| 300 | c->resv_blocks_gcbad = 0;//c->resv_blocks_deletion + 2; |
---|
| 301 | |
---|
| 302 | /* What number of 'very dirty' eraseblocks do we allow before we |
---|
| 303 | trigger the GC thread even if we don't _need_ the space. When we |
---|
| 304 | can't mark nodes obsolete on the medium, the old dirty nodes cause |
---|
| 305 | performance problems because we have to inspect and discard them. */ |
---|
| 306 | c->vdirty_blocks_gctrigger = c->resv_blocks_gctrigger; |
---|
| 307 | if (jffs2_can_mark_obsolete(c)) |
---|
| 308 | c->vdirty_blocks_gctrigger *= 10; |
---|
| 309 | |
---|
| 310 | /* If there's less than this amount of dirty space, don't bother |
---|
| 311 | trying to GC to make more space. It'll be a fruitless task */ |
---|
| 312 | c->nospc_dirty_size = c->sector_size + (c->flash_size / 100); |
---|
| 313 | |
---|
| 314 | dbg_fsbuild("trigger levels (size %d KiB, block size %d KiB, %d blocks)\n", |
---|
| 315 | c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks); |
---|
| 316 | dbg_fsbuild("Blocks required to allow deletion: %d (%d KiB)\n", |
---|
| 317 | c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024); |
---|
| 318 | dbg_fsbuild("Blocks required to allow writes: %d (%d KiB)\n", |
---|
| 319 | c->resv_blocks_write, c->resv_blocks_write*c->sector_size/1024); |
---|
| 320 | dbg_fsbuild("Blocks required to quiesce GC thread: %d (%d KiB)\n", |
---|
| 321 | c->resv_blocks_gctrigger, c->resv_blocks_gctrigger*c->sector_size/1024); |
---|
| 322 | dbg_fsbuild("Blocks required to allow GC merges: %d (%d KiB)\n", |
---|
| 323 | c->resv_blocks_gcmerge, c->resv_blocks_gcmerge*c->sector_size/1024); |
---|
| 324 | dbg_fsbuild("Blocks required to GC bad blocks: %d (%d KiB)\n", |
---|
| 325 | c->resv_blocks_gcbad, c->resv_blocks_gcbad*c->sector_size/1024); |
---|
| 326 | dbg_fsbuild("Amount of dirty space required to GC: %d bytes\n", |
---|
| 327 | c->nospc_dirty_size); |
---|
| 328 | dbg_fsbuild("Very dirty blocks before GC triggered: %d\n", |
---|
| 329 | c->vdirty_blocks_gctrigger); |
---|
| 330 | } |
---|
| 331 | |
---|
/*
 * Mount-time entry point: allocate and initialise the per-eraseblock
 * array and the block lists, bring up the summary subsystem, build
 * the in-core filesystem from the medium, then compute the GC
 * trigger levels.  Returns 0 on success or a negative errno.
 */
int jffs2_do_mount_fs(struct jffs2_sb_info *c)
{
	int ret;
	int i;
	int size;

	c->free_size = c->flash_size;
	c->nr_blocks = c->flash_size / c->sector_size;
	size = sizeof(struct jffs2_eraseblock) * c->nr_blocks;
#ifndef __ECOS
	/* A large block array may be too big for kmalloc; use vmalloc
	   when the core code says so. */
	if (jffs2_blocks_use_vmalloc(c))
		c->blocks = vzalloc(size);
	else
#endif
	c->blocks = kzalloc(size, GFP_KERNEL);
	if (!c->blocks)
		return -ENOMEM;

	/* Every eraseblock starts out entirely free. */
	for (i=0; i<c->nr_blocks; i++) {
		INIT_LIST_HEAD(&c->blocks[i].list);
		c->blocks[i].offset = i * c->sector_size;
		c->blocks[i].free_size = c->sector_size;
	}

	INIT_LIST_HEAD(&c->clean_list);
	INIT_LIST_HEAD(&c->very_dirty_list);
	INIT_LIST_HEAD(&c->dirty_list);
	INIT_LIST_HEAD(&c->erasable_list);
	INIT_LIST_HEAD(&c->erasing_list);
	INIT_LIST_HEAD(&c->erase_checking_list);
	INIT_LIST_HEAD(&c->erase_pending_list);
	INIT_LIST_HEAD(&c->erasable_pending_wbuf_list);
	INIT_LIST_HEAD(&c->erase_complete_list);
	INIT_LIST_HEAD(&c->free_list);
	INIT_LIST_HEAD(&c->bad_list);
	INIT_LIST_HEAD(&c->bad_used_list);
	c->highest_ino = 1;
	c->summary = NULL;

	ret = jffs2_sum_init(c);
	if (ret)
		goto out_free;

	if (jffs2_build_filesystem(c)) {
		/* Build failed: tear down what the scan created. */
		dbg_fsbuild("build_fs failed\n");
		jffs2_free_ino_caches(c);
		jffs2_free_raw_node_refs(c);
		ret = -EIO;
		goto out_free;
	}

	jffs2_calc_trigger_levels(c);

	return 0;

out_free:
	/* Free with the allocator that was actually used above. */
#ifndef __ECOS
	if (jffs2_blocks_use_vmalloc(c))
		vfree(c->blocks);
	else
#endif
	kfree(c->blocks);

	return ret;
}
---|