Merge tag 'for-linus-20160324' of git://git.infradead.org/linux-mtd
Pull MTD updates from Brian Norris:
 "NAND:
   - Add sunxi_nand randomizer support
   - begin refactoring NAND ecclayout structs
   - fix pxa3xx_nand dmaengine usage
   - brcmnand: fix support for v7.1 controller
   - add Qualcomm NAND controller driver

  SPI NOR:
   - add new ls1021a, ls2080a support to Freescale QuadSPI
   - add new flash ID entries
   - support bottom-block protection for Winbond flash
   - support Status Register Write Protect
   - remove broken QPI support for Micron SPI flash

  JFFS2:
   - improve post-mount CRC scan efficiency

  General:
   - refactor bcm63xxpart parser, to later extend for NAND
   - add writebuf size parameter to mtdram

  Other minor code quality improvements"

* tag 'for-linus-20160324' of git://git.infradead.org/linux-mtd: (72 commits)
  mtd: nand: remove kerneldoc for removed function parameter
  mtd: nand: Qualcomm NAND controller driver
  dt/bindings: qcom_nandc: Add DT bindings
  mtd: nand: don't select chip in nand_chip's block_bad op
  mtd: spi-nor: support lock/unlock for a few Winbond chips
  mtd: spi-nor: add TB (Top/Bottom) protect support
  mtd: spi-nor: add SPI_NOR_HAS_LOCK flag
  mtd: spi-nor: use BIT() for flash_info flags
  mtd: spi-nor: disallow further writes to SR if WP# is low
  mtd: spi-nor: make lock/unlock bounds checks more obvious and robust
  mtd: spi-nor: silently drop lock/unlock for already locked/unlocked region
  mtd: spi-nor: wait for SR_WIP to clear on initial unlock
  mtd: nand: simplify nand_bch_init() usage
  mtd: mtdswap: remove useless if (!mtd->ecclayout) test
  mtd: create an mtd_oobavail() helper and make use of it
  mtd: kill the ecclayout->oobavail field
  mtd: nand: check status before reporting timeout
  mtd: bcm63xxpart: give width specifier an 'int', not 'size_t'
  mtd: mtdram: Add parameter for setting writebuf size
  mtd: nand: pxa3xx_nand: kill unused field 'drcmr_cmd'
  ...
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -134,38 +134,60 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
 	if (mutex_lock_interruptible(&c->alloc_sem))
 		return -EINTR;
 
 	for (;;) {
+		/* We can't start doing GC until we've finished checking
+		   the node CRCs etc. */
+		int bucket, want_ino;
+
 		spin_lock(&c->erase_completion_lock);
 		if (!c->unchecked_size)
 			break;
-
-		/* We can't start doing GC yet. We haven't finished checking
-		   the node CRCs etc. Do it now. */
-
-		/* checked_ino is protected by the alloc_sem */
-		if (c->checked_ino > c->highest_ino && xattr) {
-			pr_crit("Checked all inodes but still 0x%x bytes of unchecked space?\n",
-				c->unchecked_size);
-			jffs2_dbg_dump_block_lists_nolock(c);
-			spin_unlock(&c->erase_completion_lock);
-			mutex_unlock(&c->alloc_sem);
-			return -ENOSPC;
-		}
-
 		spin_unlock(&c->erase_completion_lock);
 
 		if (!xattr)
 			xattr = jffs2_verify_xattr(c);
 
 		spin_lock(&c->inocache_lock);
-		ic = jffs2_get_ino_cache(c, c->checked_ino++);
-		if (!ic) {
-			spin_unlock(&c->inocache_lock);
-			continue;
+		/* Instead of doing the inodes in numeric order, doing a lookup
+		 * in the hash for each possible number, just walk the hash
+		 * buckets of *existing* inodes. This means that we process
+		 * them out-of-order, but it can be a lot faster if there's
+		 * a sparse inode# space. Which there often is. */
+		want_ino = c->check_ino;
+		for (bucket = c->check_ino % c->inocache_hashsize ; bucket < c->inocache_hashsize; bucket++) {
+			for (ic = c->inocache_list[bucket]; ic; ic = ic->next) {
+				if (ic->ino < want_ino)
+					continue;
+
+				if (ic->state != INO_STATE_CHECKEDABSENT &&
+				    ic->state != INO_STATE_PRESENT)
+					goto got_next; /* with inocache_lock held */
+
+				jffs2_dbg(1, "Skipping ino #%u already checked\n",
+					  ic->ino);
+			}
+			want_ino = 0;
 		}
 
+		/* Point c->check_ino past the end of the last bucket. */
+		c->check_ino = ((c->highest_ino + c->inocache_hashsize + 1) &
+				~c->inocache_hashsize) - 1;
+
+		spin_unlock(&c->inocache_lock);
+
+		pr_crit("Checked all inodes but still 0x%x bytes of unchecked space?\n",
+			c->unchecked_size);
+		jffs2_dbg_dump_block_lists_nolock(c);
+		mutex_unlock(&c->alloc_sem);
+		return -ENOSPC;
+
+	got_next:
+		/* For next time round the loop, we want c->check_ino to indicate
+		 * the *next* one we want to check. And since we're walking the
+		 * buckets rather than doing it sequentially, it's: */
+		c->check_ino = ic->ino + c->inocache_hashsize;
+
 		if (!ic->pino_nlink) {
 			jffs2_dbg(1, "Skipping check of ino #%d with nlink/pino zero\n",
 				  ic->ino);
@@ -176,8 +198,6 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
 		switch(ic->state) {
 		case INO_STATE_CHECKEDABSENT:
 		case INO_STATE_PRESENT:
-			jffs2_dbg(1, "Skipping ino #%u already checked\n",
-				  ic->ino);
 			spin_unlock(&c->inocache_lock);
 			continue;
 
@@ -196,7 +216,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
 				  ic->ino);
 			/* We need to come back again for the _same_ inode. We've
 			   made no progress in this case, but that should be OK */
-			c->checked_ino--;
+			c->check_ino = ic->ino;
 
 			mutex_unlock(&c->alloc_sem);
 			sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
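The heart of the JFFS2 change above is replacing the sequential probe of every possible inode number (jffs2_get_ino_cache(c, c->checked_ino++)) with a walk over the inocache hash buckets, so a sparse inode-number space no longer costs one failed lookup per missing number. A minimal standalone sketch of that traversal follows; the names (struct ino_cache_sketch, find_next_unchecked) are hypothetical stand-ins for the kernel structures, not the actual API:

#include <stdint.h>
#include <stddef.h>

/* Hypothetical stand-in for struct jffs2_inode_cache: just enough state
 * to show the bucket walk. */
struct ino_cache_sketch {
	uint32_t ino;
	int needs_check;                  /* stands in for the INO_STATE_* test */
	struct ino_cache_sketch *next;    /* hash-bucket chain, ascending ino */
};

/* Resume the CRC scan at 'check_ino': start in the bucket that number
 * hashes into, skip chain entries below it, then visit the remaining
 * buckets with no lower bound (want_ino drops to 0 after the first). */
static struct ino_cache_sketch *
find_next_unchecked(struct ino_cache_sketch **buckets, uint32_t hashsize,
		    uint32_t check_ino)
{
	uint32_t want_ino = check_ino;
	uint32_t bucket;

	for (bucket = check_ino % hashsize; bucket < hashsize; bucket++) {
		struct ino_cache_sketch *ic;

		for (ic = buckets[bucket]; ic; ic = ic->next) {
			if (ic->ino < want_ino)
				continue;          /* handled on an earlier pass */
			if (ic->needs_check)
				return ic;         /* caller checks this inode next */
		}
		want_ino = 0;                      /* later buckets: take anything */
	}
	return NULL;                               /* nothing left to check */
}

When the walk finds an entry that still needs checking, the patch records resumption state as c->check_ino = ic->ino + c->inocache_hashsize, i.e. the next inode number that could hash into the same bucket, so the following pass picks up where this one left off.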
--- a/fs/jffs2/jffs2_fs_sb.h
+++ b/fs/jffs2/jffs2_fs_sb.h
@@ -49,7 +49,7 @@ struct jffs2_sb_info {
 	struct mtd_info *mtd;
 
 	uint32_t highest_ino;
-	uint32_t checked_ino;
+	uint32_t check_ino; /* *NEXT* inode to be checked */
 
 	unsigned int flags;
--- a/fs/jffs2/nodemgmt.c
+++ b/fs/jffs2/nodemgmt.c
@@ -846,8 +846,8 @@ int jffs2_thread_should_wake(struct jffs2_sb_info *c)
 		return 1;
 
 	if (c->unchecked_size) {
-		jffs2_dbg(1, "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
-			  c->unchecked_size, c->checked_ino);
+		jffs2_dbg(1, "jffs2_thread_should_wake(): unchecked_size %d, check_ino #%d\n",
+			  c->unchecked_size, c->check_ino);
 		return 1;
 	}
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -1183,22 +1183,20 @@ void jffs2_dirty_trigger(struct jffs2_sb_info *c)
 
 int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
 {
-	struct nand_ecclayout *oinfo = c->mtd->ecclayout;
-
 	if (!c->mtd->oobsize)
 		return 0;
 
 	/* Cleanmarker is out-of-band, so inline size zero */
 	c->cleanmarker_size = 0;
 
-	if (!oinfo || oinfo->oobavail == 0) {
+	if (c->mtd->oobavail == 0) {
 		pr_err("inconsistent device description\n");
 		return -EINVAL;
 	}
 
 	jffs2_dbg(1, "using OOB on NAND\n");
 
-	c->oobavail = oinfo->oobavail;
+	c->oobavail = c->mtd->oobavail;
 
 	/* Initialise write buffer */
 	init_rwsem(&c->wbuf_sem);
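The wbuf.c hunk is the filesystem-side half of the MTD core cleanups named in the shortlog ("mtd: kill the ecclayout->oobavail field", "mtd: create an mtd_oobavail() helper and make use of it"): JFFS2 now takes the usable OOB size straight from mtd_info rather than reaching into the NAND-specific ecclayout. For OOB read/write paths, the new core helper presumably reduces to picking between the free OOB bytes and the whole OOB area depending on the access mode; a sketch under that assumption (named *_sketch to avoid passing it off as the in-tree definition):

#include <linux/mtd/mtd.h>	/* struct mtd_info, struct mtd_oob_ops, MTD_OPS_AUTO_OOB */

/* Sketch of the assumed helper behaviour: auto-placed OOB operations may
 * only use the bytes the ECC layout leaves free (mtd->oobavail); raw or
 * explicitly placed operations may use the whole OOB area (mtd->oobsize). */
static inline int mtd_oobavail_sketch(struct mtd_info *mtd,
				      struct mtd_oob_ops *ops)
{
	return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
}

JFFS2 itself does not need the per-operation distinction here: it caches the value once at mount time, which is why the hunk above can simply read c->mtd->oobavail directly.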