Merge branch 'akpm' (Andrew's patch-bomb)
Merge the rest of Andrew's patches for -rc1:

 "A bunch of fixes and misc missed-out-on things.  That'll do for -rc1.
  I still have a batch of IPC patches which still have a possible bug
  report which I'm chasing down."

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (25 commits)
  keys: use keyring_alloc() to create module signing keyring
  keys: fix unreachable code
  sendfile: allows bypassing of notifier events
  SGI-XP: handle non-fatal traps
  fat: fix incorrect function comment
  Documentation: ABI: remove testing/sysfs-devices-node
  proc: fix inconsistent lock state
  linux/kernel.h: fix DIV_ROUND_CLOSEST with unsigned divisors
  memcg: don't register hotcpu notifier from ->css_alloc()
  checkpatch: warn on uapi #includes that #include <uapi/...
  revert "rtc: recycle id when unloading a rtc driver"
  mm: clean up transparent hugepage sysfs error messages
  hfsplus: add error message for the case of failure of sync fs in delayed_sync_fs() method
  hfsplus: rework processing of hfs_btree_write() returned error
  hfsplus: rework processing errors in hfsplus_free_extents()
  hfsplus: avoid crash on failed block map free
  kcmp: include linux/ptrace.h
  drivers/rtc/rtc-imxdi.c: must include <linux/spinlock.h>
  mm: cma: WARN if freed memory is still in use
  exec: do not leave bprm->interp on stack
  ...
--- a/fs/hfsplus/bitmap.c
+++ b/fs/hfsplus/bitmap.c
@@ -176,12 +176,14 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
 	dprint(DBG_BITMAP, "block_free: %u,%u\n", offset, count);
 	/* are all of the bits in range? */
 	if ((offset + count) > sbi->total_blocks)
-		return -2;
+		return -ENOENT;
 
 	mutex_lock(&sbi->alloc_mutex);
 	mapping = sbi->alloc_file->i_mapping;
 	pnr = offset / PAGE_CACHE_BITS;
 	page = read_mapping_page(mapping, pnr, NULL);
+	if (IS_ERR(page))
+		goto kaboom;
 	pptr = kmap(page);
 	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
 	end = pptr + PAGE_CACHE_BITS / 32;
@@ -214,6 +216,8 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
 		set_page_dirty(page);
 		kunmap(page);
 		page = read_mapping_page(mapping, ++pnr, NULL);
+		if (IS_ERR(page))
+			goto kaboom;
 		pptr = kmap(page);
 		curr = pptr;
 		end = pptr + PAGE_CACHE_BITS / 32;
@@ -232,4 +236,11 @@ out:
 	mutex_unlock(&sbi->alloc_mutex);
 
 	return 0;
+
+kaboom:
+	printk(KERN_CRIT "hfsplus: unable to mark blocks free: error %ld\n",
+			PTR_ERR(page));
+	mutex_unlock(&sbi->alloc_mutex);
+
+	return -EIO;
 }
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -159,7 +159,7 @@ void hfs_btree_close(struct hfs_btree *tree)
 	kfree(tree);
 }
 
-void hfs_btree_write(struct hfs_btree *tree)
+int hfs_btree_write(struct hfs_btree *tree)
 {
 	struct hfs_btree_header_rec *head;
 	struct hfs_bnode *node;
@@ -168,7 +168,7 @@ void hfs_btree_write(struct hfs_btree *tree)
 	node = hfs_bnode_find(tree, 0);
 	if (IS_ERR(node))
 		/* panic? */
-		return;
+		return -EIO;
 	/* Load the header */
 	page = node->page[0];
 	head = (struct hfs_btree_header_rec *)(kmap(page) +
@@ -186,6 +186,7 @@ void hfs_btree_write(struct hfs_btree *tree)
 	kunmap(page);
 	set_page_dirty(page);
 	hfs_bnode_put(node);
+	return 0;
 }
 
 static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
--- a/fs/hfsplus/extents.c
+++ b/fs/hfsplus/extents.c
@@ -329,6 +329,7 @@ static int hfsplus_free_extents(struct super_block *sb,
 {
 	u32 count, start;
 	int i;
+	int err = 0;
 
 	hfsplus_dump_extent(extent);
 	for (i = 0; i < 8; extent++, i++) {
@@ -345,18 +346,33 @@ found:
 	for (;;) {
 		start = be32_to_cpu(extent->start_block);
 		if (count <= block_nr) {
-			hfsplus_block_free(sb, start, count);
+			err = hfsplus_block_free(sb, start, count);
+			if (err) {
+				printk(KERN_ERR "hfs: can't free extent\n");
+				dprint(DBG_EXTENT, " start: %u count: %u\n",
+						start, count);
+			}
 			extent->block_count = 0;
 			extent->start_block = 0;
 			block_nr -= count;
 		} else {
 			count -= block_nr;
-			hfsplus_block_free(sb, start + count, block_nr);
+			err = hfsplus_block_free(sb, start + count, block_nr);
+			if (err) {
+				printk(KERN_ERR "hfs: can't free extent\n");
+				dprint(DBG_EXTENT, " start: %u count: %u\n",
+						start, count);
+			}
 			extent->block_count = cpu_to_be32(count);
 			block_nr = 0;
 		}
-		if (!block_nr || !i)
-			return 0;
+		if (!block_nr || !i) {
+			/*
+			 * Try to free all extents and
+			 * return only last error
+			 */
+			return err;
+		}
 		i--;
 		extent--;
 		count = be32_to_cpu(extent->block_count);
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -335,7 +335,7 @@ int hfsplus_block_free(struct super_block *, u32, u32);
 /* btree.c */
 struct hfs_btree *hfs_btree_open(struct super_block *, u32);
 void hfs_btree_close(struct hfs_btree *);
-void hfs_btree_write(struct hfs_btree *);
+int hfs_btree_write(struct hfs_btree *);
 struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *);
 void hfs_bmap_free(struct hfs_bnode *);
 
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -127,8 +127,14 @@ static int hfsplus_system_write_inode(struct inode *inode)
 		hfsplus_mark_mdb_dirty(inode->i_sb);
 	}
 	hfsplus_inode_write_fork(inode, fork);
-	if (tree)
-		hfs_btree_write(tree);
+	if (tree) {
+		int err = hfs_btree_write(tree);
+		if (err) {
+			printk(KERN_ERR "hfs: b-tree write err: %d, ino %lu\n",
+					err, inode->i_ino);
+			return err;
+		}
+	}
 	return 0;
 }
 
@@ -226,6 +232,7 @@ out:
 
 static void delayed_sync_fs(struct work_struct *work)
 {
+	int err;
 	struct hfsplus_sb_info *sbi;
 
 	sbi = container_of(work, struct hfsplus_sb_info, sync_work.work);
@@ -234,7 +241,9 @@ static void delayed_sync_fs(struct work_struct *work)
 	sbi->work_queued = 0;
 	spin_unlock(&sbi->work_lock);
 
-	hfsplus_sync_fs(sbi->alloc_file->i_sb, 1);
+	err = hfsplus_sync_fs(sbi->alloc_file->i_sb, 1);
+	if (err)
+		printk(KERN_ERR "hfs: delayed sync fs err %d\n", err);
 }
 
 void hfsplus_mark_mdb_dirty(struct super_block *sb)