[XFS] Start writeout earlier (on last close) in the case where we have a
truncate down followed by delayed allocation (buffered writes) - worst case
scenario for the notorious NULL files problem.  This reduces the window
where we are exposed to that problem significantly.

SGI-PV: 917976
SGI-Modid: xfs-linux-melb:xfs-kern:26100a

Signed-off-by: Nathan Scott <nathans@sgi.com>
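For orientation before the diff: the patch marks a vnode as "truncated down" (the new VTRUNCATED flag), and the last closer of the file clears that flag and kicks off writeout instead of waiting for background writeback. The sketch below is a minimal userspace model of that flag protocol, not the kernel code - struct vnode_model, close_file() and the helper names are illustrative stand-ins, and the writeout itself is only represented by a printf (in the kernel it is started from the XFS side of VOP_CLOSE, which this diff wires up via xfs_file_close).

/* Minimal model of the flush-on-last-close flag protocol (illustrative only). */
#include <stdio.h>

enum { VMODIFIED = 0x08, VTRUNCATED = 0x40 };	/* mirrors the new vflags_t values */

struct vnode_model {
	unsigned int	v_flag;			/* stands in for vnode->v_flag */
};

/* Analogue of VTRUNCATE(vp): remember that the file was truncated down. */
static void vtruncate(struct vnode_model *vp)
{
	vp->v_flag |= VTRUNCATED;
}

/* Analogue of VN_TRUNC(vp): is a flush-on-close still pending? */
static int vn_trunc(const struct vnode_model *vp)
{
	return vp->v_flag & VTRUNCATED;
}

/* Analogue of the reworked vn_flagclr(): clear the flag, report its old state. */
static unsigned int vuntruncate(struct vnode_model *vp)
{
	unsigned int cleared = vp->v_flag & VTRUNCATED;

	vp->v_flag &= ~VTRUNCATED;
	return cleared;
}

/* Close path: only the last closer starts writeout early. */
static void close_file(struct vnode_model *vp, int open_count)
{
	int lastclose = (open_count <= 1);	/* file_count(filp) > 1 ? L_FALSE : L_TRUE */

	if (lastclose && vn_trunc(vp)) {
		vuntruncate(vp);
		printf("last close after truncate down: start writeout now\n");
	}
}

int main(void)
{
	struct vnode_model vp = { 0 };

	vtruncate(&vp);		/* e.g. ftruncate() to a smaller size */
	close_file(&vp, 2);	/* not the last close: nothing to do yet */
	close_file(&vp, 1);	/* last close: flush before the exposure window grows */
	return 0;
}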
@@ -1157,6 +1157,18 @@ out_unlock:
 	return error;
 }
 
+STATIC int
+xfs_vm_writepages(
+	struct address_space	*mapping,
+	struct writeback_control *wbc)
+{
+	struct vnode		*vp = vn_from_inode(mapping->host);
+
+	if (VN_TRUNC(vp))
+		VUNTRUNCATE(vp);
+	return generic_writepages(mapping, wbc);
+}
+
 /*
  * Called to move a page into cleanable state - and from there
  * to be released. Possibly the page is already clean. We always
@@ -1451,6 +1463,7 @@ struct address_space_operations xfs_address_space_operations = {
 	.readpage		= xfs_vm_readpage,
 	.readpages		= xfs_vm_readpages,
 	.writepage		= xfs_vm_writepage,
+	.writepages		= xfs_vm_writepages,
 	.sync_page		= block_sync_page,
 	.releasepage		= xfs_vm_releasepage,
 	.invalidatepage		= xfs_vm_invalidatepage,
@@ -323,6 +323,17 @@ xfs_file_open(
 	return -error;
 }
 
+STATIC int
+xfs_file_close(
+	struct file	*filp)
+{
+	vnode_t		*vp = vn_from_inode(filp->f_dentry->d_inode);
+	int		error;
+
+	VOP_CLOSE(vp, 0, file_count(filp) > 1 ? L_FALSE : L_TRUE, NULL, error);
+	return -error;
+}
+
 STATIC int
 xfs_file_release(
 	struct inode	*inode,
@@ -349,6 +360,8 @@ xfs_file_fsync(
 
 	if (datasync)
 		flags |= FSYNC_DATA;
+	if (VN_TRUNC(vp))
+		VUNTRUNCATE(vp);
 	VOP_FSYNC(vp, flags, NULL, (xfs_off_t)0, (xfs_off_t)-1, error);
 	return -error;
 }
@@ -578,6 +591,7 @@ const struct file_operations xfs_file_operations = {
 #endif
 	.mmap		= xfs_file_mmap,
 	.open		= xfs_file_open,
+	.flush		= xfs_file_close,
 	.release	= xfs_file_release,
 	.fsync		= xfs_file_fsync,
 #ifdef HAVE_FOP_OPEN_EXEC
@@ -602,6 +616,7 @@ const struct file_operations xfs_invis_file_operations = {
 #endif
 	.mmap		= xfs_file_mmap,
 	.open		= xfs_file_open,
+	.flush		= xfs_file_close,
 	.release	= xfs_file_release,
 	.fsync		= xfs_file_fsync,
 };
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
+ * Copyright (c) 2000-2002,2005-2006 Silicon Graphics, Inc.
  * All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -15,40 +15,12 @@
  * along with this program; if not, write the Free Software Foundation,
  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
 #include "xfs.h"
 
-/*
- * Stub for no-op vnode operations that return error status.
- */
-int
-fs_noerr(void)
-{
-	return 0;
-}
+int  fs_noerr(void) { return 0; }
+int  fs_nosys(void) { return ENOSYS; }
+void fs_noval(void) { return; }
-
-/*
- * Operation unsupported under this file system.
- */
-int
-fs_nosys(void)
-{
-	return ENOSYS;
-}
-
-/*
- * Stub for inactive, strategy, and read/write lock/unlock.  Does nothing.
- */
-/* ARGSUSED */
-void
-fs_noval(void)
-{
-}
-
-/*
- * vnode pcache layer for vnode_tosspages.
- * 'last' parameter unused but left in for IRIX compatibility
- */
 void
 fs_tosspages(
 	bhv_desc_t	*bdp,
@@ -63,11 +35,6 @@ fs_tosspages(
 	truncate_inode_pages(ip->i_mapping, first);
 }
 
-
-/*
- * vnode pcache layer for vnode_flushinval_pages.
- * 'last' parameter unused but left in for IRIX compatibility
- */
 void
 fs_flushinval_pages(
 	bhv_desc_t	*bdp,
@@ -79,16 +46,13 @@ fs_flushinval_pages(
 	struct inode	*ip = vn_to_inode(vp);
 
 	if (VN_CACHED(vp)) {
+		if (VN_TRUNC(vp))
+			VUNTRUNCATE(vp);
 		filemap_write_and_wait(ip->i_mapping);
-
 		truncate_inode_pages(ip->i_mapping, first);
 	}
 }
 
-/*
- * vnode pcache layer for vnode_flush_pages.
- * 'last' parameter unused but left in for IRIX compatibility
- */
 int
 fs_flush_pages(
 	bhv_desc_t	*bdp,
@@ -100,12 +64,13 @@ fs_flush_pages(
 	vnode_t		*vp = BHV_TO_VNODE(bdp);
 	struct inode	*ip = vn_to_inode(vp);
 
-	if (VN_CACHED(vp)) {
+	if (VN_DIRTY(vp)) {
+		if (VN_TRUNC(vp))
+			VUNTRUNCATE(vp);
 		filemap_fdatawrite(ip->i_mapping);
 		if (flags & XFS_B_ASYNC)
 			return 0;
 		filemap_fdatawait(ip->i_mapping);
 	}
-
 	return 0;
 }
@@ -56,12 +56,18 @@ typedef xfs_ino_t vnumber_t;
 typedef struct dentry	vname_t;
 typedef bhv_head_t	vn_bhv_head_t;
 
+typedef enum vflags {
+	VMODIFIED	= 0x08,	/* XFS inode state possibly differs */
+				/* to the Linux inode state. */
+	VTRUNCATED	= 0x40,	/* truncated down so flush-on-close */
+} vflags_t;
+
 /*
  * MP locking protocols:
  *	v_flag, v_vfsp			VN_LOCK/VN_UNLOCK
  */
 typedef struct vnode {
-	__u32		v_flag;			/* vnode flags (see below) */
+	vflags_t	v_flag;			/* vnode flags (see above) */
 	struct vfs	*v_vfsp;		/* ptr to containing VFS */
 	vnumber_t	v_number;		/* in-core vnode number */
 	vn_bhv_head_t	v_bh;			/* behavior head */
@@ -125,12 +131,6 @@ static inline struct inode *vn_to_inode(struct vnode *vnode)
 	return &vnode->v_inode;
 }
 
-/*
- * Vnode flags.
- */
-#define VMODIFIED		0x8	/* XFS inode state possibly differs */
-					/* to the Linux inode state. */
-
 /*
  * Values for the VOP_RWLOCK and VOP_RWUNLOCK flags parameter.
  */
@@ -162,8 +162,10 @@ typedef enum vchange {
 	VCHANGE_FLAGS_IOEXCL_COUNT	= 4
 } vchange_t;
 
+typedef enum { L_FALSE, L_TRUE } lastclose_t;
 
 typedef int	(*vop_open_t)(bhv_desc_t *, struct cred *);
+typedef int	(*vop_close_t)(bhv_desc_t *, int, lastclose_t, struct cred *);
 typedef ssize_t	(*vop_read_t)(bhv_desc_t *, struct kiocb *,
 				const struct iovec *, unsigned int,
 				loff_t *, int, struct cred *);
@@ -234,6 +236,7 @@ typedef int (*vop_iflush_t)(bhv_desc_t *, int);
 typedef struct vnodeops {
 	bhv_position_t	vn_position;	/* position within behavior chain */
 	vop_open_t		vop_open;
+	vop_close_t		vop_close;
 	vop_read_t		vop_read;
 	vop_write_t		vop_write;
 	vop_sendfile_t		vop_sendfile;
@@ -278,6 +281,10 @@ typedef struct vnodeops {
  */
 #define _VOP_(op, vp)	(*((vnodeops_t *)(vp)->v_fops)->op)
 
+#define VOP_OPEN(vp, cr, rv)					\
+	rv = _VOP_(vop_open, vp)((vp)->v_fbhv, cr)
+#define VOP_CLOSE(vp, f, last, cr, rv)				\
+	rv = _VOP_(vop_close, vp)((vp)->v_fbhv, f, last, cr)
 #define VOP_READ(vp,file,iov,segs,offset,ioflags,cr,rv)	\
 	rv = _VOP_(vop_read, vp)((vp)->v_fbhv,file,iov,segs,offset,ioflags,cr)
 #define VOP_WRITE(vp,file,iov,segs,offset,ioflags,cr,rv)	\
@@ -290,8 +297,6 @@ typedef struct vnodeops {
 	rv = _VOP_(vop_splice_write, vp)((vp)->v_fbhv,f,o,pipe,cnt,fl,iofl,cr)
 #define VOP_BMAP(vp,of,sz,rw,b,n,rv)				\
 	rv = _VOP_(vop_bmap, vp)((vp)->v_fbhv,of,sz,rw,b,n)
-#define VOP_OPEN(vp, cr, rv)					\
-	rv = _VOP_(vop_open, vp)((vp)->v_fbhv, cr)
 #define VOP_GETATTR(vp, vap, f, cr, rv)				\
 	rv = _VOP_(vop_getattr, vp)((vp)->v_fbhv, vap, f, cr)
 #define VOP_SETATTR(vp, vap, f, cr, rv)				\
@@ -556,8 +561,6 @@ static inline struct vnode *vn_grab(struct vnode *vp)
  */
 #define VN_LOCK(vp)		mutex_spinlock(&(vp)->v_lock)
 #define VN_UNLOCK(vp, s)	mutex_spinunlock(&(vp)->v_lock, s)
-#define VN_FLAGSET(vp,b)	vn_flagset(vp,b)
-#define VN_FLAGCLR(vp,b)	vn_flagclr(vp,b)
 
 static __inline__ void vn_flagset(struct vnode *vp, uint flag)
 {
@@ -566,13 +569,22 @@ static __inline__ void vn_flagset(struct vnode *vp, uint flag)
 	spin_unlock(&vp->v_lock);
 }
 
-static __inline__ void vn_flagclr(struct vnode *vp, uint flag)
+static __inline__ uint vn_flagclr(struct vnode *vp, uint flag)
 {
+	uint	cleared;
+
 	spin_lock(&vp->v_lock);
+	cleared = (vp->v_flag & flag);
 	vp->v_flag &= ~flag;
 	spin_unlock(&vp->v_lock);
+	return cleared;
 }
 
+#define VMODIFY(vp)	vn_flagset(vp, VMODIFIED)
+#define VUNMODIFY(vp)	vn_flagclr(vp, VMODIFIED)
+#define VTRUNCATE(vp)	vn_flagset(vp, VTRUNCATED)
+#define VUNTRUNCATE(vp)	vn_flagclr(vp, VTRUNCATED)
+
 /*
  * Dealing with bad inodes
  */
@@ -612,8 +624,7 @@ static inline void vn_atime_to_time_t(struct vnode *vp, time_t *tt)
 #define VN_CACHED(vp)	(vn_to_inode(vp)->i_mapping->nrpages)
 #define VN_DIRTY(vp)	mapping_tagged(vn_to_inode(vp)->i_mapping, \
 				PAGECACHE_TAG_DIRTY)
-#define VMODIFY(vp)	VN_FLAGSET(vp, VMODIFIED)
-#define VUNMODIFY(vp)	VN_FLAGCLR(vp, VMODIFIED)
+#define VN_TRUNC(vp)	((vp)->v_flag & VTRUNCATED)
 
 /*
  * Flags to VOP_SETATTR/VOP_GETATTR.