Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs
* 'for-linus' of git://oss.sgi.com/xfs/xfs:
  xfs: Fix error return for fallocate() on XFS
  xfs: cleanup dmapi macros in the umount path
  xfs: remove incorrect sparse annotation for xfs_iget_cache_miss
  xfs: kill the STATIC_INLINE macro
  xfs: uninline xfs_get_extsz_hint
  xfs: rename xfs_attr_fetch to xfs_attr_get_int
  xfs: simplify xfs_buf_get / xfs_buf_read interfaces
  xfs: remove IO_ISAIO
  xfs: Wrapped journal record corruption on read at recovery
  xfs: cleanup data end I/O handlers
  xfs: use WRITE_SYNC_PLUG for synchronous writeout
  xfs: reset the i_iolock lock class in the reclaim path
  xfs: I/O completion handlers must use NOFS allocations
  xfs: fix mmap_sem/iolock inversion in xfs_free_eofblocks
  xfs: simplify inode teardown
fs/xfs/linux-2.6/xfs_aops.c
@@ -235,71 +235,36 @@ xfs_setfilesize(
 }
 
 /*
- * Buffered IO write completion for delayed allocate extents.
+ * IO write completion.
  */
 STATIC void
-xfs_end_bio_delalloc(
-	struct work_struct	*work)
-{
-	xfs_ioend_t		*ioend =
-		container_of(work, xfs_ioend_t, io_work);
-
-	xfs_setfilesize(ioend);
-	xfs_destroy_ioend(ioend);
-}
-
-/*
- * Buffered IO write completion for regular, written extents.
- */
-STATIC void
-xfs_end_bio_written(
+xfs_end_io(
 	struct work_struct	*work)
 {
 	xfs_ioend_t		*ioend =
 		container_of(work, xfs_ioend_t, io_work);
+	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
 
-	xfs_setfilesize(ioend);
-	xfs_destroy_ioend(ioend);
-}
-
-/*
- * IO write completion for unwritten extents.
- *
- * Issue transactions to convert a buffer range from unwritten
- * to written extents.
- */
-STATIC void
-xfs_end_bio_unwritten(
-	struct work_struct	*work)
-{
-	xfs_ioend_t		*ioend =
-		container_of(work, xfs_ioend_t, io_work);
-	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
-	xfs_off_t		offset = ioend->io_offset;
-	size_t			size = ioend->io_size;
-
-	if (likely(!ioend->io_error)) {
-		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
-			int error;
-			error = xfs_iomap_write_unwritten(ip, offset, size);
-			if (error)
-				ioend->io_error = error;
-		}
-		xfs_setfilesize(ioend);
-	}
-	xfs_destroy_ioend(ioend);
-}
-
-/*
- * IO read completion for regular, written extents.
- */
-STATIC void
-xfs_end_bio_read(
-	struct work_struct	*work)
-{
-	xfs_ioend_t		*ioend =
-		container_of(work, xfs_ioend_t, io_work);
-
+	/*
+	 * For unwritten extents we need to issue transactions to convert a
+	 * range to normal written extens after the data I/O has finished.
+	 */
+	if (ioend->io_type == IOMAP_UNWRITTEN &&
+	    likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) {
+		int error;
+
+		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
+						 ioend->io_size);
+		if (error)
+			ioend->io_error = error;
+	}
+
+	/*
+	 * We might have to update the on-disk file size after extending
+	 * writes.
+	 */
+	if (ioend->io_type != IOMAP_READ)
+		xfs_setfilesize(ioend);
 	xfs_destroy_ioend(ioend);
 }
@@ -314,10 +279,10 @@ xfs_finish_ioend(
 	int		wait)
 {
 	if (atomic_dec_and_test(&ioend->io_remaining)) {
-		struct workqueue_struct *wq = xfsdatad_workqueue;
-		if (ioend->io_work.func == xfs_end_bio_unwritten)
-			wq = xfsconvertd_workqueue;
+		struct workqueue_struct *wq;
 
+		wq = (ioend->io_type == IOMAP_UNWRITTEN) ?
+			xfsconvertd_workqueue : xfsdatad_workqueue;
 		queue_work(wq, &ioend->io_work);
 		if (wait)
 			flush_workqueue(wq);
@@ -355,15 +320,7 @@ xfs_alloc_ioend(
 	ioend->io_offset = 0;
 	ioend->io_size = 0;
 
-	if (type == IOMAP_UNWRITTEN)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
-	else if (type == IOMAP_DELAY)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
-	else if (type == IOMAP_READ)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_read);
-	else
-		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
-
+	INIT_WORK(&ioend->io_work, xfs_end_io);
 	return ioend;
 }
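[Aside] With the per-type INIT_WORK() calls gone, every ioend is queued with the same handler, which dispatches on the recorded I/O type. A minimal sketch of the underlying idiom, under hypothetical names (my_ioend, my_end_io); container_of() and struct work_struct are the real kernel interfaces:

	#include <linux/workqueue.h>

	struct my_ioend {
		int			io_type;	/* recorded at I/O setup time */
		struct work_struct	io_work;	/* embedded work item */
	};

	/* One completion handler for all I/O types: recover the containing
	 * ioend from the work pointer, then branch on the recorded type. */
	static void my_end_io(struct work_struct *work)
	{
		struct my_ioend *ioend = container_of(work, struct my_ioend, io_work);

		if (ioend->io_type == 0 /* e.g. "unwritten" */) {
			/* type-specific step, e.g. extent conversion */
		}
		/* common teardown shared by every type */
	}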
@@ -380,7 +337,7 @@ xfs_map_blocks(
 	return -xfs_iomap(XFS_I(inode), offset, count, flags, mapp, &nmaps);
 }
 
-STATIC_INLINE int
+STATIC int
 xfs_iomap_valid(
 	xfs_iomap_t		*iomapp,
 	loff_t			offset)
@@ -412,8 +369,9 @@ xfs_end_bio(
 
 STATIC void
 xfs_submit_ioend_bio(
-	xfs_ioend_t	*ioend,
-	struct bio	*bio)
+	struct writeback_control *wbc,
+	xfs_ioend_t		*ioend,
+	struct bio		*bio)
 {
 	atomic_inc(&ioend->io_remaining);
 	bio->bi_private = ioend;
@@ -426,7 +384,8 @@ xfs_submit_ioend_bio(
 	if (xfs_ioend_new_eof(ioend))
 		xfs_mark_inode_dirty_sync(XFS_I(ioend->io_inode));
 
-	submit_bio(WRITE, bio);
+	submit_bio(wbc->sync_mode == WB_SYNC_ALL ?
+		   WRITE_SYNC_PLUG : WRITE, bio);
 	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
 	bio_put(bio);
 }
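[Aside] The submit_bio() change keys the request type off the writeback mode: for WB_SYNC_ALL writeback the bio is marked synchronous, and the _PLUG variant (unlike plain WRITE_SYNC) skips the per-bio queue unplug so consecutive bios can still be merged. The decision in isolation (a sketch; wbc is supplied by the writeback path):

	int rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC_PLUG : WRITE;

	submit_bio(rw, bio);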
@@ -505,6 +464,7 @@ static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
  */
 STATIC void
 xfs_submit_ioend(
+	struct writeback_control *wbc,
 	xfs_ioend_t		*ioend)
 {
 	xfs_ioend_t		*head = ioend;
@@ -533,19 +493,19 @@ xfs_submit_ioend(
 retry:
 				bio = xfs_alloc_ioend_bio(bh);
 			} else if (bh->b_blocknr != lastblock + 1) {
-				xfs_submit_ioend_bio(ioend, bio);
+				xfs_submit_ioend_bio(wbc, ioend, bio);
 				goto retry;
 			}
 
 			if (bio_add_buffer(bio, bh) != bh->b_size) {
-				xfs_submit_ioend_bio(ioend, bio);
+				xfs_submit_ioend_bio(wbc, ioend, bio);
 				goto retry;
 			}
 
 			lastblock = bh->b_blocknr;
 		}
 		if (bio)
-			xfs_submit_ioend_bio(ioend, bio);
+			xfs_submit_ioend_bio(wbc, ioend, bio);
 		xfs_finish_ioend(ioend, 0);
 	} while ((ioend = next) != NULL);
 }
@@ -1191,7 +1151,7 @@ xfs_page_state_convert(
 	}
 
 	if (iohead)
-		xfs_submit_ioend(iohead);
+		xfs_submit_ioend(wbc, iohead);
 
 	return page_dirty;
@@ -1528,7 +1488,7 @@ xfs_end_io_direct(
 		 * didn't map an unwritten extent so switch it's completion
 		 * handler.
 		 */
-		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
+		ioend->io_type = IOMAP_NEW;
 		xfs_finish_ioend(ioend, 0);
 	}
fs/xfs/linux-2.6/xfs_buf.c
@@ -149,7 +149,7 @@ page_region_mask(
 	return mask;
 }
 
-STATIC_INLINE void
+STATIC void
 set_page_region(
 	struct page	*page,
 	size_t		offset,
@@ -161,7 +161,7 @@ set_page_region(
 		SetPageUptodate(page);
 }
 
-STATIC_INLINE int
+STATIC int
 test_page_region(
 	struct page	*page,
 	size_t		offset,
@@ -582,7 +582,7 @@ found:
  *	although backing storage may not be.
  */
 xfs_buf_t *
-xfs_buf_get_flags(
+xfs_buf_get(
 	xfs_buftarg_t		*target,/* target for buffer */
 	xfs_off_t		ioff,	/* starting offset of range */
 	size_t			isize,	/* length of range */
@@ -661,7 +661,7 @@ _xfs_buf_read(
 }
 
 xfs_buf_t *
-xfs_buf_read_flags(
+xfs_buf_read(
 	xfs_buftarg_t		*target,
 	xfs_off_t		ioff,
 	size_t			isize,
@@ -671,7 +671,7 @@ xfs_buf_read_flags(
 
 	flags |= XBF_READ;
 
-	bp = xfs_buf_get_flags(target, ioff, isize, flags);
+	bp = xfs_buf_get(target, ioff, isize, flags);
 	if (bp) {
 		if (!XFS_BUF_ISDONE(bp)) {
 			XB_TRACE(bp, "read", (unsigned long)flags);
@@ -718,7 +718,7 @@ xfs_buf_readahead(
 		return;
 
 	flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
-	xfs_buf_read_flags(target, ioff, isize, flags);
+	xfs_buf_read(target, ioff, isize, flags);
 }
 
 xfs_buf_t *
@@ -1113,7 +1113,7 @@ xfs_bdwrite(
 	xfs_buf_delwri_queue(bp, 1);
 }
 
-STATIC_INLINE void
+STATIC void
 _xfs_buf_ioend(
 	xfs_buf_t		*bp,
 	int			schedule)
fs/xfs/linux-2.6/xfs_buf.h
@@ -186,15 +186,10 @@ extern xfs_buf_t *_xfs_buf_find(xfs_buftarg_t *, xfs_off_t, size_t,
 #define xfs_incore(buftarg,blkno,len,lockit) \
 	_xfs_buf_find(buftarg, blkno ,len, lockit, NULL)
 
-extern xfs_buf_t *xfs_buf_get_flags(xfs_buftarg_t *, xfs_off_t, size_t,
+extern xfs_buf_t *xfs_buf_get(xfs_buftarg_t *, xfs_off_t, size_t,
 				xfs_buf_flags_t);
-#define xfs_buf_get(target, blkno, len, flags) \
-	xfs_buf_get_flags((target), (blkno), (len), XBF_LOCK | XBF_MAPPED)
-
-extern xfs_buf_t *xfs_buf_read_flags(xfs_buftarg_t *, xfs_off_t, size_t,
+extern xfs_buf_t *xfs_buf_read(xfs_buftarg_t *, xfs_off_t, size_t,
 				xfs_buf_flags_t);
-#define xfs_buf_read(target, blkno, len, flags) \
-	xfs_buf_read_flags((target), (blkno), (len), XBF_LOCK | XBF_MAPPED)
 
 extern xfs_buf_t *xfs_buf_get_empty(size_t, xfs_buftarg_t *);
 extern xfs_buf_t *xfs_buf_get_noaddr(size_t, xfs_buftarg_t *);
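[Aside] Caller-visible effect of the header change (illustrative, not a hunk from this merge): the removed wrapper macros silently discarded their flags argument and always passed XBF_LOCK | XBF_MAPPED, so former macro users now spell the default flags out:

	xfs_buf_t	*bp;

	/* old: xfs_buf_get(target, blkno, len, flags) -- 'flags' was ignored */
	bp = xfs_buf_get(target, blkno, len, XBF_LOCK | XBF_MAPPED);
	bp = xfs_buf_read(target, blkno, len, XBF_LOCK | XBF_MAPPED);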
fs/xfs/linux-2.6/xfs_file.c
@@ -52,7 +52,7 @@ xfs_file_aio_read(
 	loff_t			pos)
 {
 	struct file		*file = iocb->ki_filp;
-	int			ioflags = IO_ISAIO;
+	int			ioflags = 0;
 
 	BUG_ON(iocb->ki_pos != pos);
 	if (unlikely(file->f_flags & O_DIRECT))
@@ -71,7 +71,7 @@ xfs_file_aio_write(
 	loff_t			pos)
 {
 	struct file		*file = iocb->ki_filp;
-	int			ioflags = IO_ISAIO;
+	int			ioflags = 0;
 
 	BUG_ON(iocb->ki_pos != pos);
 	if (unlikely(file->f_flags & O_DIRECT))
fs/xfs/linux-2.6/xfs_iops.c
@@ -573,8 +573,8 @@ xfs_vn_fallocate(
 	bf.l_len = len;
 
 	xfs_ilock(ip, XFS_IOLOCK_EXCL);
-	error = xfs_change_file_space(ip, XFS_IOC_RESVSP, &bf,
-				      0, XFS_ATTR_NOLOCK);
+	error = -xfs_change_file_space(ip, XFS_IOC_RESVSP, &bf,
+				       0, XFS_ATTR_NOLOCK);
 	if (!error && !(mode & FALLOC_FL_KEEP_SIZE) &&
 	    offset + len > i_size_read(inode))
 		new_size = offset + len;
@@ -585,7 +585,7 @@ xfs_vn_fallocate(
 
 		iattr.ia_valid = ATTR_SIZE;
 		iattr.ia_size = new_size;
-		error = xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK);
+		error = -xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK);
 	}
 
 	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
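[Aside] Why the two added minus signs fix the fallocate() error return: the XFS core returns positive errno values, while the VFS expects a negative errno from ->fallocate(). Without the negation a failure was reported as a positive value rather than an error (an inference from the hunks; the sign convention appears throughout this code base, e.g. in xfs_map_blocks above):

	error = -xfs_change_file_space(ip, XFS_IOC_RESVSP, &bf,
				       0, XFS_ATTR_NOLOCK);
	/* e.g. ENOSPC (positive) from the core becomes -ENOSPC for the VFS */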
fs/xfs/linux-2.6/xfs_lrw.c
@@ -255,8 +255,6 @@ xfs_read(
 
 	iocb->ki_pos = *offset;
 	ret = generic_file_aio_read(iocb, iovp, segs, *offset);
-	if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
-		ret = wait_on_sync_kiocb(iocb);
 	if (ret > 0)
 		XFS_STATS_ADD(xs_read_bytes, ret);
 
@@ -774,9 +772,6 @@ write_retry:
 
 	current->backing_dev_info = NULL;
 
-	if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
-		ret = wait_on_sync_kiocb(iocb);
-
 	isize = i_size_read(inode);
 	if (unlikely(ret < 0 && ret != -EFAULT && *offset > isize))
 		*offset = isize;
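[Aside] Context for the IO_ISAIO removal, inferred from these hunks together with the xfs_file.c change above: both aio entry points set IO_ISAIO in ioflags unconditionally, so the removed guard could never be true and the wait_on_sync_kiocb() calls were dead code:

	if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))	/* never true */
		ret = wait_on_sync_kiocb(iocb);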
fs/xfs/linux-2.6/xfs_super.c
@@ -930,13 +930,39 @@ xfs_fs_alloc_inode(
  */
 STATIC void
 xfs_fs_destroy_inode(
-	struct inode	*inode)
+	struct inode		*inode)
 {
-	xfs_inode_t		*ip = XFS_I(inode);
+	struct xfs_inode	*ip = XFS_I(inode);
+
+	xfs_itrace_entry(ip);
 
 	XFS_STATS_INC(vn_reclaim);
-	if (xfs_reclaim(ip))
-		panic("%s: cannot reclaim 0x%p\n", __func__, inode);
+
+	/* bad inode, get out here ASAP */
+	if (is_bad_inode(inode))
+		goto out_reclaim;
+
+	xfs_ioend_wait(ip);
+
+	ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
+
+	/*
+	 * We should never get here with one of the reclaim flags already set.
+	 */
+	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
+	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));
+
+	/*
+	 * If we have nothing to flush with this inode then complete the
+	 * teardown now, otherwise delay the flush operation.
+	 */
+	if (!xfs_inode_clean(ip)) {
+		xfs_inode_set_reclaim_tag(ip);
+		return;
+	}
+
+out_reclaim:
+	xfs_ireclaim(ip);
 }
 
 /*
@@ -973,7 +999,6 @@ xfs_fs_inode_init_once(
 
 	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
 		     "xfsino", ip->i_ino);
-	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
 }
 
 /*
@@ -1075,6 +1100,20 @@ xfs_fs_clear_inode(
 	XFS_STATS_INC(vn_remove);
 	XFS_STATS_DEC(vn_active);
 
+	/*
+	 * The iolock is used by the file system to coordinate reads,
+	 * writes, and block truncates.  Up to this point the lock
+	 * protected concurrent accesses by users of the inode.  But
+	 * from here forward we're doing some final processing of the
+	 * inode because we're done with it, and although we reuse the
+	 * iolock for protection it is really a distinct lock class
+	 * (in the lockdep sense) from before.  To keep lockdep happy
+	 * (and basically indicate what we are doing), we explicitly
+	 * re-init the iolock here.
+	 */
+	ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
+	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
+
 	xfs_inactive(ip);
 }
 
@@ -1092,8 +1131,6 @@ xfs_fs_put_super(
 	struct super_block	*sb)
 {
 	struct xfs_mount	*mp = XFS_M(sb);
-	struct xfs_inode	*rip = mp->m_rootip;
-	int			unmount_event_flags = 0;
 
 	xfs_syncd_stop(mp);
 
@@ -1109,20 +1146,7 @@ xfs_fs_put_super(
 		xfs_sync_attr(mp, 0);
 	}
 
-#ifdef HAVE_DMAPI
-	if (mp->m_flags & XFS_MOUNT_DMAPI) {
-		unmount_event_flags =
-			(mp->m_dmevmask & (1 << DM_EVENT_UNMOUNT)) ?
-				0 : DM_FLAGS_UNWANTED;
-		/*
-		 * Ignore error from dmapi here, first unmount is not allowed
-		 * to fail anyway, and second we wouldn't want to fail a
-		 * unmount because of dmapi.
-		 */
-		XFS_SEND_PREUNMOUNT(mp, rip, DM_RIGHT_NULL, rip, DM_RIGHT_NULL,
-				NULL, NULL, 0, 0, unmount_event_flags);
-	}
-#endif
+	XFS_SEND_PREUNMOUNT(mp);
 
 	/*
 	 * Blow away any referenced inode in the filestreams cache.
@@ -1133,10 +1157,7 @@ xfs_fs_put_super(
 
 	XFS_bflush(mp->m_ddev_targp);
 
-	if (mp->m_flags & XFS_MOUNT_DMAPI) {
-		XFS_SEND_UNMOUNT(mp, rip, DM_RIGHT_NULL, 0, 0,
-				unmount_event_flags);
-	}
+	XFS_SEND_UNMOUNT(mp);
 
 	xfs_unmountfs(mp);
 	xfs_freesb(mp);
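[Aside] The re-init in xfs_fs_clear_inode() works because initializing a lock also assigns it a fresh lockdep class key, so the teardown phase is tracked separately from normal use. The same idiom with a bare rw_semaphore (hypothetical sketch, not from this merge):

	#include <linux/rwsem.h>

	struct obj {
		struct rw_semaphore	lock;
	};

	static void obj_start_teardown(struct obj *o)
	{
		/*
		 * init_rwsem() declares a static lock_class_key per call
		 * site, so re-initializing here moves 'lock' into a new
		 * lockdep class for final processing and avoids false
		 * inversion reports against its normal-use class.
		 */
		BUG_ON(rwsem_is_locked(&o->lock));
		init_rwsem(&o->lock);
	}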
fs/xfs/linux-2.6/xfs_sync.c
@@ -663,10 +663,9 @@ xfs_syncd_stop(
 	kthread_stop(mp->m_sync_task);
 }
 
-int
+STATIC int
 xfs_reclaim_inode(
 	xfs_inode_t	*ip,
-	int		locked,
 	int		sync_mode)
 {
 	xfs_perag_t	*pag = xfs_get_perag(ip->i_mount, ip->i_ino);
@@ -682,10 +681,6 @@ xfs_reclaim_inode(
 	    !__xfs_iflags_test(ip, XFS_IRECLAIMABLE)) {
 		spin_unlock(&ip->i_flags_lock);
 		write_unlock(&pag->pag_ici_lock);
-		if (locked) {
-			xfs_ifunlock(ip);
-			xfs_iunlock(ip, XFS_ILOCK_EXCL);
-		}
 		return -EAGAIN;
 	}
 	__xfs_iflags_set(ip, XFS_IRECLAIM);
@@ -704,10 +699,8 @@ xfs_reclaim_inode(
 	 * We get the flush lock regardless, though, just to make sure
 	 * we don't free it while it is being flushed.
 	 */
-	if (!locked) {
-		xfs_ilock(ip, XFS_ILOCK_EXCL);
-		xfs_iflock(ip);
-	}
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+	xfs_iflock(ip);
 
 	/*
 	 * In the case of a forced shutdown we rely on xfs_iflush() to
@@ -778,7 +771,7 @@ xfs_reclaim_inode_now(
 	}
 	read_unlock(&pag->pag_ici_lock);
 
-	return xfs_reclaim_inode(ip, 0, flags);
+	return xfs_reclaim_inode(ip, flags);
 }
 
 int
fs/xfs/linux-2.6/xfs_sync.h
@@ -44,7 +44,6 @@ void xfs_quiesce_attr(struct xfs_mount *mp);
 
 void xfs_flush_inodes(struct xfs_inode *ip);
 
-int xfs_reclaim_inode(struct xfs_inode *ip, int locked, int sync_mode);
 int xfs_reclaim_inodes(struct xfs_mount *mp, int mode);
 
 void xfs_inode_set_reclaim_tag(struct xfs_inode *ip);
fs/xfs/linux-2.6/xfs_vnode.h
@@ -36,7 +36,6 @@ struct attrlist_cursor_kern;
 /*
  * Flags for read/write calls - same values as IRIX
  */
-#define IO_ISAIO	0x00001	/* don't wait for completion */
 #define IO_ISDIRECT	0x00004	/* bypass page cache */
 #define IO_INVIS	0x00020	/* don't update inode timestamps */