9P: introduction of a new cache=mmap model.

- Add cache=mmap option
- Make mmap read-write while keeping it as synchronous as possible
 - Build writeback fid on mmap creation if it is writable

Signed-off-by: Dominique Martinet <dominique.martinet@cea.fr>
Signed-off-by: Eric Van Hensbergen <ericvh@gmail.com>
commit fb89b45cdf
parent f94741fd28
Author:    Dominique Martinet <dominique.martinet@cea.fr>
Date:      2014-01-10 13:44:09 +01:00
Committer: Eric Van Hensbergen <ericvh@gmail.com>
8 changed files with 179 additions and 19 deletions
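
At mount time the new model is selected with the cache=mmap option. Below is a minimal usage sketch via mount(2), assuming a virtio transport whose share tag is "hostshare" and a mount point of /mnt/9p; both names are hypothetical, and only the cache=mmap option itself comes from this commit.

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Hypothetical mount of a 9p export using the new cache=mmap mode;
	 * the "hostshare" tag and /mnt/9p target are illustrative only. */
	if (mount("hostshare", "/mnt/9p", "9p", 0,
		  "trans=virtio,cache=mmap") != 0) {
		perror("mount");
		return 1;
	}
	return 0;
}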


@@ -45,6 +45,7 @@
#include "cache.h"
static const struct vm_operations_struct v9fs_file_vm_ops;
static const struct vm_operations_struct v9fs_mmap_file_vm_ops;
/**
* v9fs_file_open - open a file (or directory)
@@ -87,7 +88,8 @@ int v9fs_file_open(struct inode *inode, struct file *file)
file->private_data = fid;
mutex_lock(&v9inode->v_mutex);
-if (v9ses->cache && !v9inode->writeback_fid &&
+if ((v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) &&
+    !v9inode->writeback_fid &&
((file->f_flags & O_ACCMODE) != O_RDONLY)) {
/*
* clone a fid and add it to writeback_fid
@@ -105,7 +107,7 @@ int v9fs_file_open(struct inode *inode, struct file *file)
v9inode->writeback_fid = (void *) fid;
}
mutex_unlock(&v9inode->v_mutex);
-if (v9ses->cache)
+if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
v9fs_cache_inode_set_cookie(inode, file);
return 0;
out_error:
@@ -579,17 +581,55 @@ int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end,
}
static int
-v9fs_file_mmap(struct file *file, struct vm_area_struct *vma)
+v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
int retval;
-retval = generic_file_mmap(file, vma);
+retval = generic_file_mmap(filp, vma);
if (!retval)
vma->vm_ops = &v9fs_file_vm_ops;
return retval;
}
static int
v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
int retval;
struct inode *inode;
struct v9fs_inode *v9inode;
struct p9_fid *fid;
inode = file_inode(filp);
v9inode = V9FS_I(inode);
mutex_lock(&v9inode->v_mutex);
if (!v9inode->writeback_fid &&
(vma->vm_flags & VM_WRITE)) {
/*
* clone a fid and add it to writeback_fid
* we do it during mmap instead of
* page dirty time via write_begin/page_mkwrite
* because we want write after unlink usecase
* to work.
*/
fid = v9fs_writeback_fid(filp->f_path.dentry);
if (IS_ERR(fid)) {
retval = PTR_ERR(fid);
mutex_unlock(&v9inode->v_mutex);
return retval;
}
v9inode->writeback_fid = (void *) fid;
}
mutex_unlock(&v9inode->v_mutex);
retval = generic_file_mmap(filp, vma);
if (!retval)
vma->vm_ops = &v9fs_mmap_file_vm_ops;
return retval;
}
static int
v9fs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
@@ -658,6 +698,22 @@ v9fs_cached_file_read(struct file *filp, char __user *data, size_t count,
return do_sync_read(filp, data, count, offset);
}
/**
* v9fs_mmap_file_read - read from a file
* @filp: file pointer to read
* @data: user data buffer to read data into
* @count: size of buffer
* @offset: offset at which to read data
*
*/
static ssize_t
v9fs_mmap_file_read(struct file *filp, char __user *data, size_t count,
loff_t *offset)
{
/* TODO: Check if there are dirty pages */
return v9fs_file_read(filp, data, count, offset);
}
static ssize_t
v9fs_direct_write(struct file *filp, const char __user * data,
size_t count, loff_t *offsetp)
@@ -728,12 +784,65 @@ v9fs_cached_file_write(struct file *filp, const char __user * data,
return do_sync_write(filp, data, count, offset);
}
/**
* v9fs_mmap_file_write - write to a file
* @filp: file pointer to write
* @data: data buffer to write data from
* @count: size of buffer
* @offset: offset at which to write data
*
*/
static ssize_t
v9fs_mmap_file_write(struct file *filp, const char __user *data,
size_t count, loff_t *offset)
{
/*
* TODO: invalidate mmaps on filp's inode between
* offset and offset+count
*/
return v9fs_file_write(filp, data, count, offset);
}
static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
{
struct inode *inode;
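/* write back only the byte range of the file covered by this mapping */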
struct writeback_control wbc = {
.nr_to_write = LONG_MAX,
.sync_mode = WB_SYNC_ALL,
.range_start = vma->vm_pgoff * PAGE_SIZE,
/* absolute end, byte at end included */
.range_end = vma->vm_pgoff * PAGE_SIZE +
(vma->vm_end - vma->vm_start - 1),
};
p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma);
inode = file_inode(vma->vm_file);
if (!mapping_cap_writeback_dirty(inode->i_mapping))
wbc.nr_to_write = 0;
might_sleep();
sync_inode(inode, &wbc);
}
static const struct vm_operations_struct v9fs_file_vm_ops = {
.fault = filemap_fault,
.page_mkwrite = v9fs_vm_page_mkwrite,
.remap_pages = generic_file_remap_pages,
};
static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {
.close = v9fs_mmap_vm_close,
.fault = filemap_fault,
.page_mkwrite = v9fs_vm_page_mkwrite,
.remap_pages = generic_file_remap_pages,
};
const struct file_operations v9fs_cached_file_operations = {
.llseek = generic_file_llseek,
@@ -784,3 +893,26 @@ const struct file_operations v9fs_file_operations_dotl = {
.mmap = generic_file_readonly_mmap,
.fsync = v9fs_file_fsync_dotl,
};
const struct file_operations v9fs_mmap_file_operations = {
.llseek = generic_file_llseek,
.read = v9fs_mmap_file_read,
.write = v9fs_mmap_file_write,
.open = v9fs_file_open,
.release = v9fs_dir_release,
.lock = v9fs_file_lock,
.mmap = v9fs_mmap_file_mmap,
.fsync = v9fs_file_fsync,
};
const struct file_operations v9fs_mmap_file_operations_dotl = {
.llseek = generic_file_llseek,
.read = v9fs_mmap_file_read,
.write = v9fs_mmap_file_write,
.open = v9fs_file_open,
.release = v9fs_dir_release,
.lock = v9fs_file_lock_dotl,
.flock = v9fs_file_flock_dotl,
.mmap = v9fs_mmap_file_mmap,
.fsync = v9fs_file_fsync_dotl,
};