Merge branch 'work.iov_iter' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull iov_iter updates from Al Viro:

 - bio_{map,copy}_user_iov() series; those are cleanups - fixes from
   the same pile went into mainline (and stable) in late September.

 - fs/iomap.c iov_iter-related fixes

 - new primitive - iov_iter_for_each_range(), which applies a function
   to kernel-mapped segments of an iov_iter. Usable for kvec and bvec
   ones, the latter does kmap()/kunmap() around the callback. _Not_
   usable for iovec- or pipe-backed iov_iter; the latter is not hard to
   fix if the need ever appears, the former is by design.

   Another related primitive will have to wait for the next cycle - it
   passes page + offset + size instead of pointer + size, and that one
   will be usable for everything _except_ kvec. Unfortunately, that one
   didn't get exposure in -next yet, so...

 - a bit more lustre iov_iter work, including a use case for
   iov_iter_for_each_range() (checksum calculation)

 - vhost/scsi leak fix in failure exit

 - misc cleanups and detritectomy...

* 'work.iov_iter' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (21 commits)
  iomap_dio_actor(): fix iov_iter bugs
  switch ksocknal_lib_recv_...() to use of iov_iter_for_each_range()
  lustre: switch struct ksock_conn to iov_iter
  vhost/scsi: switch to iov_iter_get_pages()
  fix a page leak in vhost_scsi_iov_to_sgl() error recovery
  new primitive: iov_iter_for_each_range()
  lnet_return_rx_credits_locked: don't abuse list_entry
  xen: don't open-code iov_iter_kvec()
  orangefs: remove detritus from struct orangefs_kiocb_s
  kill iov_shorten()
  bio_alloc_map_data(): do bmd->iter setup right there
  bio_copy_user_iov(): saner bio size calculation
  bio_map_user_iov(): get rid of copying iov_iter
  bio_copy_from_iter(): get rid of copying iov_iter
  move more stuff down into bio_copy_user_iov()
  blk_rq_map_user_iov(): move iov_iter_advance() down
  bio_map_user_iov(): get rid of the iov_for_each()
  bio_map_user_iov(): move alignment check into the main loop
  don't rely upon subsequent bio_add_pc_page() calls failing
  ... and with iov_iter_get_pages_alloc() it becomes even simpler
  ...
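To make the new primitive concrete: the callback is handed each segment as a struct kvec (for bvec-backed iterators, after the kmap() mentioned above), and a non-zero return stops the walk. The sketch below is loosely modeled on the lustre checksum use case; csum_range(), csum_iter() and struct csum_ctx are hypothetical illustrations, not code from this series.

/*
 * Minimal sketch of the iov_iter_for_each_range() calling convention.
 * Works for kvec- and bvec-backed iterators only; the iterator itself
 * is not advanced by the walk.
 */
#include <linux/crc32.h>
#include <linux/uio.h>

struct csum_ctx {
        u32 sum;        /* running CRC over the visited ranges */
};

/* Invoked once per kernel-mapped segment; non-zero return stops the walk. */
static int csum_range(struct kvec *vec, void *context)
{
        struct csum_ctx *ctx = context;

        ctx->sum = crc32_le(ctx->sum, vec->iov_base, vec->iov_len);
        return 0;
}

/* Checksum the next @bytes of a kvec- or bvec-backed iterator. */
static u32 csum_iter(struct iov_iter *iter, size_t bytes)
{
        struct csum_ctx ctx = { .sum = ~0U };

        /* bvec segments are kmap()/kunmap()ed around the callback. */
        iov_iter_for_each_range(iter, bytes, csum_range, &ctx);
        return ~ctx.sum;
}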
@@ -210,12 +210,6 @@ static struct workqueue_struct *vhost_scsi_workqueue;
 static DEFINE_MUTEX(vhost_scsi_mutex);
 static LIST_HEAD(vhost_scsi_list);
 
-static int iov_num_pages(void __user *iov_base, size_t iov_len)
-{
-	return (PAGE_ALIGN((unsigned long)iov_base + iov_len) -
-	       ((unsigned long)iov_base & PAGE_MASK)) >> PAGE_SHIFT;
-}
-
 static void vhost_scsi_done_inflight(struct kref *kref)
 {
 	struct vhost_scsi_inflight *inflight;
@@ -618,48 +612,31 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
  */
 static int
 vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
-		      void __user *ptr,
-		      size_t len,
+		      struct iov_iter *iter,
 		      struct scatterlist *sgl,
 		      bool write)
 {
-	unsigned int npages = 0, offset, nbytes;
-	unsigned int pages_nr = iov_num_pages(ptr, len);
-	struct scatterlist *sg = sgl;
 	struct page **pages = cmd->tvc_upages;
-	int ret, i;
-
-	if (pages_nr > VHOST_SCSI_PREALLOC_UPAGES) {
-		pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
-		       " preallocated VHOST_SCSI_PREALLOC_UPAGES: %u\n",
-			pages_nr, VHOST_SCSI_PREALLOC_UPAGES);
-		return -ENOBUFS;
-	}
-
-	ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
+	struct scatterlist *sg = sgl;
+	ssize_t bytes;
+	size_t offset;
+	unsigned int npages = 0;
+
+	bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
+				VHOST_SCSI_PREALLOC_UPAGES, &offset);
 	/* No pages were pinned */
-	if (ret < 0)
-		goto out;
-	/* Less pages pinned than wanted */
-	if (ret != pages_nr) {
-		for (i = 0; i < ret; i++)
-			put_page(pages[i]);
-		ret = -EFAULT;
-		goto out;
-	}
+	if (bytes <= 0)
+		return bytes < 0 ? bytes : -EFAULT;
 
-	while (len > 0) {
-		offset = (uintptr_t)ptr & ~PAGE_MASK;
-		nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
-		sg_set_page(sg, pages[npages], nbytes, offset);
-		ptr += nbytes;
-		len -= nbytes;
-		sg++;
-		npages++;
-	}
+	iov_iter_advance(iter, bytes);
 
-out:
-	return ret;
+	while (bytes) {
+		unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
+		sg_set_page(sg++, pages[npages++], n, offset);
+		bytes -= n;
+		offset = 0;
+	}
+	return npages;
 }
 
 static int
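For reference, the contract the new code relies on: iov_iter_get_pages() pins up to @maxpages pages starting at the iterator's current position, returns the number of bytes those pages cover (or a negative error), and stores the offset into the first page through its last argument - it does not advance the iterator, which is why the function above calls iov_iter_advance() explicitly. A sketch of that convention in isolation (pin_next() is a hypothetical wrapper, not part of the patch):

#include <linux/kernel.h>
#include <linux/uio.h>

/* Pin the next chunk of @iter into @pages and consume it. */
static ssize_t pin_next(struct iov_iter *iter, struct page **pages,
			unsigned int maxpages)
{
        size_t offset;
        ssize_t bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
                                           maxpages, &offset);

        if (bytes > 0)
                iov_iter_advance(iter, bytes);  /* consume what was pinned */
        return bytes;
}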
@@ -687,24 +664,20 @@ vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
 		      struct iov_iter *iter,
 		      struct scatterlist *sg, int sg_count)
 {
-	size_t off = iter->iov_offset;
-	int i, ret;
+	struct scatterlist *p = sg;
+	int ret;
 
-	for (i = 0; i < iter->nr_segs; i++) {
-		void __user *base = iter->iov[i].iov_base + off;
-		size_t len = iter->iov[i].iov_len - off;
-
-		ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write);
+	while (iov_iter_count(iter)) {
+		ret = vhost_scsi_map_to_sgl(cmd, iter, sg, write);
 		if (ret < 0) {
-			for (i = 0; i < sg_count; i++) {
-				struct page *page = sg_page(&sg[i]);
+			while (p < sg) {
+				struct page *page = sg_page(p++);
 				if (page)
 					put_page(page);
 			}
 			return ret;
 		}
 		sg += ret;
-		off = 0;
 	}
 	return 0;
 }