Merge branch 'for-3.20/core' of git://git.kernel.dk/linux-block
Pull core block IO changes from Jens Axboe: "This contains: - A series from Christoph that cleans up and refactors various parts of the REQ_BLOCK_PC handling. Contributions in that series from Dongsu Park and Kent Overstreet as well. - CFQ: - A bug fix for cfq for realtime IO scheduling from Jeff Moyer. - A stable patch fixing a potential crash in CFQ in OOM situations. From Konstantin Khlebnikov. - blk-mq: - Add support for tag allocation policies, from Shaohua. This is a prep patch enabling libata (and other SCSI parts) to use the blk-mq tagging, instead of rolling their own. - Various little tweaks from Keith and Mike, in preparation for DM blk-mq support. - Minor little fixes or tweaks from me. - A double free error fix from Tony Battersby. - The partition 4k issue fixes from Matthew and Boaz. - Add support for zero+unprovision for blkdev_issue_zeroout() from Martin" * 'for-3.20/core' of git://git.kernel.dk/linux-block: (27 commits) block: remove unused function blk_bio_map_sg block: handle the null_mapped flag correctly in blk_rq_map_user_iov blk-mq: fix double-free in error path block: prevent request-to-request merging with gaps if not allowed blk-mq: make blk_mq_run_queues() static dm: fix multipath regression due to initializing wrong request cfq-iosched: handle failure of cfq group allocation block: Quiesce zeroout wrapper block: rewrite and split __bio_copy_iov() block: merge __bio_map_user_iov into bio_map_user_iov block: merge __bio_map_kern into bio_map_kern block: pass iov_iter to the BLOCK_PC mapping functions block: add a helper to free bio bounce buffer pages block: use blk_rq_map_user_iov to implement blk_rq_map_user block: simplify bio_map_kern block: mark blk-mq devices as stackable block: keep established cmd_flags when cloning into a blk-mq request block: add blk-mq support to blk_insert_cloned_request() block: require blk_rq_prep_clone() be given an initialized clone request blk-mq: add tag allocation policy ...
This commit is contained in:
@@ -1719,22 +1719,19 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
 	}

 	if (iov_count) {
-		int len, size = sizeof(struct sg_iovec) * iov_count;
+		int size = sizeof(struct iovec) * iov_count;
 		struct iovec *iov;
+		struct iov_iter i;

 		iov = memdup_user(hp->dxferp, size);
 		if (IS_ERR(iov))
 			return PTR_ERR(iov);

-		len = iov_length(iov, iov_count);
-		if (hp->dxfer_len < len) {
-			iov_count = iov_shorten(iov, iov_count, hp->dxfer_len);
-			len = hp->dxfer_len;
-		}
+		iov_iter_init(&i, rw, iov, iov_count,
+			      min_t(size_t, hp->dxfer_len,
+				    iov_length(iov, iov_count)));

-		res = blk_rq_map_user_iov(q, rq, md, (struct sg_iovec *)iov,
-					  iov_count,
-					  len, GFP_ATOMIC);
+		res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC);
 		kfree(iov);
 	} else
 		res = blk_rq_map_user(q, rq, md, hp->dxferp,
Reference in New Issue
Block a user