Merge branch 'for-4.16/block' of git://git.kernel.dk/linux-block
Pull block updates from Jens Axboe:
 "This is the main pull request for block IO related changes for the
  4.16 kernel. Nothing major in this pull request, but a good amount of
  improvements and fixes all over the map. This contains:

   - BFQ improvements, fixes, and cleanups from Angelo, Chiara, and
     Paolo.

   - Support for SMR zones for deadline and mq-deadline from Damien and
     Christoph.

   - Set of fixes for bcache by way of Michael Lyle, including fixes
     from himself, Kent, Rui, Tang, and Coly.

   - Series from Matias for lightnvm with fixes from Hans Holmberg,
     Javier, and Matias. Mostly centered around pblk, and removing
     rrpc 1.2 in preparation for supporting 2.0.

   - A couple of NVMe pull requests from Christoph. Nothing major in
     here, just fixes and cleanups, and support for command tracing
     from Johannes.

   - Support in blk-throttle for tracking reads and writes separately,
     from Joseph Qi. A few cleanups/fixes also for blk-throttle from
     Weiping.

   - Series from Mike Snitzer that enables dm to register its queue
     more logically, something that's always been problematic on dm
     since it's a stacked device.

   - Series from Ming cleaning up some of the bio accessor uses, in
     preparation for supporting multipage bvecs.

   - Various fixes from Ming closing up holes around queue mapping and
     quiescing.

   - BSD partition fix from Richard Narron, fixing a problem where we
     can't mount newer (10/11) FreeBSD partitions.

   - Series from Tejun reworking blk-mq timeout handling. The previous
     scheme relied on atomic bits, but it had races where we would
     think a request had timed out if it got reused at the wrong time.

   - null_blk now supports faking timeouts, to enable us to better
     exercise and test that functionality separately. From me.

   - Kill the separate atomic poll bit in the request struct. After
     this, we don't use the atomic bits on blk-mq anymore at all.
     From me.

   - sgl_alloc/free helpers from Bart (a usage sketch follows the
     commit list below).

   - Heavily contended tag case scalability improvement from me.

   - Various little fixes and cleanups from Arnd, Bart, Corentin,
     Douglas, Eryu, Goldwyn, and myself"

* 'for-4.16/block' of git://git.kernel.dk/linux-block: (186 commits)
  block: remove smart1,2.h
  nvme: add tracepoint for nvme_complete_rq
  nvme: add tracepoint for nvme_setup_cmd
  nvme-pci: introduce RECONNECTING state to mark initializing procedure
  nvme-rdma: remove redundant boolean for inline_data
  nvme: don't free uuid pointer before printing it
  nvme-pci: Suspend queues after deleting them
  bsg: use pr_debug instead of hand crafted macros
  blk-mq-debugfs: don't allow write on attributes with seq_operations set
  nvme-pci: Fix queue double allocations
  block: Set BIO_TRACE_COMPLETION on new bio during split
  blk-throttle: use queue_is_rq_based
  block: Remove kblockd_schedule_delayed_work{,_on}()
  blk-mq: Avoid that blk_mq_delay_run_hw_queue() introduces unintended delays
  blk-mq: Rename blk_mq_request_direct_issue() into blk_mq_request_issue_directly()
  lib/scatterlist: Fix chaining support in sgl_alloc_order()
  blk-throttle: track read and write request individually
  block: add bdev_read_only() checks to common helpers
  block: fail op_is_write() requests to read-only partitions
  blk-throttle: export io_serviced_recursive, io_service_bytes_recursive
  ...
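The sgl_alloc/free helpers called out above are the lib/scatterlist additions
that the target diff below converts to. A minimal usage sketch, assuming only
the signatures visible in that conversion (sgl_alloc_order(length, order,
chainable, gfp, nents) and sgl_free_n_order(sgl, nents, order)); the function
name and buffer length here are hypothetical:

#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>

/* Hypothetical caller: back 'len' bytes with zeroed order-0 pages,
 * hand the table to a driver, then free pages and table together. */
static int example_alloc_sgl(u32 len)
{
	struct scatterlist *sgl;
	unsigned int nents;

	sgl = sgl_alloc_order(len, 0 /* order */, false /* chainable */,
			      GFP_KERNEL | __GFP_ZERO, &nents);
	if (!sgl)
		return -ENOMEM;

	/* ... use (sgl, nents), e.g. for DMA mapping ... */

	sgl_free_n_order(sgl, nents, 0 /* order */);
	return 0;
}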
@@ -5,6 +5,7 @@ menuconfig TARGET_CORE
 	select CONFIGFS_FS
 	select CRC_T10DIF
 	select BLK_SCSI_REQUEST # only for scsi_command_size_tbl..
+	select SGL_ALLOC
 	default n
 	help
 	  Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled
@@ -2300,13 +2300,7 @@ queue_full:
 
 void target_free_sgl(struct scatterlist *sgl, int nents)
 {
-	struct scatterlist *sg;
-	int count;
-
-	for_each_sg(sgl, sg, nents, count)
-		__free_page(sg_page(sg));
-
-	kfree(sgl);
+	sgl_free_n_order(sgl, nents, 0);
 }
 EXPORT_SYMBOL(target_free_sgl);
 
@@ -2414,42 +2408,10 @@ int
 target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
 		 bool zero_page, bool chainable)
 {
-	struct scatterlist *sg;
-	struct page *page;
-	gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0;
-	unsigned int nalloc, nent;
-	int i = 0;
+	gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0);
 
-	nalloc = nent = DIV_ROUND_UP(length, PAGE_SIZE);
-	if (chainable)
-		nalloc++;
-	sg = kmalloc_array(nalloc, sizeof(struct scatterlist), GFP_KERNEL);
-	if (!sg)
-		return -ENOMEM;
-
-	sg_init_table(sg, nalloc);
-
-	while (length) {
-		u32 page_len = min_t(u32, length, PAGE_SIZE);
-		page = alloc_page(GFP_KERNEL | zero_flag);
-		if (!page)
-			goto out;
-
-		sg_set_page(&sg[i], page, page_len, 0);
-		length -= page_len;
-		i++;
-	}
-	*sgl = sg;
-	*nents = nent;
-	return 0;
-
-out:
-	while (i > 0) {
-		i--;
-		__free_page(sg_page(&sg[i]));
-	}
-	kfree(sg);
-	return -ENOMEM;
+	*sgl = sgl_alloc_order(length, 0, chainable, gfp, nents);
+	return *sgl ? 0 : -ENOMEM;
 }
 EXPORT_SYMBOL(target_alloc_sgl);
 
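target_alloc_sgl() and target_free_sgl() keep their signatures across the
conversion above, so existing fabric and backend callers are unaffected. A
hedged sketch of such a caller, with the function name and length made up for
illustration:

/* Hypothetical caller of the converted helpers; the declarations come
 * from the target core headers. */
static int example_setup_data_buffer(u32 length)
{
	struct scatterlist *sgl;
	unsigned int nents;
	int ret;

	ret = target_alloc_sgl(&sgl, &nents, length,
			       true /* zero_page */, false /* chainable */);
	if (ret)
		return ret;

	/* ... attach (sgl, nents) to the command's data buffer ... */

	target_free_sgl(sgl, nents);
	return 0;
}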