Merge branch 'for-4.16/block' of git://git.kernel.dk/linux-block
Pull block updates from Jens Axboe:
 "This is the main pull request for block IO related changes for the 4.16 kernel. Nothing major in this pull request, but a good amount of improvements and fixes all over the map. This contains:

  - BFQ improvements, fixes, and cleanups from Angelo, Chiara, and Paolo.

  - Support for SMR zones for deadline and mq-deadline from Damien and Christoph.

  - Set of fixes for bcache by way of Michael Lyle, including fixes from himself, Kent, Rui, Tang, and Coly.

  - Series from Matias for lightnvm with fixes from Hans Holmberg, Javier, and Matias. Mostly centered around pblk, and the removal of rrpc 1.2 in preparation for supporting 2.0.

  - A couple of NVMe pull requests from Christoph. Nothing major in here, just fixes and cleanups, and support for command tracing from Johannes.

  - Support in blk-throttle for tracking reads and writes separately, from Joseph Qi. A few cleanups/fixes also for blk-throttle from Weiping.

  - Series from Mike Snitzer that enables dm to register its queue more logically, something that's always been problematic on dm since it's a stacked device.

  - Series from Ming cleaning up some of the bio accessor use, in preparation for supporting multipage bvecs.

  - Various fixes from Ming closing up holes around queue mapping and quiescing.

  - BSD partition fix from Richard Narron, fixing a problem where we can't mount newer (10/11) FreeBSD partitions.

  - Series from Tejun reworking blk-mq timeout handling. The previous scheme relied on atomic bits, but it had races where we would think a request had timed out if it got reused at the wrong time.

  - null_blk now supports faking timeouts, to enable us to better exercise and test that functionality separately. From me.

  - Kill the separate atomic poll bit in the request struct. After this, we don't use the atomic bits on blk-mq anymore at all. From me.

  - sgl_alloc/free helpers from Bart.

  - Heavily contended tag case scalability improvement from me.

  - Various little fixes and cleanups from Arnd, Bart, Corentin, Douglas, Eryu, Goldwyn, and myself"

* 'for-4.16/block' of git://git.kernel.dk/linux-block: (186 commits)
  block: remove smart1,2.h
  nvme: add tracepoint for nvme_complete_rq
  nvme: add tracepoint for nvme_setup_cmd
  nvme-pci: introduce RECONNECTING state to mark initializing procedure
  nvme-rdma: remove redundant boolean for inline_data
  nvme: don't free uuid pointer before printing it
  nvme-pci: Suspend queues after deleting them
  bsg: use pr_debug instead of hand crafted macros
  blk-mq-debugfs: don't allow write on attributes with seq_operations set
  nvme-pci: Fix queue double allocations
  block: Set BIO_TRACE_COMPLETION on new bio during split
  blk-throttle: use queue_is_rq_based
  block: Remove kblockd_schedule_delayed_work{,_on}()
  blk-mq: Avoid that blk_mq_delay_run_hw_queue() introduces unintended delays
  blk-mq: Rename blk_mq_request_direct_issue() into blk_mq_request_issue_directly()
  lib/scatterlist: Fix chaining support in sgl_alloc_order()
  blk-throttle: track read and write request individually
  block: add bdev_read_only() checks to common helpers
  block: fail op_is_write() requests to read-only partitions
  blk-throttle: export io_serviced_recursive, io_service_bytes_recursive
  ...
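As a quick illustration of the sgl_alloc/free helpers mentioned in the summary above, a minimal hypothetical fragment follows; the function name, buffer size, and caller are assumptions for illustration only, while sgl_alloc() and sgl_free() themselves are the helpers added in lib/scatterlist.c:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>

/* Hypothetical helper: back a 1 MiB buffer with freshly allocated pages. */
static int example_alloc_sgl(struct scatterlist **sglp, unsigned int *nentsp)
{
	/* sgl_alloc() allocates both the pages and the scatterlist entries. */
	*sglp = sgl_alloc(SZ_1M, GFP_KERNEL, nentsp);
	if (!*sglp)
		return -ENOMEM;
	return 0;
}

/* ... later, sgl_free(sgl) releases the pages and the scatterlist again. */

The point of the helpers is to replace open-coded page-plus-sg-table allocation loops in drivers with a single call pair.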
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -39,7 +39,7 @@ static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
 	}
 }
 
-static cpumask_var_t *alloc_node_to_present_cpumask(void)
+static cpumask_var_t *alloc_node_to_possible_cpumask(void)
 {
 	cpumask_var_t *masks;
 	int node;
@@ -62,7 +62,7 @@ out_unwind:
 	return NULL;
 }
 
-static void free_node_to_present_cpumask(cpumask_var_t *masks)
+static void free_node_to_possible_cpumask(cpumask_var_t *masks)
 {
 	int node;
 
@@ -71,22 +71,22 @@ static void free_node_to_present_cpumask(cpumask_var_t *masks)
 	kfree(masks);
 }
 
-static void build_node_to_present_cpumask(cpumask_var_t *masks)
+static void build_node_to_possible_cpumask(cpumask_var_t *masks)
 {
 	int cpu;
 
-	for_each_present_cpu(cpu)
+	for_each_possible_cpu(cpu)
 		cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]);
 }
 
-static int get_nodes_in_cpumask(cpumask_var_t *node_to_present_cpumask,
+static int get_nodes_in_cpumask(cpumask_var_t *node_to_possible_cpumask,
 				const struct cpumask *mask, nodemask_t *nodemsk)
 {
 	int n, nodes = 0;
 
 	/* Calculate the number of nodes in the supplied affinity mask */
 	for_each_node(n) {
-		if (cpumask_intersects(mask, node_to_present_cpumask[n])) {
+		if (cpumask_intersects(mask, node_to_possible_cpumask[n])) {
 			node_set(n, *nodemsk);
 			nodes++;
 		}
@@ -109,7 +109,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 	int last_affv = affv + affd->pre_vectors;
 	nodemask_t nodemsk = NODE_MASK_NONE;
 	struct cpumask *masks;
-	cpumask_var_t nmsk, *node_to_present_cpumask;
+	cpumask_var_t nmsk, *node_to_possible_cpumask;
 
 	/*
 	 * If there aren't any vectors left after applying the pre/post
@@ -125,8 +125,8 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 	if (!masks)
 		goto out;
 
-	node_to_present_cpumask = alloc_node_to_present_cpumask();
-	if (!node_to_present_cpumask)
+	node_to_possible_cpumask = alloc_node_to_possible_cpumask();
+	if (!node_to_possible_cpumask)
 		goto out;
 
 	/* Fill out vectors at the beginning that don't need affinity */
@@ -135,8 +135,8 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 
 	/* Stabilize the cpumasks */
 	get_online_cpus();
-	build_node_to_present_cpumask(node_to_present_cpumask);
-	nodes = get_nodes_in_cpumask(node_to_present_cpumask, cpu_present_mask,
+	build_node_to_possible_cpumask(node_to_possible_cpumask);
+	nodes = get_nodes_in_cpumask(node_to_possible_cpumask, cpu_possible_mask,
 				     &nodemsk);
 
 	/*
@@ -146,7 +146,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 	if (affv <= nodes) {
 		for_each_node_mask(n, nodemsk) {
 			cpumask_copy(masks + curvec,
-				     node_to_present_cpumask[n]);
+				     node_to_possible_cpumask[n]);
 			if (++curvec == last_affv)
 				break;
 		}
@@ -160,7 +160,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 		vecs_per_node = (affv - (curvec - affd->pre_vectors)) / nodes;
 
 		/* Get the cpus on this node which are in the mask */
-		cpumask_and(nmsk, cpu_present_mask, node_to_present_cpumask[n]);
+		cpumask_and(nmsk, cpu_possible_mask, node_to_possible_cpumask[n]);
 
 		/* Calculate the number of cpus per vector */
 		ncpus = cpumask_weight(nmsk);
@@ -192,7 +192,7 @@ done:
 	/* Fill out vectors at the end that don't need affinity */
 	for (; curvec < nvecs; curvec++)
 		cpumask_copy(masks + curvec, irq_default_affinity);
-	free_node_to_present_cpumask(node_to_present_cpumask);
+	free_node_to_possible_cpumask(node_to_possible_cpumask);
 out:
 	free_cpumask_var(nmsk);
 	return masks;
@@ -214,7 +214,7 @@ int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity
 		return 0;
 
 	get_online_cpus();
-	ret = min_t(int, cpumask_weight(cpu_present_mask), vecs) + resv;
+	ret = min_t(int, cpumask_weight(cpu_possible_mask), vecs) + resv;
 	put_online_cpus();
 	return ret;
 }
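This hunk switches irq_create_affinity_masks() and irq_calc_affinity_vectors() from spreading managed interrupt vectors over present CPUs to spreading them over all possible CPUs, so the computed masks can also cover CPUs that are not present at allocation time but may be hot-added later. For context, a minimal hypothetical driver fragment showing how such masks are typically requested; the function name, queue count, and error handling are illustrative assumptions and not part of this commit, while pci_alloc_irq_vectors_affinity() and struct irq_affinity are the existing managed-IRQ API of this kernel era:

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Hypothetical PCI driver fragment requesting managed MSI-X vectors. */
static int example_setup_irqs(struct pci_dev *pdev, int nr_hw_queues)
{
	/* Reserve one vector (e.g. an admin queue) that should not be spread. */
	struct irq_affinity affd = {
		.pre_vectors = 1,
	};
	int nvecs;

	/*
	 * The affinity masks for the spread vectors are produced by
	 * irq_create_affinity_masks() shown in the diff above.
	 */
	nvecs = pci_alloc_irq_vectors_affinity(pdev, 2, nr_hw_queues + 1,
					       PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
					       &affd);
	if (nvecs < 0)
		return nvecs;

	/* nvecs - 1 spread vectors remain for the hardware queues. */
	return nvecs;
}

With spreading over cpu_possible_mask, a vector whose assigned CPUs are currently offline simply stays unused until those CPUs come online, rather than the mapping having to be rebuilt on CPU hotplug.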