Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block layer fixes from Jens Axboe:
 "Final small batch of fixes to be included before -rc1. Some general
  cleanups in here as well, but some of the blk-mq fixes we need for the
  NVMe conversion and/or scsi-mq.

  The pull request contains:

   - Support for not merging across a specified "chunk size", if set by
     the driver. Some NVMe devices perform poorly for IO that crosses
     such a chunk, so we need to support it generically as part of
     request merging to avoid having to do complicated split logic. From
     me.

   - Bump max tag depth to 10Ki tags. Some scsi devices have a huge
     shared tag space. Before we failed with EINVAL if a too large tag
     depth was specified, now we truncate it and pass back the actual
     value. From me.

   - Various blk-mq rq init fixes from me and others.

   - A fix for enter on a dying queue for blk-mq from Keith. This is
     needed to prevent oopsing on hot device removal.

   - Fixup for blk-mq timer addition from Ming Lei.

   - Small round of performance fixes for mtip32xx from Sam Bradshaw.

   - Minor stack leak fix from Rickard Strandqvist.

   - Two __init annotations from Fabian Frederick"

* 'for-linus' of git://git.kernel.dk/linux-block:
  block: add __init to blkcg_policy_register
  block: add __init to elv_register
  block: ensure that bio_add_page() always accepts a page for an empty bio
  blk-mq: add timer in blk_mq_start_request
  blk-mq: always initialize request->start_time
  block: blk-exec.c: Cleaning up local variable address returnd
  mtip32xx: minor performance enhancements
  blk-mq: ->timeout should be cleared in blk_mq_rq_ctx_init()
  blk-mq: don't allow queue entering for a dying queue
  blk-mq: bump max tag depth to 10K tags
  block: add blk_rq_set_block_pc()
  block: add notion of a chunk size for request merging
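The "chunk size" item is the one conceptual addition in this pull: a driver advertises a boundary (for example an NVMe stripe size that the device handles poorly when crossed), and the block layer then caps request merging so a single request never spans that boundary, instead of splitting it later. Below is a minimal, self-contained sketch of the size calculation; it mirrors the idea only, and the function and parameter names are illustrative rather than the kernel's actual helper.

#include <stdio.h>

/*
 * Illustrative only: cap an IO so it does not cross a power-of-two
 * chunk boundary, in the spirit of the chunk-size merging limit
 * described above. A chunk_sectors of 0 means "no chunk limit".
 */
static unsigned int max_io_sectors(unsigned int chunk_sectors,
				   unsigned long long start_sector,
				   unsigned int max_sectors)
{
	unsigned int left;

	if (!chunk_sectors)
		return max_sectors;

	/* sectors remaining until the next chunk boundary */
	left = chunk_sectors - (unsigned int)(start_sector & (chunk_sectors - 1));
	return left < max_sectors ? left : max_sectors;
}

int main(void)
{
	/* 128-sector (64KiB) chunks: an IO starting at sector 120 may only
	 * cover 8 more sectors before it would cross a boundary. */
	printf("%u\n", max_io_sectors(128, 120, 256)); /* prints 8 */
	return 0;
}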
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -39,6 +39,7 @@
 #include <../drivers/ata/ahci.h>
 #include <linux/export.h>
 #include <linux/debugfs.h>
+#include <linux/prefetch.h>
 #include "mtip32xx.h"
 
 #define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
@@ -2380,6 +2381,8 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
 	/* Map the scatter list for DMA access */
 	nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
 
+	prefetch(&port->flags);
+
 	command->scatter_ents = nents;
 
 	/*
@@ -2392,7 +2395,7 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
 	fis = command->command;
 	fis->type = 0x27;
 	fis->opts = 1 << 7;
-	if (rq_data_dir(rq) == READ)
+	if (dma_dir == DMA_FROM_DEVICE)
 		fis->command = ATA_CMD_FPDMA_READ;
 	else
 		fis->command = ATA_CMD_FPDMA_WRITE;
@@ -2412,7 +2415,7 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
 	fis->res3 = 0;
 	fill_command_sg(dd, command, nents);
 
-	if (command->unaligned)
+	if (unlikely(command->unaligned))
 		fis->device |= 1 << 7;
 
 	/* Populate the command header */
@@ -2433,7 +2436,7 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
 	 * To prevent this command from being issued
 	 * if an internal command is in progress or error handling is active.
 	 */
-	if (port->flags & MTIP_PF_PAUSE_IO) {
+	if (unlikely(port->flags & MTIP_PF_PAUSE_IO)) {
 		set_bit(rq->tag, port->cmds_to_issue);
 		set_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
 		return;
@@ -3754,7 +3757,7 @@ static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,
 	struct driver_data *dd = hctx->queue->queuedata;
 	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
 
-	if (!dd->unal_qdepth || rq_data_dir(rq) == READ)
+	if (rq_data_dir(rq) == READ || !dd->unal_qdepth)
		return false;
 
 	/*
@@ -3776,11 +3779,11 @@ static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
 {
 	int ret;
 
-	if (mtip_check_unal_depth(hctx, rq))
+	if (unlikely(mtip_check_unal_depth(hctx, rq)))
 		return BLK_MQ_RQ_QUEUE_BUSY;
 
 	ret = mtip_submit_request(hctx, rq);
-	if (!ret)
+	if (likely(!ret))
 		return BLK_MQ_RQ_QUEUE_OK;
 
 	rq->errors = ret;
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -493,19 +493,19 @@ struct driver_data {
 
 	struct workqueue_struct *isr_workq;
 
-	struct mtip_work work[MTIP_MAX_SLOT_GROUPS];
-
 	atomic_t irq_workers_active;
 
+	struct mtip_work work[MTIP_MAX_SLOT_GROUPS];
+
 	int isr_binding;
 
 	struct block_device *bdev;
 
+	int unal_qdepth; /* qdepth of unaligned IO queue */
+
 	struct list_head online_list; /* linkage for online list */
 
 	struct list_head remove_list; /* linkage for removing list */
-
-	int unal_qdepth; /* qdepth of unaligned IO queue */
 };
 
 #endif
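The mtip32xx hunks above are micro-optimizations on the IO submission fast path: likely()/unlikely() annotations steer branch layout, prefetch(&port->flags) warms the cache line before the flags are tested, and the struct driver_data reorder presumably groups fields that are touched together. For readers unfamiliar with the hint macros, here is a minimal userspace sketch of how such hints are conventionally built on compiler builtins; the kernel ships its own definitions, so this is illustrative only.

#include <stdio.h>

/* Typical definitions built on GCC/Clang builtins; the kernel provides
 * its own likely()/unlikely()/prefetch(), these are illustrative. */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)
#define prefetch(p)	__builtin_prefetch(p)

/* Toy submit path: the "paused" branch is annotated as rare, the
 * "nothing pending" branch as common, mirroring the pattern above. */
static int submit(int paused, const unsigned long *flags)
{
	prefetch(flags);		/* warm the cache line before it is read */

	if (unlikely(paused))		/* rare: IO is paused */
		return -1;

	if (likely(*flags == 0))	/* common: nothing pending */
		return 0;

	return 1;
}

int main(void)
{
	unsigned long flags = 0;

	printf("%d\n", submit(0, &flags));	/* prints 0 */
	return 0;
}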
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -704,6 +704,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
 
 	rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
 			     WRITE : READ, __GFP_WAIT);
+	blk_rq_set_block_pc(rq);
 
 	if (cgc->buflen) {
 		ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
@@ -716,7 +717,6 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
 	memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);
 
 	rq->timeout = 60*HZ;
-	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 	if (cgc->quiet)
 		rq->cmd_flags |= REQ_QUIET;
 
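The pktcdvd hunks above show the new blk_rq_set_block_pc() helper from this pull in use: callers stop open-coding rq->cmd_type = REQ_TYPE_BLOCK_PC and instead call the helper right after blk_get_request(), so BLOCK_PC request initialization lives in one place. A rough sketch of the resulting caller pattern follows; the function name and error handling are hypothetical, and only the blk_* calls visible in the diff are assumed.

#include <linux/blkdev.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Hypothetical caller sketching the post-series pattern seen in
 * pkt_generic_packet(): blk_rq_set_block_pc() replaces the open-coded
 * REQ_TYPE_BLOCK_PC assignment. */
static int send_packet_cmd(struct request_queue *q, struct gendisk *disk,
			   const unsigned char *cdb, unsigned int cdb_len)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, READ, __GFP_WAIT);
	if (!rq)
		return -ENODEV;
	blk_rq_set_block_pc(rq);	/* mark and initialize as a BLOCK_PC request */

	memcpy(rq->cmd, cdb, cdb_len);
	rq->cmd_len = cdb_len;
	rq->timeout = 60 * HZ;

	err = blk_execute_rq(q, disk, rq, 0);	/* submit and wait for completion */

	blk_put_request(rq);
	return err;
}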