block: kill QUEUE_FLAG_FLUSH_NQ
We have various helpers for setting/clearing this flag, and also a helper
to check if the queue supports queueable flushes or not. But nobody uses
them anymore, kill it with fire.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
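For context, a rough sketch (not from this patch) of how the removed interface was meant to be used. The driver, device struct, and probe function below are hypothetical; the two helpers are exactly the ones deleted in the hunks that follow.

	/* Hypothetical probe routine illustrating the removed helpers. */
	static int my_probe(struct my_device *dev)
	{
		struct request_queue *q = dev->queue;

		/* Device has a volatile write cache and supports FUA writes. */
		blk_queue_write_cache(q, true, true);

		/*
		 * Device cannot have a cache flush pending while other commands
		 * are queued: mark it via the (now removed) helper, which set
		 * QUEUE_FLAG_FLUSH_NQ on the queue.
		 */
		blk_queue_flush_queueable(q, false);

		return 0;
	}

	/* Consumers could then ask whether a flush may be queued: */
	static bool my_flush_may_be_queued(struct request_queue *q)
	{
		return queue_flush_queueable(q);	/* true unless FLUSH_NQ is set */
	}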
@@ -132,7 +132,6 @@ static const char *const blk_queue_flag_name[] = {
 	QUEUE_FLAG_NAME(POLL),
 	QUEUE_FLAG_NAME(WC),
 	QUEUE_FLAG_NAME(FUA),
-	QUEUE_FLAG_NAME(FLUSH_NQ),
 	QUEUE_FLAG_NAME(DAX),
 	QUEUE_FLAG_NAME(STATS),
 	QUEUE_FLAG_NAME(POLL_STATS),
@@ -799,15 +799,6 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
 }
 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
-void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
-{
-	if (queueable)
-		blk_queue_flag_clear(QUEUE_FLAG_FLUSH_NQ, q);
-	else
-		blk_queue_flag_set(QUEUE_FLAG_FLUSH_NQ, q);
-}
-EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
-
 /**
  * blk_set_queue_depth - tell the block layer about the device queue depth
  * @q:   the request queue for the device
@@ -1318,8 +1318,6 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
 		scsi_change_queue_depth(sdev, depth);
 	}
 
-	blk_queue_flush_queueable(q, false);
-
 	if (dev->flags & ATA_DFLAG_TRUSTED)
 		sdev->security_supported = 1;
 
@@ -1678,7 +1678,6 @@ static int null_add_dev(struct nullb_device *dev)
 	if (dev->cache_size > 0) {
 		set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
 		blk_queue_write_cache(nullb->q, true, true);
-		blk_queue_flush_queueable(nullb->q, true);
 	}
 
 	if (dev->zoned) {
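With the flag gone, a driver like null_blk describes its cache semantics through blk_queue_write_cache() alone. A minimal sketch of that surviving call, wrapped in a hypothetical helper with hypothetical capability booleans:

	/* Hypothetical helper; only the blk_queue_write_cache() call is real. */
	static void my_setup_cache(struct request_queue *q, bool has_wcache, bool has_fua)
	{
		/*
		 * First bool: a volatile write cache is present, so REQ_OP_FLUSH
		 * must be honoured.  Second bool: REQ_FUA writes are supported.
		 */
		blk_queue_write_cache(q, has_wcache, has_fua);
	}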
@@ -592,7 +592,6 @@ struct request_queue {
 #define QUEUE_FLAG_POLL		19	/* IO polling enabled if set */
 #define QUEUE_FLAG_WC		20	/* Write back caching */
 #define QUEUE_FLAG_FUA		21	/* device supports FUA writes */
-#define QUEUE_FLAG_FLUSH_NQ	22	/* flush not queueuable */
 #define QUEUE_FLAG_DAX		23	/* device supports DAX */
 #define QUEUE_FLAG_STATS	24	/* track IO start and completion times */
 #define QUEUE_FLAG_POLL_STATS	25	/* collecting stats for hybrid polling */
@@ -1069,7 +1068,6 @@ extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_dma_alignment(struct request_queue *, int);
 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
-extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
 extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
 
 /*
@@ -1446,11 +1444,6 @@ static inline unsigned int block_size(struct block_device *bdev)
 	return bdev->bd_block_size;
 }
 
-static inline bool queue_flush_queueable(struct request_queue *q)
-{
-	return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
-}
-
 typedef struct {struct page *v;} Sector;
 
 unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);