Merge tag 'for-4.20/block-20181021' of git://git.kernel.dk/linux-block
Pull block layer updates from Jens Axboe:
 "This is the main pull request for block changes for 4.20. This
  contains:

   - Series enabling runtime PM for blk-mq (Bart)

   - Two pull requests from Christoph for NVMe, with items such as:
      - better AEN tracking
      - multipath improvements
      - RDMA fixes
      - rework of FC for target removal
      - fixes for issues identified by static checkers
      - fabric cleanups, as prep for TCP transport
      - various cleanups and bug fixes

   - Block merging cleanups (Christoph)

   - Conversion of drivers to generic DMA mapping API (Christoph)

   - Series fixing ref count issues with blkcg (Dennis)

   - Series improving BFQ heuristics (Paolo, et al)

   - Series improving heuristics for the Kyber IO scheduler (Omar)

   - Removal of dangerous bio_rewind_iter() API (Ming)

   - Apply single queue IPI redirection logic to blk-mq (Ming)

   - Set of fixes and improvements for bcache (Coly et al)

   - Series closing a hotplug race with sysfs group attributes (Hannes)

   - Set of patches for lightnvm:
      - pblk trace support (Hans)
      - SPDX license header update (Javier)
      - Tons of refactoring patches to cleanly abstract the 1.2 and 2.0
        specs behind a common core interface (Javier, Matias)
      - Enable pblk to use a common interface to retrieve chunk metadata
        (Matias)
      - Bug fixes (Various)

   - Set of fixes and updates to the blk IO latency target (Josef)

   - blk-mq queue number updates fixes (Jianchao)

   - Convert a bunch of drivers from the old legacy IO interface to
     blk-mq. This will conclude with the removal of the legacy IO
     interface itself in 4.21, with the rest of the drivers (me, Omar)

   - Removal of the DAC960 driver. The SCSI tree will introduce two
     replacement drivers for this (Hannes)"

* tag 'for-4.20/block-20181021' of git://git.kernel.dk/linux-block: (204 commits)
  block: setup bounce bio_sets properly
  blkcg: reassociate bios when make_request() is called recursively
  blkcg: fix edge case for blk_get_rl() under memory pressure
  nvme-fabrics: move controller options matching to fabrics
  nvme-rdma: always have a valid trsvcid
  mtip32xx: fully switch to the generic DMA API
  rsxx: switch to the generic DMA API
  umem: switch to the generic DMA API
  sx8: switch to the generic DMA API
  sx8: remove dead IF_64BIT_DMA_IS_POSSIBLE code
  skd: switch to the generic DMA API
  ubd: remove use of blk_rq_map_sg
  nvme-pci: remove duplicate check
  drivers/block: Remove DAC960 driver
  nvme-pci: fix hot removal during error handling
  nvmet-fcloop: suppress a compiler warning
  nvme-core: make implicit seed truncation explicit
  nvmet-fc: fix kernel-doc headers
  nvme-fc: rework the request initialization code
  nvme-fc: introduce struct nvme_fcp_op_w_sgl
  ...
include/linux/amifd.h (deleted)
@@ -1,63 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _AMIFD_H
-#define _AMIFD_H
-
-/* Definitions for the Amiga floppy driver */
-
-#include <linux/fd.h>
-
-#define FD_MAX_UNITS    4	/* Max. Number of drives */
-#define FLOPPY_MAX_SECTORS	22	/* Max. Number of sectors per track */
-
-#ifndef ASSEMBLER
-
-struct fd_data_type {
-	char *name;		/* description of data type */
-	int sects;		/* sectors per track */
-#ifdef __STDC__
-	int (*read_fkt)(int);
-	void (*write_fkt)(int);
-#else
-	int (*read_fkt)();	/* read whole track */
-	void (*write_fkt)();	/* write whole track */
-#endif
-};
-
-/*
-** Floppy type descriptions
-*/
-
-struct fd_drive_type {
-	unsigned long code;		/* code returned from drive */
-	char *name;			/* description of drive */
-	unsigned int tracks;		/* number of tracks */
-	unsigned int heads;		/* number of heads */
-	unsigned int read_size;		/* raw read size for one track */
-	unsigned int write_size;	/* raw write size for one track */
-	unsigned int sect_mult;		/* sectors and gap multiplier (HD = 2) */
-	unsigned int precomp1;		/* start track for precomp 1 */
-	unsigned int precomp2;		/* start track for precomp 2 */
-	unsigned int step_delay;	/* time (in ms) for delay after step */
-	unsigned int settle_time;	/* time to settle after dir change */
-	unsigned int side_time;		/* time needed to change sides */
-};
-
-struct amiga_floppy_struct {
-	struct fd_drive_type *type;	/* type of floppy for this unit */
-	struct fd_data_type *dtype;	/* type of floppy for this unit */
-	int track;			/* current track (-1 == unknown) */
-	unsigned char *trackbuf;	/* current track (kmaloc()'d */
-
-	int blocks;			/* total # blocks on disk */
-
-	int changed;			/* true when not known */
-	int disk;			/* disk in drive (-1 == unknown) */
-	int motor;			/* true when motor is at speed */
-	int busy;			/* true when drive is active */
-	int dirty;			/* true when trackbuf is not on disk */
-	int status;			/* current error code for unit */
-	struct gendisk *gendisk;
-};
-#endif
-
-#endif
include/linux/amifdreg.h (deleted)
@@ -1,82 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_AMIFDREG_H
-#define _LINUX_AMIFDREG_H
-
-/*
-** CIAAPRA bits (read only)
-*/
-
-#define DSKRDY      (0x1<<5)        /* disk ready when low */
-#define DSKTRACK0   (0x1<<4)        /* head at track zero when low */
-#define DSKPROT     (0x1<<3)        /* disk protected when low */
-#define DSKCHANGE   (0x1<<2)        /* low when disk removed */
-
-/*
-** CIAAPRB bits (read/write)
-*/
-
-#define DSKMOTOR    (0x1<<7)        /* motor on when low */
-#define DSKSEL3     (0x1<<6)        /* select drive 3 when low */
-#define DSKSEL2     (0x1<<5)        /* select drive 2 when low */
-#define DSKSEL1     (0x1<<4)        /* select drive 1 when low */
-#define DSKSEL0     (0x1<<3)        /* select drive 0 when low */
-#define DSKSIDE     (0x1<<2)        /* side selection: 0 = upper, 1 = lower */
-#define DSKDIREC    (0x1<<1)        /* step direction: 0=in, 1=out (to trk 0) */
-#define DSKSTEP     (0x1)           /* pulse low to step head 1 track */
-
-/*
-** DSKBYTR bits (read only)
-*/
-
-#define DSKBYT      (1<<15)         /* register contains valid byte when set */
-#define DMAON       (1<<14)         /* disk DMA enabled */
-#define DISKWRITE   (1<<13)         /* disk write bit in DSKLEN enabled */
-#define WORDEQUAL   (1<<12)         /* DSKSYNC register match when true */
-/* bits 7-0 are data */
-
-/*
-** ADKCON/ADKCONR bits
-*/
-
-#ifndef SETCLR
-#define ADK_SETCLR      (1<<15)     /* control bit */
-#endif
-#define ADK_PRECOMP1    (1<<14)     /* precompensation selection */
-#define ADK_PRECOMP0    (1<<13)     /* 00=none, 01=140ns, 10=280ns, 11=500ns */
-#define ADK_MFMPREC     (1<<12)     /* 0=GCR precomp., 1=MFM precomp. */
-#define ADK_WORDSYNC    (1<<10)     /* enable DSKSYNC auto DMA */
-#define ADK_MSBSYNC     (1<<9)      /* when 1, enable sync on MSbit (for GCR) */
-#define ADK_FAST        (1<<8)      /* bit cell: 0=2us (GCR), 1=1us (MFM) */
-
-/*
-** DSKLEN bits
-*/
-
-#define DSKLEN_DMAEN    (1<<15)
-#define DSKLEN_WRITE    (1<<14)
-
-/*
-** INTENA/INTREQ bits
-*/
-
-#define DSKINDEX    (0x1<<4)        /* DSKINDEX bit */
-
-/*
-** Misc
-*/
-
-#define MFM_SYNC    0x4489          /* standard MFM sync value */
-
-/* Values for FD_COMMAND */
-#define FD_RECALIBRATE		0x07	/* move to track 0 */
-#define FD_SEEK			0x0F	/* seek track */
-#define FD_READ			0xE6	/* read with MT, MFM, SKip deleted */
-#define FD_WRITE		0xC5	/* write with MT, MFM */
-#define FD_SENSEI		0x08	/* Sense Interrupt Status */
-#define FD_SPECIFY		0x03	/* specify HUT etc */
-#define FD_FORMAT		0x4D	/* format one track */
-#define FD_VERSION		0x10	/* get version code */
-#define FD_CONFIGURE		0x13	/* configure FIFO operation */
-#define FD_PERPENDICULAR	0x12	/* perpendicular r/w mode */
-
-#endif /* _LINUX_AMIFDREG_H */
include/linux/bio.h
@@ -21,12 +21,8 @@
 #include <linux/highmem.h>
 #include <linux/mempool.h>
 #include <linux/ioprio.h>
-#include <linux/bug.h>
 
 #ifdef CONFIG_BLOCK
-
-#include <asm/io.h>
-
 /* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
 #include <linux/blk_types.h>
 
@@ -132,32 +128,6 @@ static inline bool bio_full(struct bio *bio)
 	return bio->bi_vcnt >= bio->bi_max_vecs;
 }
 
-/*
- * will die
- */
-#define bvec_to_phys(bv)	(page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)
-
-/*
- * merge helpers etc
- */
-
-/* Default implementation of BIOVEC_PHYS_MERGEABLE */
-#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
-	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
-
-/*
- * allow arch override, for eg virtualized architectures (put in asm/io.h)
- */
-#ifndef BIOVEC_PHYS_MERGEABLE
-#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
-	__BIOVEC_PHYS_MERGEABLE(vec1, vec2)
-#endif
-
-#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
-	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
-#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
-	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
-
 /*
  * drivers should _never_ use the all version - the bio may have been split
  * before it got to the driver and the driver won't own all of it
@@ -170,27 +140,11 @@ static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
 {
 	iter->bi_sector += bytes >> 9;
 
-	if (bio_no_advance_iter(bio)) {
+	if (bio_no_advance_iter(bio))
 		iter->bi_size -= bytes;
-		iter->bi_done += bytes;
-	} else {
+	else
 		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
 		/* TODO: It is reasonable to complete bio with error here. */
-	}
-}
-
-static inline bool bio_rewind_iter(struct bio *bio, struct bvec_iter *iter,
-		unsigned int bytes)
-{
-	iter->bi_sector -= bytes >> 9;
-
-	if (bio_no_advance_iter(bio)) {
-		iter->bi_size += bytes;
-		iter->bi_done -= bytes;
-		return true;
-	}
-
-	return bvec_iter_rewind(bio->bi_io_vec, iter, bytes);
 }
 
 #define __bio_for_each_segment(bvl, bio, iter, start)			\
@@ -353,6 +307,8 @@ struct bio_integrity_payload {
 	unsigned short		bip_max_vcnt;	/* integrity bio_vec slots */
 	unsigned short		bip_flags;	/* control flags */
 
+	struct bvec_iter	bio_iter;	/* for rewinding parent bio */
+
 	struct work_struct	bip_work;	/* I/O completion */
 
 	struct bio_vec		*bip_vec;
@@ -547,23 +503,31 @@ do { \
 	disk_devt((bio)->bi_disk)
 
 #if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
-int bio_associate_blkcg_from_page(struct bio *bio, struct page *page);
+int bio_associate_blkg_from_page(struct bio *bio, struct page *page);
 #else
-static inline int bio_associate_blkcg_from_page(struct bio *bio,
-						struct page *page) { return 0; }
+static inline int bio_associate_blkg_from_page(struct bio *bio,
+					       struct page *page) { return 0; }
 #endif
 
 #ifdef CONFIG_BLK_CGROUP
 int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
-int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg);
+int bio_associate_blkg_from_css(struct bio *bio,
+				struct cgroup_subsys_state *css);
+int bio_associate_create_blkg(struct request_queue *q, struct bio *bio);
+int bio_reassociate_blkg(struct request_queue *q, struct bio *bio);
 void bio_disassociate_task(struct bio *bio);
-void bio_clone_blkcg_association(struct bio *dst, struct bio *src);
+void bio_clone_blkg_association(struct bio *dst, struct bio *src);
 #else	/* CONFIG_BLK_CGROUP */
 static inline int bio_associate_blkcg(struct bio *bio,
 			struct cgroup_subsys_state *blkcg_css) { return 0; }
+static inline int bio_associate_blkg_from_css(struct bio *bio,
+					      struct cgroup_subsys_state *css)
+{ return 0; }
+static inline int bio_associate_create_blkg(struct request_queue *q,
+					    struct bio *bio) { return 0; }
+static inline int bio_reassociate_blkg(struct request_queue *q, struct bio *bio)
+{ return 0; }
 static inline void bio_disassociate_task(struct bio *bio) { }
-static inline void bio_clone_blkcg_association(struct bio *dst,
-			struct bio *src) { }
+static inline void bio_clone_blkg_association(struct bio *dst,
+			struct bio *src) { }
 #endif	/* CONFIG_BLK_CGROUP */
 
 #ifdef CONFIG_HIGHMEM
include/linux/blk-cgroup.h
@@ -126,7 +126,7 @@ struct blkcg_gq {
 	struct request_list		rl;
 
 	/* reference count */
-	atomic_t			refcnt;
+	struct percpu_ref		refcnt;
 
 	/* is this blkg online? protected by both blkcg and q locks */
 	bool				online;
@@ -184,6 +184,8 @@ extern struct cgroup_subsys_state * const blkcg_root_css;
 
 struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
 				      struct request_queue *q, bool update_hint);
+struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
+				      struct request_queue *q);
 struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 				    struct request_queue *q);
 int blkcg_init_queue(struct request_queue *q);
@@ -230,22 +232,59 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 		   char *input, struct blkg_conf_ctx *ctx);
 void blkg_conf_finish(struct blkg_conf_ctx *ctx);
 
+/**
+ * blkcg_css - find the current css
+ *
+ * Find the css associated with either the kthread or the current task.
+ * This may return a dying css, so it is up to the caller to use tryget logic
+ * to confirm it is alive and well.
+ */
+static inline struct cgroup_subsys_state *blkcg_css(void)
+{
+	struct cgroup_subsys_state *css;
+
+	css = kthread_blkcg();
+	if (css)
+		return css;
+	return task_css(current, io_cgrp_id);
+}
+
 static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
 {
 	return css ? container_of(css, struct blkcg, css) : NULL;
 }
 
+/**
+ * __bio_blkcg - internal version of bio_blkcg for bfq and cfq
+ *
+ * DO NOT USE.
+ * There is a flaw using this version of the function.  In particular, this was
+ * used in a broken paradigm where association was called on the given css.  It
+ * is possible though that the returned css from task_css() is in the process
+ * of dying due to migration of the current task.  So it is improper to assume
+ * *_get() is going to succeed.  Both BFQ and CFQ rely on this logic and will
+ * take additional work to handle more gracefully.
+ */
+static inline struct blkcg *__bio_blkcg(struct bio *bio)
+{
+	if (bio && bio->bi_blkg)
+		return bio->bi_blkg->blkcg;
+	return css_to_blkcg(blkcg_css());
+}
+
+/**
+ * bio_blkcg - grab the blkcg associated with a bio
+ * @bio: target bio
+ *
+ * This returns the blkcg associated with a bio, NULL if not associated.
+ * Callers are expected to either handle NULL or know association has been
+ * done prior to calling this.
+ */
 static inline struct blkcg *bio_blkcg(struct bio *bio)
 {
-	struct cgroup_subsys_state *css;
-
-	if (bio && bio->bi_css)
-		return css_to_blkcg(bio->bi_css);
-	css = kthread_blkcg();
-	if (css)
-		return css_to_blkcg(css);
-	return css_to_blkcg(task_css(current, io_cgrp_id));
+	if (bio && bio->bi_blkg)
+		return bio->bi_blkg->blkcg;
+	return NULL;
 }
 
 static inline bool blk_cgroup_congested(void)
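Illustration (not part of the diff): the reworked bio_blkcg() above no longer
falls back to the current task's css, so callers must now handle a NULL
return. A minimal sketch of the expected calling pattern (the caller function
is hypothetical):

    /* hypothetical caller: only bios that went through association
     * (e.g. bio_associate_create_blkg()) carry a blkcg here */
    static void foo_account_bio(struct bio *bio)
    {
            struct blkcg *blkcg = bio_blkcg(bio);

            if (!blkcg)
                    return; /* not associated; nothing to account */
            /* ... per-cgroup accounting against blkcg ... */
    }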
@@ -451,26 +490,35 @@ static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
  */
 static inline void blkg_get(struct blkcg_gq *blkg)
 {
-	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
-	atomic_inc(&blkg->refcnt);
+	percpu_ref_get(&blkg->refcnt);
 }
 
 /**
- * blkg_try_get - try and get a blkg reference
+ * blkg_tryget - try and get a blkg reference
  * @blkg: blkg to get
  *
  * This is for use when doing an RCU lookup of the blkg.  We may be in the midst
 * of freeing this blkg, so we can only use it if the refcnt is not zero.
  */
-static inline struct blkcg_gq *blkg_try_get(struct blkcg_gq *blkg)
+static inline bool blkg_tryget(struct blkcg_gq *blkg)
 {
-	if (atomic_inc_not_zero(&blkg->refcnt))
-		return blkg;
-	return NULL;
+	return percpu_ref_tryget(&blkg->refcnt);
 }
 
-void __blkg_release_rcu(struct rcu_head *rcu);
+/**
+ * blkg_tryget_closest - try and get a blkg ref on the closet blkg
+ * @blkg: blkg to get
+ *
+ * This walks up the blkg tree to find the closest non-dying blkg and returns
+ * the blkg that it did association with as it may not be the passed in blkg.
+ */
+static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg)
+{
+	while (!percpu_ref_tryget(&blkg->refcnt))
+		blkg = blkg->parent;
+
+	return blkg;
+}
 
 /**
  * blkg_put - put a blkg reference
@@ -478,9 +526,7 @@ void __blkg_release_rcu(struct rcu_head *rcu);
  */
 static inline void blkg_put(struct blkcg_gq *blkg)
 {
-	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
-	if (atomic_dec_and_test(&blkg->refcnt))
-		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
+	percpu_ref_put(&blkg->refcnt);
 }
 
 /**
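Illustration (not part of the diff): blkg->refcnt is now a percpu_ref, so the
get/put above become percpu operations instead of shared atomics. A sketch of
the lifecycle this relies on (the release callback name is illustrative; the
real one lives in block/blk-cgroup.c):

    static void blkg_release(struct percpu_ref *ref)
    {
            struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);

            /* RCU-deferred teardown, as the old call_rcu() path did */
    }

    static int blkg_init_refcnt(struct blkcg_gq *blkg)
    {
            /* one initial reference, starting in (fast) percpu mode */
            return percpu_ref_init(&blkg->refcnt, blkg_release, 0, GFP_KERNEL);
    }

Once percpu_ref_kill() has been called on a dying blkg, percpu_ref_tryget()
fails, which is exactly what blkg_tryget_closest() uses to skip dying nodes
and walk up to the parent.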
@@ -533,25 +579,36 @@ static inline struct request_list *blk_get_rl(struct request_queue *q,
 
 	rcu_read_lock();
 
-	blkcg = bio_blkcg(bio);
+	if (bio && bio->bi_blkg) {
+		blkcg = bio->bi_blkg->blkcg;
+		if (blkcg == &blkcg_root)
+			goto rl_use_root;
 
-	/* bypass blkg lookup and use @q->root_rl directly for root */
+		blkg_get(bio->bi_blkg);
+		rcu_read_unlock();
+		return &bio->bi_blkg->rl;
+	}
+
+	blkcg = css_to_blkcg(blkcg_css());
 	if (blkcg == &blkcg_root)
-		goto root_rl;
+		goto rl_use_root;
 
-	/*
-	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
-	 * or if either the blkcg or queue is going away.  Fall back to
-	 * root_rl in such cases.
-	 */
 	blkg = blkg_lookup(blkcg, q);
 	if (unlikely(!blkg))
-		goto root_rl;
+		blkg = __blkg_lookup_create(blkcg, q);
+
+	if (blkg->blkcg == &blkcg_root || !blkg_tryget(blkg))
+		goto rl_use_root;
 
-	blkg_get(blkg);
 	rcu_read_unlock();
 	return &blkg->rl;
-root_rl:
+
+	/*
+	 * Each blkg has its own request_list, however, the root blkcg
+	 * uses the request_queue's root_rl.  This is to avoid most
+	 * overhead for the root blkcg.
+	 */
+rl_use_root:
 	rcu_read_unlock();
 	return &q->root_rl;
 }
@@ -797,32 +854,26 @@ static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
 					  struct bio *bio) { return false; }
 #endif
 
+
+static inline void blkcg_bio_issue_init(struct bio *bio)
+{
+	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
+}
+
 static inline bool blkcg_bio_issue_check(struct request_queue *q,
 					 struct bio *bio)
 {
-	struct blkcg *blkcg;
 	struct blkcg_gq *blkg;
 	bool throtl = false;
 
 	rcu_read_lock();
-	blkcg = bio_blkcg(bio);
-
-	/* associate blkcg if bio hasn't attached one */
-	bio_associate_blkcg(bio, &blkcg->css);
-
-	blkg = blkg_lookup(blkcg, q);
-	if (unlikely(!blkg)) {
-		spin_lock_irq(q->queue_lock);
-		blkg = blkg_lookup_create(blkcg, q);
-		if (IS_ERR(blkg))
-			blkg = NULL;
-		spin_unlock_irq(q->queue_lock);
-	}
+
+	bio_associate_create_blkg(q, bio);
+	blkg = bio->bi_blkg;
 
 	throtl = blk_throtl_bio(q, blkg, bio);
 
 	if (!throtl) {
-		blkg = blkg ?: q->root_blkg;
 		/*
 		 * If the bio is flagged with BIO_QUEUE_ENTERED it means this
 		 * is a split bio and we would have already accounted for the
@@ -834,6 +885,8 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
 		blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
 	}
 
+	blkcg_bio_issue_init(bio);
+
 	rcu_read_unlock();
 	return !throtl;
 }
@@ -930,6 +983,7 @@ static inline int blkcg_activate_policy(struct request_queue *q,
 static inline void blkcg_deactivate_policy(struct request_queue *q,
 					   const struct blkcg_policy *pol) { }
 
+static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
 static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
 
 static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
@@ -945,6 +999,7 @@ static inline void blk_put_rl(struct request_list *rl) { }
 static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
 static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }
 
+static inline void blkcg_bio_issue_init(struct bio *bio) { }
 static inline bool blkcg_bio_issue_check(struct request_queue *q,
 					 struct bio *bio) { return true; }
 
include/linux/blk-mq.h
@@ -203,6 +203,10 @@ enum {
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 						  struct request_queue *q);
+struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
+						const struct blk_mq_ops *ops,
+						unsigned int queue_depth,
+						unsigned int set_flags);
 int blk_mq_register_dev(struct device *, struct request_queue *);
 void blk_mq_unregister_dev(struct device *, struct request_queue *);
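Illustration (not part of the diff): blk_mq_init_sq_queue() is the
convenience helper used by the single-queue driver conversions in this pull
request. A hedged sketch of a conversion (driver name, ops and queue depth
are illustrative):

    static blk_status_t foo_queue_rq(struct blk_mq_hw_ctx *hctx,
                                     const struct blk_mq_queue_data *bd)
    {
            blk_mq_start_request(bd->rq);
            /* ... drive the hardware ... */
            blk_mq_end_request(bd->rq, BLK_STS_OK);
            return BLK_STS_OK;
    }

    static const struct blk_mq_ops foo_mq_ops = {
            .queue_rq = foo_queue_rq,
    };

    static struct blk_mq_tag_set foo_tag_set;

    static struct request_queue *foo_init_queue(void)
    {
            /* one hw queue, depth 128, allowing request merging */
            return blk_mq_init_sq_queue(&foo_tag_set, &foo_mq_ops, 128,
                                        BLK_MQ_F_SHOULD_MERGE);
    }

The helper returns an ERR_PTR() on failure, so callers check it with IS_ERR()
rather than for NULL.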
include/linux/blk-pm.h (new file, 24 lines)
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _BLK_PM_H_
+#define _BLK_PM_H_
+
+struct device;
+struct request_queue;
+
+/*
+ * block layer runtime pm functions
+ */
+#ifdef CONFIG_PM
+extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
+extern int blk_pre_runtime_suspend(struct request_queue *q);
+extern void blk_post_runtime_suspend(struct request_queue *q, int err);
+extern void blk_pre_runtime_resume(struct request_queue *q);
+extern void blk_post_runtime_resume(struct request_queue *q, int err);
+extern void blk_set_runtime_active(struct request_queue *q);
+#else
+static inline void blk_pm_runtime_init(struct request_queue *q,
+				       struct device *dev) {}
+#endif
+
+#endif /* _BLK_PM_H_ */
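Illustration (not part of the diff): with the helpers split out of blkdev.h,
a driver opting into runtime PM for its queue now includes the new header and
ties the queue to its device; a minimal sketch (function and delay value are
illustrative):

    #include <linux/blk-pm.h>
    #include <linux/pm_runtime.h>

    static void foo_setup_runtime_pm(struct request_queue *q, struct device *dev)
    {
            /* let the block layer drive dev's runtime PM from queue activity */
            blk_pm_runtime_init(q, dev);
            pm_runtime_set_autosuspend_delay(dev, 5000); /* 5s, illustrative */
            pm_runtime_allow(dev);
    }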
include/linux/blk_types.h
@@ -178,7 +178,6 @@ struct bio {
 	 * release.  Read comment on top of bio_associate_current().
 	 */
 	struct io_context	*bi_ioc;
-	struct cgroup_subsys_state *bi_css;
 	struct blkcg_gq		*bi_blkg;
 	struct bio_issue	bi_issue;
 #endif
include/linux/blkdev.h
@@ -108,7 +108,7 @@ typedef __u32 __bitwise req_flags_t;
 #define RQF_QUIET		((__force req_flags_t)(1 << 11))
 /* elevator private data attached */
 #define RQF_ELVPRIV		((__force req_flags_t)(1 << 12))
-/* account I/O stat */
+/* account into disk and partition IO statistics */
 #define RQF_IO_STAT		((__force req_flags_t)(1 << 13))
 /* request came from our alloc pool */
 #define RQF_ALLOCED		((__force req_flags_t)(1 << 14))
@@ -116,7 +116,7 @@ typedef __u32 __bitwise req_flags_t;
 #define RQF_PM			((__force req_flags_t)(1 << 15))
 /* on IO scheduler merge hash */
 #define RQF_HASHED		((__force req_flags_t)(1 << 16))
-/* IO stats tracking on */
+/* track IO completion time */
 #define RQF_STATS		((__force req_flags_t)(1 << 17))
 /* Look at ->special_vec for the actual data payload instead of the
    bio chain. */
@@ -504,6 +504,12 @@ struct request_queue {
 	 * various queue flags, see QUEUE_* below
 	 */
 	unsigned long		queue_flags;
+	/*
+	 * Number of contexts that have called blk_set_pm_only(). If this
+	 * counter is above zero then only RQF_PM and RQF_PREEMPT requests are
+	 * processed.
+	 */
+	atomic_t		pm_only;
 
 	/*
 	 * ida allocated id for this queue.  Used to index queues from
@@ -679,7 +685,7 @@ struct request_queue {
 #define QUEUE_FLAG_FAIL_IO	7	/* fake timeout */
 #define QUEUE_FLAG_NONROT	9	/* non-rotational device (SSD) */
 #define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
-#define QUEUE_FLAG_IO_STAT     10	/* do IO stats */
+#define QUEUE_FLAG_IO_STAT     10	/* do disk/partitions IO accounting */
 #define QUEUE_FLAG_DISCARD     11	/* supports DISCARD */
 #define QUEUE_FLAG_NOXMERGES   12	/* No extended merges */
 #define QUEUE_FLAG_ADD_RANDOM  13	/* Contributes to random pool */
@@ -693,12 +699,11 @@ struct request_queue {
 #define QUEUE_FLAG_FUA	       21	/* device supports FUA writes */
 #define QUEUE_FLAG_FLUSH_NQ    22	/* flush not queueuable */
 #define QUEUE_FLAG_DAX         23	/* device supports DAX */
-#define QUEUE_FLAG_STATS       24	/* track rq completion times */
+#define QUEUE_FLAG_STATS       24	/* track IO start and completion times */
 #define QUEUE_FLAG_POLL_STATS  25	/* collecting stats for hybrid polling */
 #define QUEUE_FLAG_REGISTERED  26	/* queue has been registered to a disk */
 #define QUEUE_FLAG_SCSI_PASSTHROUGH 27	/* queue supports SCSI commands */
 #define QUEUE_FLAG_QUIESCED    28	/* queue has been quiesced */
-#define QUEUE_FLAG_PREEMPT_ONLY	29	/* only process REQ_PREEMPT requests */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
@@ -736,12 +741,11 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
 	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
 			     REQ_FAILFAST_DRIVER))
 #define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
-#define blk_queue_preempt_only(q)				\
-	test_bit(QUEUE_FLAG_PREEMPT_ONLY, &(q)->queue_flags)
+#define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
 #define blk_queue_fua(q)	test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
 
-extern int blk_set_preempt_only(struct request_queue *q);
-extern void blk_clear_preempt_only(struct request_queue *q);
+extern void blk_set_pm_only(struct request_queue *q);
+extern void blk_clear_pm_only(struct request_queue *q);
 
 static inline int queue_in_flight(struct request_queue *q)
 {
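Illustration (not part of the diff): because pm_only is a counter rather than
a queue flag, nested callers compose correctly. A sketch of the intended
suspend-side usage (the function is hypothetical, error handling trimmed):

    static int foo_suspend_queue(struct request_queue *q)
    {
            /*
             * While the counter is above zero, only RQF_PM/RQF_PREEMPT
             * requests are admitted into the queue.
             */
            blk_set_pm_only(q);

            /* ... quiesce and issue power-management commands ... */

            blk_clear_pm_only(q); /* drop our count; queue reopens at zero */
            return 0;
    }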
@@ -1280,29 +1284,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
 extern void blk_put_queue(struct request_queue *);
 extern void blk_set_queue_dying(struct request_queue *);
 
-/*
- * block layer runtime pm functions
- */
-#ifdef CONFIG_PM
-extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
-extern int blk_pre_runtime_suspend(struct request_queue *q);
-extern void blk_post_runtime_suspend(struct request_queue *q, int err);
-extern void blk_pre_runtime_resume(struct request_queue *q);
-extern void blk_post_runtime_resume(struct request_queue *q, int err);
-extern void blk_set_runtime_active(struct request_queue *q);
-#else
-static inline void blk_pm_runtime_init(struct request_queue *q,
-				       struct device *dev) {}
-static inline int blk_pre_runtime_suspend(struct request_queue *q)
-{
-	return -ENOSYS;
-}
-static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
-static inline void blk_pre_runtime_resume(struct request_queue *q) {}
-static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
-static inline void blk_set_runtime_active(struct request_queue *q) {}
-#endif
-
 /*
  * blk_plug permits building a queue of related requests by holding the I/O
  * fragments for a short period.  This allows merging of sequential requests
@@ -1676,94 +1657,6 @@ static inline void put_dev_sector(Sector p)
 	put_page(p.v);
 }
 
-static inline bool __bvec_gap_to_prev(struct request_queue *q,
-		struct bio_vec *bprv, unsigned int offset)
-{
-	return offset ||
-		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
-}
-
-/*
- * Check if adding a bio_vec after bprv with offset would create a gap in
- * the SG list. Most drivers don't care about this, but some do.
- */
-static inline bool bvec_gap_to_prev(struct request_queue *q,
-		struct bio_vec *bprv, unsigned int offset)
-{
-	if (!queue_virt_boundary(q))
-		return false;
-	return __bvec_gap_to_prev(q, bprv, offset);
-}
-
-/*
- * Check if the two bvecs from two bios can be merged to one segment.
- * If yes, no need to check gap between the two bios since the 1st bio
- * and the 1st bvec in the 2nd bio can be handled in one segment.
- */
-static inline bool bios_segs_mergeable(struct request_queue *q,
-		struct bio *prev, struct bio_vec *prev_last_bv,
-		struct bio_vec *next_first_bv)
-{
-	if (!BIOVEC_PHYS_MERGEABLE(prev_last_bv, next_first_bv))
-		return false;
-	if (!BIOVEC_SEG_BOUNDARY(q, prev_last_bv, next_first_bv))
-		return false;
-	if (prev->bi_seg_back_size + next_first_bv->bv_len >
-			queue_max_segment_size(q))
-		return false;
-	return true;
-}
-
-static inline bool bio_will_gap(struct request_queue *q,
-				struct request *prev_rq,
-				struct bio *prev,
-				struct bio *next)
-{
-	if (bio_has_data(prev) && queue_virt_boundary(q)) {
-		struct bio_vec pb, nb;
-
-		/*
-		 * don't merge if the 1st bio starts with non-zero
-		 * offset, otherwise it is quite difficult to respect
-		 * sg gap limit. We work hard to merge a huge number of small
-		 * single bios in case of mkfs.
-		 */
-		if (prev_rq)
-			bio_get_first_bvec(prev_rq->bio, &pb);
-		else
-			bio_get_first_bvec(prev, &pb);
-		if (pb.bv_offset)
-			return true;
-
-		/*
-		 * We don't need to worry about the situation that the
-		 * merged segment ends in unaligned virt boundary:
-		 *
-		 * - if 'pb' ends aligned, the merged segment ends aligned
-		 * - if 'pb' ends unaligned, the next bio must include
-		 *   one single bvec of 'nb', otherwise the 'nb' can't
-		 *   merge with 'pb'
-		 */
-		bio_get_last_bvec(prev, &pb);
-		bio_get_first_bvec(next, &nb);
-
-		if (!bios_segs_mergeable(q, prev, &pb, &nb))
-			return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
-	}
-
-	return false;
-}
-
-static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
-{
-	return bio_will_gap(req->q, req, req->biotail, bio);
-}
-
-static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
-{
-	return bio_will_gap(req->q, NULL, bio, req->bio);
-}
-
 int kblockd_schedule_work(struct work_struct *work);
 int kblockd_schedule_work_on(int cpu, struct work_struct *work);
 int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
@@ -1843,26 +1736,6 @@ queue_max_integrity_segments(struct request_queue *q)
 	return q->limits.max_integrity_segments;
 }
 
-static inline bool integrity_req_gap_back_merge(struct request *req,
-						struct bio *next)
-{
-	struct bio_integrity_payload *bip = bio_integrity(req->bio);
-	struct bio_integrity_payload *bip_next = bio_integrity(next);
-
-	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
-				bip_next->bip_vec[0].bv_offset);
-}
-
-static inline bool integrity_req_gap_front_merge(struct request *req,
-						 struct bio *bio)
-{
-	struct bio_integrity_payload *bip = bio_integrity(bio);
-	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
-
-	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
-				bip_next->bip_vec[0].bv_offset);
-}
-
 /**
  * bio_integrity_intervals - Return number of integrity intervals for a bio
  * @bi:		blk_integrity profile for device
@@ -1947,17 +1820,6 @@ static inline bool blk_integrity_merge_bio(struct request_queue *rq,
 	return true;
 }
 
-static inline bool integrity_req_gap_back_merge(struct request *req,
-						struct bio *next)
-{
-	return false;
-}
-static inline bool integrity_req_gap_front_merge(struct request *req,
-						 struct bio *bio)
-{
-	return false;
-}
-
 static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
 						   unsigned int sectors)
 {
include/linux/bvec.h
@@ -40,8 +40,6 @@ struct bvec_iter {
 
 	unsigned int		bi_idx;		/* current index into bvl_vec */
 
-	unsigned int            bi_done;	/* number of bytes completed */
-
 	unsigned int            bi_bvec_done;	/* number of bytes completed in
 						   current bvec */
 };
@@ -85,7 +83,6 @@ static inline bool bvec_iter_advance(const struct bio_vec *bv,
 		bytes -= len;
 		iter->bi_size -= len;
 		iter->bi_bvec_done += len;
-		iter->bi_done += len;
 
 		if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) {
 			iter->bi_bvec_done = 0;
include/linux/cgroup.h
@@ -93,6 +93,8 @@ extern struct css_set init_css_set;
 
 bool css_has_online_children(struct cgroup_subsys_state *css);
 struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
+struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
+					 struct cgroup_subsys *ss);
 struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
 					     struct cgroup_subsys *ss);
 struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
include/linux/elevator.h
@@ -111,7 +111,7 @@ struct elevator_mq_ops {
 	void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
 	struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
 	bool (*has_work)(struct blk_mq_hw_ctx *);
-	void (*completed_request)(struct request *);
+	void (*completed_request)(struct request *, u64);
 	void (*started_request)(struct request *);
 	void (*requeue_request)(struct request *);
 	struct request *(*former_request)(struct request_queue *, struct request *);
include/linux/genhd.h
@@ -402,10 +402,11 @@ static inline void free_part_info(struct hd_struct *part)
 extern void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part);
 
 /* block/genhd.c */
-extern void device_add_disk(struct device *parent, struct gendisk *disk);
+extern void device_add_disk(struct device *parent, struct gendisk *disk,
+			    const struct attribute_group **groups);
 static inline void add_disk(struct gendisk *disk)
 {
-	device_add_disk(NULL, disk);
+	device_add_disk(NULL, disk, NULL);
 }
 extern void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk);
 static inline void add_disk_no_queue_reg(struct gendisk *disk)
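Illustration (not part of the diff): the extra argument lets a driver attach
its sysfs attribute groups atomically at disk registration, closing the
hotplug race called out in the changelog (the attributes exist before the
KOBJ_ADD uevent is sent). A sketch with illustrative attribute names:

    static struct attribute *foo_disk_attrs[] = {
            /* &dev_attr_foo.attr, ... driver-specific attributes */
            NULL,
    };

    static const struct attribute_group foo_disk_attr_group = {
            .attrs = foo_disk_attrs,
    };

    static const struct attribute_group *foo_disk_attr_groups[] = {
            &foo_disk_attr_group,
            NULL,
    };

    static void foo_register_disk(struct device *parent, struct gendisk *disk)
    {
            /* userspace never sees the disk without its attributes */
            device_add_disk(parent, disk, foo_disk_attr_groups);
    }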
include/linux/lightnvm.h
@@ -86,8 +86,8 @@ struct nvm_chk_meta;
 typedef int (nvm_id_fn)(struct nvm_dev *);
 typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
 typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
-typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, struct nvm_chk_meta *,
-				  sector_t, int);
+typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, sector_t, int,
+				  struct nvm_chk_meta *);
 typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
 typedef int (nvm_submit_io_sync_fn)(struct nvm_dev *, struct nvm_rq *);
 typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
@@ -305,6 +305,8 @@ struct nvm_rq {
 	u64 ppa_status; /* ppa media status */
 	int error;
 
+	int is_seq; /* Sequential hint flag. 1.2 only */
+
 	void *private;
 };
 
@@ -318,6 +320,11 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
 	return rqdata + 1;
 }
 
+static inline struct ppa_addr *nvm_rq_to_ppa_list(struct nvm_rq *rqd)
+{
+	return (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
+}
+
 enum {
 	NVM_BLK_ST_FREE =	0x1,	/* Free block */
 	NVM_BLK_ST_TGT =	0x2,	/* Block in use by target */
@@ -485,6 +492,144 @@ static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
 	return l;
 }
 
+static inline u64 dev_to_chunk_addr(struct nvm_dev *dev, void *addrf,
+				    struct ppa_addr p)
+{
+	struct nvm_geo *geo = &dev->geo;
+	u64 caddr;
+
+	if (geo->version == NVM_OCSSD_SPEC_12) {
+		struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)addrf;
+
+		caddr = (u64)p.g.pg << ppaf->pg_offset;
+		caddr |= (u64)p.g.pl << ppaf->pln_offset;
+		caddr |= (u64)p.g.sec << ppaf->sec_offset;
+	} else {
+		caddr = p.m.sec;
+	}
+
+	return caddr;
+}
+
+static inline struct ppa_addr nvm_ppa32_to_ppa64(struct nvm_dev *dev,
+						 void *addrf, u32 ppa32)
+{
+	struct ppa_addr ppa64;
+
+	ppa64.ppa = 0;
+
+	if (ppa32 == -1) {
+		ppa64.ppa = ADDR_EMPTY;
+	} else if (ppa32 & (1U << 31)) {
+		ppa64.c.line = ppa32 & ((~0U) >> 1);
+		ppa64.c.is_cached = 1;
+	} else {
+		struct nvm_geo *geo = &dev->geo;
+
+		if (geo->version == NVM_OCSSD_SPEC_12) {
+			struct nvm_addrf_12 *ppaf = addrf;
+
+			ppa64.g.ch = (ppa32 & ppaf->ch_mask) >> ppaf->ch_offset;
+			ppa64.g.lun = (ppa32 & ppaf->lun_mask) >> ppaf->lun_offset;
+			ppa64.g.blk = (ppa32 & ppaf->blk_mask) >> ppaf->blk_offset;
+			ppa64.g.pg = (ppa32 & ppaf->pg_mask) >> ppaf->pg_offset;
+			ppa64.g.pl = (ppa32 & ppaf->pln_mask) >> ppaf->pln_offset;
+			ppa64.g.sec = (ppa32 & ppaf->sec_mask) >> ppaf->sec_offset;
+		} else {
+			struct nvm_addrf *lbaf = addrf;
+
+			ppa64.m.grp = (ppa32 & lbaf->ch_mask) >> lbaf->ch_offset;
+			ppa64.m.pu = (ppa32 & lbaf->lun_mask) >> lbaf->lun_offset;
+			ppa64.m.chk = (ppa32 & lbaf->chk_mask) >> lbaf->chk_offset;
+			ppa64.m.sec = (ppa32 & lbaf->sec_mask) >> lbaf->sec_offset;
+		}
+	}
+
+	return ppa64;
+}
+
+static inline u32 nvm_ppa64_to_ppa32(struct nvm_dev *dev,
+				     void *addrf, struct ppa_addr ppa64)
+{
+	u32 ppa32 = 0;
+
+	if (ppa64.ppa == ADDR_EMPTY) {
+		ppa32 = ~0U;
+	} else if (ppa64.c.is_cached) {
+		ppa32 |= ppa64.c.line;
+		ppa32 |= 1U << 31;
+	} else {
+		struct nvm_geo *geo = &dev->geo;
+
+		if (geo->version == NVM_OCSSD_SPEC_12) {
+			struct nvm_addrf_12 *ppaf = addrf;
+
+			ppa32 |= ppa64.g.ch << ppaf->ch_offset;
+			ppa32 |= ppa64.g.lun << ppaf->lun_offset;
+			ppa32 |= ppa64.g.blk << ppaf->blk_offset;
+			ppa32 |= ppa64.g.pg << ppaf->pg_offset;
+			ppa32 |= ppa64.g.pl << ppaf->pln_offset;
+			ppa32 |= ppa64.g.sec << ppaf->sec_offset;
+		} else {
+			struct nvm_addrf *lbaf = addrf;
+
+			ppa32 |= ppa64.m.grp << lbaf->ch_offset;
+			ppa32 |= ppa64.m.pu << lbaf->lun_offset;
+			ppa32 |= ppa64.m.chk << lbaf->chk_offset;
+			ppa32 |= ppa64.m.sec << lbaf->sec_offset;
+		}
+	}
+
+	return ppa32;
+}
+
+static inline int nvm_next_ppa_in_chk(struct nvm_tgt_dev *dev,
+				      struct ppa_addr *ppa)
+{
+	struct nvm_geo *geo = &dev->geo;
+	int last = 0;
+
+	if (geo->version == NVM_OCSSD_SPEC_12) {
+		int sec = ppa->g.sec;
+
+		sec++;
+		if (sec == geo->ws_min) {
+			int pg = ppa->g.pg;
+
+			sec = 0;
+			pg++;
+			if (pg == geo->num_pg) {
+				int pl = ppa->g.pl;
+
+				pg = 0;
+				pl++;
+				if (pl == geo->num_pln)
+					last = 1;
+
+				ppa->g.pl = pl;
+			}
+			ppa->g.pg = pg;
+		}
+		ppa->g.sec = sec;
+	} else {
+		ppa->m.sec++;
+		if (ppa->m.sec == geo->clba)
+			last = 1;
+	}
+
+	return last;
+}
+
 typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
 typedef sector_t (nvm_tgt_capacity_fn)(void *);
 typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *,
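Illustration (not part of the diff): the two conversion helpers above are
inverses for valid device-format addresses, so a round trip through the
packed 32-bit form is the identity. A sketch of that property (the checking
function is hypothetical):

    static bool foo_ppa_roundtrip_ok(struct nvm_dev *dev, void *addrf,
                                     struct ppa_addr ppa)
    {
            u32 packed = nvm_ppa64_to_ppa32(dev, addrf, ppa);
            struct ppa_addr unpacked = nvm_ppa32_to_ppa64(dev, addrf, packed);

            return unpacked.ppa == ppa.ppa;
    }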
@@ -493,9 +638,15 @@ typedef void (nvm_tgt_exit_fn)(void *, bool);
 typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *);
 typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *);
 
+enum {
+	NVM_TGT_F_DEV_L2P = 0,
+	NVM_TGT_F_HOST_L2P = 1 << 0,
+};
+
 struct nvm_tgt_type {
 	const char *name;
 	unsigned int version[3];
+	int flags;
 
 	/* target entry points */
 	nvm_tgt_make_rq_fn *make_rq;
@@ -524,18 +675,13 @@ extern struct nvm_dev *nvm_alloc_dev(int);
 extern int nvm_register(struct nvm_dev *);
 extern void nvm_unregister(struct nvm_dev *);
 
-
-extern int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev,
-			      struct nvm_chk_meta *meta, struct ppa_addr ppa,
-			      int nchks);
-
-extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *,
+extern int nvm_get_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr,
+			      int, struct nvm_chk_meta *);
+extern int nvm_set_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr *,
 			      int, int);
 extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
 extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *);
 extern void nvm_end_io(struct nvm_rq *);
 extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int);
 extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *);
 
 #else /* CONFIG_NVM */
 struct nvm_dev_ops;
include/linux/mtd/blktrans.h
@@ -23,7 +23,6 @@
 #include <linux/mutex.h>
 #include <linux/kref.h>
 #include <linux/sysfs.h>
-#include <linux/workqueue.h>
 
 struct hd_geometry;
 struct mtd_info;
@@ -44,9 +43,9 @@ struct mtd_blktrans_dev {
 	struct kref ref;
 	struct gendisk *disk;
 	struct attribute_group *disk_attributes;
-	struct workqueue_struct *wq;
-	struct work_struct work;
 	struct request_queue *rq;
+	struct list_head rq_list;
+	struct blk_mq_tag_set *tag_set;
 	spinlock_t queue_lock;
 	void *priv;
 	fmode_t file_mode;
include/linux/nvme.h
@@ -1241,6 +1241,7 @@ enum {
 	NVME_SC_ANA_PERSISTENT_LOSS	= 0x301,
 	NVME_SC_ANA_INACCESSIBLE	= 0x302,
 	NVME_SC_ANA_TRANSITION		= 0x303,
+	NVME_SC_HOST_PATH_ERROR		= 0x370,
 
 	NVME_SC_DNR			= 0x4000,
 };
include/linux/percpu-refcount.h
@@ -108,6 +108,7 @@ void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref);
 void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_kill);
+void percpu_ref_resurrect(struct percpu_ref *ref);
 void percpu_ref_reinit(struct percpu_ref *ref);
 
 /**
include/linux/writeback.h
@@ -246,7 +246,8 @@ static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
  *
  * @bio is a part of the writeback in progress controlled by @wbc.  Perform
  * writeback specific initialization.  This is used to apply the cgroup
- * writeback context.
+ * writeback context.  Must be called after the bio has been associated with
+ * a device.
  */
 static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
 {
@@ -257,7 +258,7 @@ static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
 	 * regular writeback instead of writing things out itself.
 	 */
 	if (wbc->wb)
-		bio_associate_blkcg(bio, wbc->wb->blkcg_css);
+		bio_associate_blkg_from_css(bio, wbc->wb->blkcg_css);
 }
 
 #else	/* CONFIG_CGROUP_WRITEBACK */
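Illustration (not part of the diff): since wbc_init_bio() now associates the
bio with a blkg, which requires a request_queue, it must run after
bio_set_dev(). A sketch of the resulting submission order in a writeback path
(the function is hypothetical):

    static void foo_submit_wb_bio(struct writeback_control *wbc,
                                  struct block_device *bdev, struct bio *bio)
    {
            bio_set_dev(bio, bdev);  /* associate with a device first */
            wbc_init_bio(wbc, bio);  /* then apply the cgroup writeback context */
            /* ... fill in sector and pages ... */
            submit_bio(bio);
    }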