Merge tag 'for-4.20/block-20181021' of git://git.kernel.dk/linux-block

Pull block layer updates from Jens Axboe:
 "This is the main pull request for block changes for 4.20. This
  contains:

   - Series enabling runtime PM for blk-mq (Bart).

   - Two pull requests from Christoph for NVMe, with items such as:
      - Better AEN tracking
      - Multipath improvements
      - RDMA fixes
      - Rework of FC for target removal
      - Fixes for issues identified by static checkers
      - Fabric cleanups, as prep for TCP transport
      - Various cleanups and bug fixes

   - Block merging cleanups (Christoph)

   - Conversion of drivers to generic DMA mapping API (Christoph)

   - Series fixing ref count issues with blkcg (Dennis)

   - Series improving BFQ heuristics (Paolo, et al)

   - Series improving heuristics for the Kyber IO scheduler (Omar)

   - Removal of the dangerous bio_rewind_iter() API (Ming)

   - Apply single queue IPI redirection logic to blk-mq (Ming)

   - Set of fixes and improvements for bcache (Coly et al)

   - Series closing a hotplug race with sysfs group attributes (Hannes)

   - Set of patches for lightnvm:
      - pblk trace support (Hans)
      - SPDX license header update (Javier)
      - Tons of refactoring patches to cleanly abstract the 1.2 and 2.0
        specs behind a common core interface (Javier, Matias)
      - Enable pblk to use a common interface to retrieve chunk metadata
        (Matias)
      - Bug fixes (Various)

   - Set of fixes and updates to the blk IO latency target (Josef)

   - Fixes for blk-mq hardware queue count updates (Jianchao)

   - Convert a bunch of drivers from the old legacy IO interface to
     blk-mq; a sketch of the conversion pattern follows this list. The
     conversion will conclude in 4.21, when the remaining drivers are
     converted and the legacy IO interface itself is removed (me, Omar)

   - Removal of the DAC960 driver. The SCSI tree will introduce two
     replacement drivers for this (Hannes)"
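
A recurring shape in the legacy-to-blk-mq conversions below (amiflop,
ataflop, aoe, floppy) is: drop blk_init_queue() plus a request_fn, add a
blk_mq_ops with a queue_rq handler, and allocate a single-queue tag set.
A minimal sketch of that pattern, with hypothetical mydrv_* names rather
than code from any one driver in this pull:

#include <linux/blk-mq.h>
#include <linux/err.h>

struct mydrv {
        struct request_queue *queue;
        struct blk_mq_tag_set tag_set;
};

static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
                                   const struct blk_mq_queue_data *bd)
{
        blk_mq_start_request(bd->rq);
        /* ... drive the hardware for bd->rq ... */
        blk_mq_end_request(bd->rq, BLK_STS_OK);
        return BLK_STS_OK;
}

static const struct blk_mq_ops mydrv_mq_ops = {
        .queue_rq       = mydrv_queue_rq,
};

static int mydrv_init_queue(struct mydrv *drv)
{
        /* replaces blk_init_queue(mydrv_request_fn, &mydrv_lock) */
        drv->queue = blk_mq_init_sq_queue(&drv->tag_set, &mydrv_mq_ops,
                                          2, BLK_MQ_F_SHOULD_MERGE);
        if (IS_ERR(drv->queue)) {
                int err = PTR_ERR(drv->queue);

                drv->queue = NULL;
                return err;
        }
        drv->queue->queuedata = drv;
        return 0;
}

The floppy-class drivers below use a queue depth of 2 and
BLK_MQ_F_SHOULD_MERGE, as in this sketch; blk_mq_init_sq_queue()
allocates both the tag set and the queue in one call.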

* tag 'for-4.20/block-20181021' of git://git.kernel.dk/linux-block: (204 commits)
  block: setup bounce bio_sets properly
  blkcg: reassociate bios when make_request() is called recursively
  blkcg: fix edge case for blk_get_rl() under memory pressure
  nvme-fabrics: move controller options matching to fabrics
  nvme-rdma: always have a valid trsvcid
  mtip32xx: fully switch to the generic DMA API
  rsxx: switch to the generic DMA API
  umem: switch to the generic DMA API
  sx8: switch to the generic DMA API
  sx8: remove dead IF_64BIT_DMA_IS_POSSIBLE code
  skd: switch to the generic DMA API
  ubd: remove use of blk_rq_map_sg
  nvme-pci: remove duplicate check
  drivers/block: Remove DAC960 driver
  nvme-pci: fix hot removal during error handling
  nvmet-fcloop: suppress a compiler warning
  nvme-core: make implicit seed truncation explicit
  nvmet-fc: fix kernel-doc headers
  nvme-fc: rework the request initialization code
  nvme-fc: introduce struct nvme_fcp_op_w_sgl
  ...
Merged by Linus Torvalds on 2018-10-22 17:46:08 +01:00
181 changed files with 5706 additions and 16899 deletions

[two file diffs suppressed because they are too large to display]

diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig

@@ -121,18 +121,6 @@ source "drivers/block/mtip32xx/Kconfig"
source "drivers/block/zram/Kconfig"
config BLK_DEV_DAC960
tristate "Mylex DAC960/DAC1100 PCI RAID Controller support"
depends on PCI
help
This driver adds support for the Mylex DAC960, AcceleRAID, and
eXtremeRAID PCI RAID controllers. See the file
<file:Documentation/blockdev/README.DAC960> for further information
about this driver.
To compile this driver as a module, choose M here: the
module will be called DAC960.
config BLK_DEV_UMEM
tristate "Micro Memory MM5415 Battery Backed RAM support"
depends on PCI
@@ -461,7 +449,6 @@ config BLK_DEV_RBD
select LIBCRC32C
select CRYPTO_AES
select CRYPTO
default n
help
Say Y here if you want include the Rados block device, which stripes
a block device over objects stored in the Ceph distributed object

diff --git a/drivers/block/Makefile b/drivers/block/Makefile

@@ -16,7 +16,6 @@ obj-$(CONFIG_ATARI_FLOPPY) += ataflop.o
obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o
obj-$(CONFIG_BLK_DEV_RAM) += brd.o
obj-$(CONFIG_BLK_DEV_LOOP) += loop.o
obj-$(CONFIG_BLK_DEV_DAC960) += DAC960.o
obj-$(CONFIG_XILINX_SYSACE) += xsysace.o
obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
obj-$(CONFIG_SUNVDC) += sunvdc.o

diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c

@@ -61,10 +61,8 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/amifdreg.h>
#include <linux/amifd.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
@@ -86,6 +84,126 @@
* Defines
*/
/*
* CIAAPRA bits (read only)
*/
#define DSKRDY (0x1<<5) /* disk ready when low */
#define DSKTRACK0 (0x1<<4) /* head at track zero when low */
#define DSKPROT (0x1<<3) /* disk protected when low */
#define DSKCHANGE (0x1<<2) /* low when disk removed */
/*
* CIAAPRB bits (read/write)
*/
#define DSKMOTOR (0x1<<7) /* motor on when low */
#define DSKSEL3 (0x1<<6) /* select drive 3 when low */
#define DSKSEL2 (0x1<<5) /* select drive 2 when low */
#define DSKSEL1 (0x1<<4) /* select drive 1 when low */
#define DSKSEL0 (0x1<<3) /* select drive 0 when low */
#define DSKSIDE (0x1<<2) /* side selection: 0 = upper, 1 = lower */
#define DSKDIREC (0x1<<1) /* step direction: 0=in, 1=out (to trk 0) */
#define DSKSTEP (0x1) /* pulse low to step head 1 track */
/*
* DSKBYTR bits (read only)
*/
#define DSKBYT (1<<15) /* register contains valid byte when set */
#define DMAON (1<<14) /* disk DMA enabled */
#define DISKWRITE (1<<13) /* disk write bit in DSKLEN enabled */
#define WORDEQUAL (1<<12) /* DSKSYNC register match when true */
/* bits 7-0 are data */
/*
* ADKCON/ADKCONR bits
*/
#ifndef SETCLR
#define ADK_SETCLR (1<<15) /* control bit */
#endif
#define ADK_PRECOMP1 (1<<14) /* precompensation selection */
#define ADK_PRECOMP0 (1<<13) /* 00=none, 01=140ns, 10=280ns, 11=500ns */
#define ADK_MFMPREC (1<<12) /* 0=GCR precomp., 1=MFM precomp. */
#define ADK_WORDSYNC (1<<10) /* enable DSKSYNC auto DMA */
#define ADK_MSBSYNC (1<<9) /* when 1, enable sync on MSbit (for GCR) */
#define ADK_FAST (1<<8) /* bit cell: 0=2us (GCR), 1=1us (MFM) */
/*
* DSKLEN bits
*/
#define DSKLEN_DMAEN (1<<15)
#define DSKLEN_WRITE (1<<14)
/*
* INTENA/INTREQ bits
*/
#define DSKINDEX (0x1<<4) /* DSKINDEX bit */
/*
* Misc
*/
#define MFM_SYNC 0x4489 /* standard MFM sync value */
/* Values for FD_COMMAND */
#define FD_RECALIBRATE 0x07 /* move to track 0 */
#define FD_SEEK 0x0F /* seek track */
#define FD_READ 0xE6 /* read with MT, MFM, SKip deleted */
#define FD_WRITE 0xC5 /* write with MT, MFM */
#define FD_SENSEI 0x08 /* Sense Interrupt Status */
#define FD_SPECIFY 0x03 /* specify HUT etc */
#define FD_FORMAT 0x4D /* format one track */
#define FD_VERSION 0x10 /* get version code */
#define FD_CONFIGURE 0x13 /* configure FIFO operation */
#define FD_PERPENDICULAR 0x12 /* perpendicular r/w mode */
#define FD_MAX_UNITS 4 /* Max. Number of drives */
#define FLOPPY_MAX_SECTORS 22 /* Max. Number of sectors per track */
struct fd_data_type {
char *name; /* description of data type */
int sects; /* sectors per track */
int (*read_fkt)(int); /* read whole track */
void (*write_fkt)(int); /* write whole track */
};
struct fd_drive_type {
unsigned long code; /* code returned from drive */
char *name; /* description of drive */
unsigned int tracks; /* number of tracks */
unsigned int heads; /* number of heads */
unsigned int read_size; /* raw read size for one track */
unsigned int write_size; /* raw write size for one track */
unsigned int sect_mult; /* sectors and gap multiplier (HD = 2) */
unsigned int precomp1; /* start track for precomp 1 */
unsigned int precomp2; /* start track for precomp 2 */
unsigned int step_delay; /* time (in ms) for delay after step */
unsigned int settle_time; /* time to settle after dir change */
unsigned int side_time; /* time needed to change sides */
};
struct amiga_floppy_struct {
struct fd_drive_type *type; /* type of floppy for this unit */
struct fd_data_type *dtype; /* type of floppy for this unit */
int track; /* current track (-1 == unknown) */
unsigned char *trackbuf; /* current track (kmalloc()'d) */
int blocks; /* total # blocks on disk */
int changed; /* true when not known */
int disk; /* disk in drive (-1 == unknown) */
int motor; /* true when motor is at speed */
int busy; /* true when drive is active */
int dirty; /* true when trackbuf is not on disk */
int status; /* current error code for unit */
struct gendisk *gendisk;
struct blk_mq_tag_set tag_set;
};
/*
* Error codes
*/
@@ -164,7 +282,6 @@ static volatile int selected = -1; /* currently selected drive */
static int writepending;
static int writefromint;
static char *raw_buf;
static int fdc_queue;
static DEFINE_SPINLOCK(amiflop_lock);
@@ -1337,76 +1454,20 @@ static int get_track(int drive, int track)
return -1;
}
/*
* Round-robin between our available drives, doing one request from each
*/
static struct request *set_next_request(void)
static blk_status_t amiflop_rw_cur_segment(struct amiga_floppy_struct *floppy,
struct request *rq)
{
struct request_queue *q;
int cnt = FD_MAX_UNITS;
struct request *rq = NULL;
/* Find next queue we can dispatch from */
fdc_queue = fdc_queue + 1;
if (fdc_queue == FD_MAX_UNITS)
fdc_queue = 0;
for(cnt = FD_MAX_UNITS; cnt > 0; cnt--) {
if (unit[fdc_queue].type->code == FD_NODRIVE) {
if (++fdc_queue == FD_MAX_UNITS)
fdc_queue = 0;
continue;
}
q = unit[fdc_queue].gendisk->queue;
if (q) {
rq = blk_fetch_request(q);
if (rq)
break;
}
if (++fdc_queue == FD_MAX_UNITS)
fdc_queue = 0;
}
return rq;
}
static void redo_fd_request(void)
{
struct request *rq;
int drive = floppy - unit;
unsigned int cnt, block, track, sector;
int drive;
struct amiga_floppy_struct *floppy;
char *data;
unsigned long flags;
blk_status_t err;
next_req:
rq = set_next_request();
if (!rq) {
/* Nothing left to do */
return;
}
floppy = rq->rq_disk->private_data;
drive = floppy - unit;
next_segment:
/* Here someone could investigate to be more efficient */
for (cnt = 0, err = BLK_STS_OK; cnt < blk_rq_cur_sectors(rq); cnt++) {
for (cnt = 0; cnt < blk_rq_cur_sectors(rq); cnt++) {
#ifdef DEBUG
printk("fd: sector %ld + %d requested for %s\n",
blk_rq_pos(rq), cnt,
(rq_data_dir(rq) == READ) ? "read" : "write");
#endif
block = blk_rq_pos(rq) + cnt;
if ((int)block > floppy->blocks) {
err = BLK_STS_IOERR;
break;
}
track = block / (floppy->dtype->sects * floppy->type->sect_mult);
sector = block % (floppy->dtype->sects * floppy->type->sect_mult);
data = bio_data(rq->bio) + 512 * cnt;
@@ -1415,10 +1476,8 @@ next_segment:
"0x%08lx\n", track, sector, data);
#endif
if (get_track(drive, track) == -1) {
err = BLK_STS_IOERR;
break;
}
if (get_track(drive, track) == -1)
return BLK_STS_IOERR;
if (rq_data_dir(rq) == READ) {
memcpy(data, floppy->trackbuf + sector * 512, 512);
@@ -1426,31 +1485,40 @@ next_segment:
memcpy(floppy->trackbuf + sector * 512, data, 512);
/* keep the drive spinning while writes are scheduled */
if (!fd_motor_on(drive)) {
err = BLK_STS_IOERR;
break;
}
if (!fd_motor_on(drive))
return BLK_STS_IOERR;
/*
* setup a callback to write the track buffer
* after a short (1 tick) delay.
*/
local_irq_save(flags);
floppy->dirty = 1;
/* reset the timer */
mod_timer (flush_track_timer + drive, jiffies + 1);
local_irq_restore(flags);
}
}
if (__blk_end_request_cur(rq, err))
goto next_segment;
goto next_req;
return BLK_STS_OK;
}
static void do_fd_request(struct request_queue * q)
static blk_status_t amiflop_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
redo_fd_request();
struct request *rq = bd->rq;
struct amiga_floppy_struct *floppy = rq->rq_disk->private_data;
blk_status_t err;
if (!spin_trylock_irq(&amiflop_lock))
return BLK_STS_DEV_RESOURCE;
blk_mq_start_request(rq);
do {
err = amiflop_rw_cur_segment(floppy, rq);
} while (blk_update_request(rq, err, blk_rq_cur_bytes(rq)));
blk_mq_end_request(rq, err);
spin_unlock_irq(&amiflop_lock);
return BLK_STS_OK;
}
static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@ -1701,11 +1769,47 @@ static const struct block_device_operations floppy_fops = {
.check_events = amiga_check_events,
};
static const struct blk_mq_ops amiflop_mq_ops = {
.queue_rq = amiflop_queue_rq,
};
static struct gendisk *fd_alloc_disk(int drive)
{
struct gendisk *disk;
disk = alloc_disk(1);
if (!disk)
goto out;
disk->queue = blk_mq_init_sq_queue(&unit[drive].tag_set, &amiflop_mq_ops,
2, BLK_MQ_F_SHOULD_MERGE);
if (IS_ERR(disk->queue)) {
disk->queue = NULL;
goto out_put_disk;
}
unit[drive].trackbuf = kmalloc(FLOPPY_MAX_SECTORS * 512, GFP_KERNEL);
if (!unit[drive].trackbuf)
goto out_cleanup_queue;
return disk;
out_cleanup_queue:
blk_cleanup_queue(disk->queue);
disk->queue = NULL;
blk_mq_free_tag_set(&unit[drive].tag_set);
out_put_disk:
put_disk(disk);
out:
unit[drive].type->code = FD_NODRIVE;
return NULL;
}
static int __init fd_probe_drives(void)
{
int drive,drives,nomem;
printk(KERN_INFO "FD: probing units\nfound ");
pr_info("FD: probing units\nfound");
drives=0;
nomem=0;
for(drive=0;drive<FD_MAX_UNITS;drive++) {
@@ -1713,27 +1817,17 @@ static int __init fd_probe_drives(void)
fd_probe(drive);
if (unit[drive].type->code == FD_NODRIVE)
continue;
disk = alloc_disk(1);
disk = fd_alloc_disk(drive);
if (!disk) {
unit[drive].type->code = FD_NODRIVE;
pr_cont(" no mem for fd%d", drive);
nomem = 1;
continue;
}
unit[drive].gendisk = disk;
disk->queue = blk_init_queue(do_fd_request, &amiflop_lock);
if (!disk->queue) {
unit[drive].type->code = FD_NODRIVE;
continue;
}
drives++;
if ((unit[drive].trackbuf = kmalloc(FLOPPY_MAX_SECTORS * 512, GFP_KERNEL)) == NULL) {
printk("no mem for ");
unit[drive].type = &drive_types[num_dr_types - 1]; /* FD_NODRIVE */
drives--;
nomem = 1;
}
printk("fd%d ",drive);
pr_cont(" fd%d",drive);
disk->major = FLOPPY_MAJOR;
disk->first_minor = drive;
disk->fops = &floppy_fops;
@@ -1744,11 +1838,11 @@ static int __init fd_probe_drives(void)
}
if ((drives > 0) || (nomem == 0)) {
if (drives == 0)
printk("no drives");
printk("\n");
pr_cont(" no drives");
pr_cont("\n");
return drives;
}
printk("\n");
pr_cont("\n");
return -ENOMEM;
}
@@ -1831,30 +1925,6 @@ out_blkdev:
return ret;
}
#if 0 /* not safe to unload */
static int __exit amiga_floppy_remove(struct platform_device *pdev)
{
int i;
for( i = 0; i < FD_MAX_UNITS; i++) {
if (unit[i].type->code != FD_NODRIVE) {
struct request_queue *q = unit[i].gendisk->queue;
del_gendisk(unit[i].gendisk);
put_disk(unit[i].gendisk);
kfree(unit[i].trackbuf);
if (q)
blk_cleanup_queue(q);
}
}
blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
free_irq(IRQ_AMIGA_CIAA_TB, NULL);
free_irq(IRQ_AMIGA_DSKBLK, NULL);
custom.dmacon = DMAF_DISK; /* disable DMA */
amiga_chip_free(raw_buf);
unregister_blkdev(FLOPPY_MAJOR, "fd");
}
#endif
static struct platform_driver amiga_floppy_driver = {
.driver = {
.name = "amiga-floppy",

diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h

@@ -1,4 +1,6 @@
/* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */
#include <linux/blk-mq.h>
#define VERSION "85"
#define AOE_MAJOR 152
#define DEVICE_NAME "aoe"
@@ -164,6 +166,8 @@ struct aoedev {
struct gendisk *gd;
struct dentry *debugfs;
struct request_queue *blkq;
struct list_head rq_list;
struct blk_mq_tag_set tag_set;
struct hd_geometry geo;
sector_t ssize;
struct timer_list timer;
@@ -201,7 +205,6 @@ int aoeblk_init(void);
void aoeblk_exit(void);
void aoeblk_gdalloc(void *);
void aoedisk_rm_debugfs(struct aoedev *d);
void aoedisk_rm_sysfs(struct aoedev *d);
int aoechr_init(void);
void aoechr_exit(void);

diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c

@@ -6,7 +6,7 @@
#include <linux/kernel.h>
#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
@@ -177,10 +177,15 @@ static struct attribute *aoe_attrs[] = {
NULL,
};
static const struct attribute_group attr_group = {
static const struct attribute_group aoe_attr_group = {
.attrs = aoe_attrs,
};
static const struct attribute_group *aoe_attr_groups[] = {
&aoe_attr_group,
NULL,
};
static const struct file_operations aoe_debugfs_fops = {
.open = aoe_debugfs_open,
.read = seq_read,
@@ -219,17 +224,6 @@ aoedisk_rm_debugfs(struct aoedev *d)
d->debugfs = NULL;
}
static int
aoedisk_add_sysfs(struct aoedev *d)
{
return sysfs_create_group(&disk_to_dev(d->gd)->kobj, &attr_group);
}
void
aoedisk_rm_sysfs(struct aoedev *d)
{
sysfs_remove_group(&disk_to_dev(d->gd)->kobj, &attr_group);
}
static int
aoeblk_open(struct block_device *bdev, fmode_t mode)
{
@@ -274,23 +268,25 @@ aoeblk_release(struct gendisk *disk, fmode_t mode)
spin_unlock_irqrestore(&d->lock, flags);
}
static void
aoeblk_request(struct request_queue *q)
static blk_status_t aoeblk_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct aoedev *d;
struct request *rq;
struct aoedev *d = hctx->queue->queuedata;
spin_lock_irq(&d->lock);
d = q->queuedata;
if ((d->flags & DEVFL_UP) == 0) {
pr_info_ratelimited("aoe: device %ld.%d is not up\n",
d->aoemajor, d->aoeminor);
while ((rq = blk_peek_request(q))) {
blk_start_request(rq);
aoe_end_request(d, rq, 1);
}
return;
spin_unlock_irq(&d->lock);
blk_mq_start_request(bd->rq);
return BLK_STS_IOERR;
}
list_add_tail(&bd->rq->queuelist, &d->rq_list);
aoecmd_work(d);
spin_unlock_irq(&d->lock);
return BLK_STS_OK;
}
static int
@@ -345,6 +341,10 @@ static const struct block_device_operations aoe_bdops = {
.owner = THIS_MODULE,
};
static const struct blk_mq_ops aoeblk_mq_ops = {
.queue_rq = aoeblk_queue_rq,
};
/* alloc_disk and add_disk can sleep */
void
aoeblk_gdalloc(void *vp)
@@ -353,9 +353,11 @@ aoeblk_gdalloc(void *vp)
struct gendisk *gd;
mempool_t *mp;
struct request_queue *q;
struct blk_mq_tag_set *set;
enum { KB = 1024, MB = KB * KB, READ_AHEAD = 2 * MB, };
ulong flags;
int late = 0;
int err;
spin_lock_irqsave(&d->lock, flags);
if (d->flags & DEVFL_GDALLOC
@@ -382,10 +384,25 @@ aoeblk_gdalloc(void *vp)
d->aoemajor, d->aoeminor);
goto err_disk;
}
q = blk_init_queue(aoeblk_request, &d->lock);
if (q == NULL) {
set = &d->tag_set;
set->ops = &aoeblk_mq_ops;
set->nr_hw_queues = 1;
set->queue_depth = 128;
set->numa_node = NUMA_NO_NODE;
set->flags = BLK_MQ_F_SHOULD_MERGE;
err = blk_mq_alloc_tag_set(set);
if (err) {
pr_err("aoe: cannot allocate tag set for %ld.%d\n",
d->aoemajor, d->aoeminor);
goto err_mempool;
}
q = blk_mq_init_queue(set);
if (IS_ERR(q)) {
pr_err("aoe: cannot allocate block queue for %ld.%d\n",
d->aoemajor, d->aoeminor);
blk_mq_free_tag_set(set);
goto err_mempool;
}
@@ -417,8 +434,7 @@ aoeblk_gdalloc(void *vp)
spin_unlock_irqrestore(&d->lock, flags);
add_disk(gd);
aoedisk_add_sysfs(d);
device_add_disk(NULL, gd, aoe_attr_groups);
aoedisk_add_debugfs(d);
spin_lock_irqsave(&d->lock, flags);

diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c

@@ -7,7 +7,7 @@
#include <linux/ata.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/genhd.h>
@@ -813,7 +813,7 @@ rexmit_timer(struct timer_list *timer)
out:
if ((d->flags & DEVFL_KICKME) && d->blkq) {
d->flags &= ~DEVFL_KICKME;
d->blkq->request_fn(d->blkq);
blk_mq_run_hw_queues(d->blkq, true);
}
d->timer.expires = jiffies + TIMERTICK;
@@ -857,10 +857,12 @@ nextbuf(struct aoedev *d)
return d->ip.buf;
rq = d->ip.rq;
if (rq == NULL) {
rq = blk_peek_request(q);
rq = list_first_entry_or_null(&d->rq_list, struct request,
queuelist);
if (rq == NULL)
return NULL;
blk_start_request(rq);
list_del_init(&rq->queuelist);
blk_mq_start_request(rq);
d->ip.rq = rq;
d->ip.nxbio = rq->bio;
rq->special = (void *) rqbiocnt(rq);
@@ -1045,6 +1047,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
struct bio *bio;
int bok;
struct request_queue *q;
blk_status_t err = BLK_STS_OK;
q = d->blkq;
if (rq == d->ip.rq)
@@ -1052,11 +1055,15 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
do {
bio = rq->bio;
bok = !fastfail && !bio->bi_status;
} while (__blk_end_request(rq, bok ? BLK_STS_OK : BLK_STS_IOERR, bio->bi_iter.bi_size));
if (!bok)
err = BLK_STS_IOERR;
} while (blk_update_request(rq, bok ? BLK_STS_OK : BLK_STS_IOERR, bio->bi_iter.bi_size));
__blk_mq_end_request(rq, err);
/* cf. http://lkml.org/lkml/2006/10/31/28 */
if (!fastfail)
__blk_run_queue(q);
blk_mq_run_hw_queues(q, true);
}
static void
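
aoe keeps its deferred-dispatch model across the conversion: queue_rq
(in aoeblk.c above) only parks the request on the per-device rq_list,
and the protocol code (nextbuf() above) pulls requests off the list and
calls blk_mq_start_request() when it is actually ready to transmit. A
stripped-down sketch of that producer/consumer split, with hypothetical
mydrv_* names:

#include <linux/blk-mq.h>
#include <linux/list.h>
#include <linux/spinlock.h>

static LIST_HEAD(mydrv_rq_list);
static DEFINE_SPINLOCK(mydrv_lock);

/* producer: queue_rq only parks the request; nothing is started here */
static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
                                   const struct blk_mq_queue_data *bd)
{
        spin_lock_irq(&mydrv_lock);
        list_add_tail(&bd->rq->queuelist, &mydrv_rq_list);
        spin_unlock_irq(&mydrv_lock);
        return BLK_STS_OK;
}

/* consumer: called from the driver state machine with mydrv_lock held */
static struct request *mydrv_next_rq(void)
{
        struct request *rq;

        rq = list_first_entry_or_null(&mydrv_rq_list, struct request,
                                      queuelist);
        if (rq) {
                list_del_init(&rq->queuelist);
                blk_mq_start_request(rq);       /* now officially in flight */
        }
        return rq;
}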

diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c

@@ -5,7 +5,7 @@
*/
#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/slab.h>
@@ -197,7 +197,6 @@ aoedev_downdev(struct aoedev *d)
{
struct aoetgt *t, **tt, **te;
struct list_head *head, *pos, *nx;
struct request *rq;
int i;
d->flags &= ~DEVFL_UP;
@@ -225,10 +224,11 @@ aoedev_downdev(struct aoedev *d)
/* fast fail all pending I/O */
if (d->blkq) {
while ((rq = blk_peek_request(d->blkq))) {
blk_start_request(rq);
aoe_end_request(d, rq, 1);
}
/* UP is cleared, freeze+quiesce to ensure all are errored */
blk_mq_freeze_queue(d->blkq);
blk_mq_quiesce_queue(d->blkq);
blk_mq_unquiesce_queue(d->blkq);
blk_mq_unfreeze_queue(d->blkq);
}
if (d->gd)
@@ -275,9 +275,9 @@ freedev(struct aoedev *d)
del_timer_sync(&d->timer);
if (d->gd) {
aoedisk_rm_debugfs(d);
aoedisk_rm_sysfs(d);
del_gendisk(d->gd);
put_disk(d->gd);
blk_mq_free_tag_set(&d->tag_set);
blk_cleanup_queue(d->blkq);
}
t = d->targets;
@@ -464,6 +464,7 @@ aoedev_by_aoeaddr(ulong maj, int min, int do_alloc)
d->ntargets = NTARGETS;
INIT_WORK(&d->work, aoecmd_sleepwork);
spin_lock_init(&d->lock);
INIT_LIST_HEAD(&d->rq_list);
skb_queue_head_init(&d->skbpool);
timer_setup(&d->timer, dummy_timer, 0);
d->timer.expires = jiffies + HZ;

diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c

@@ -66,13 +66,11 @@
#include <linux/fd.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/wait.h>
#include <asm/atafd.h>
#include <asm/atafdreg.h>
#include <asm/atariints.h>
#include <asm/atari_stdma.h>
#include <asm/atari_stram.h>
@@ -83,7 +81,87 @@
static DEFINE_MUTEX(ataflop_mutex);
static struct request *fd_request;
static int fdc_queue;
/*
* WD1772 stuff
*/
/* register codes */
#define FDCSELREG_STP (0x80) /* command/status register */
#define FDCSELREG_TRA (0x82) /* track register */
#define FDCSELREG_SEC (0x84) /* sector register */
#define FDCSELREG_DTA (0x86) /* data register */
/* register names for FDC_READ/WRITE macros */
#define FDCREG_CMD 0
#define FDCREG_STATUS 0
#define FDCREG_TRACK 2
#define FDCREG_SECTOR 4
#define FDCREG_DATA 6
/* command opcodes */
#define FDCCMD_RESTORE (0x00) /* - */
#define FDCCMD_SEEK (0x10) /* | */
#define FDCCMD_STEP (0x20) /* | TYP 1 Commands */
#define FDCCMD_STIN (0x40) /* | */
#define FDCCMD_STOT (0x60) /* - */
#define FDCCMD_RDSEC (0x80) /* - TYP 2 Commands */
#define FDCCMD_WRSEC (0xa0) /* - " */
#define FDCCMD_RDADR (0xc0) /* - */
#define FDCCMD_RDTRA (0xe0) /* | TYP 3 Commands */
#define FDCCMD_WRTRA (0xf0) /* - */
#define FDCCMD_FORCI (0xd0) /* - TYP 4 Command */
/* command modifier bits */
#define FDCCMDADD_SR6 (0x00) /* step rate settings */
#define FDCCMDADD_SR12 (0x01)
#define FDCCMDADD_SR2 (0x02)
#define FDCCMDADD_SR3 (0x03)
#define FDCCMDADD_V (0x04) /* verify */
#define FDCCMDADD_H (0x08) /* wait for spin-up */
#define FDCCMDADD_U (0x10) /* update track register */
#define FDCCMDADD_M (0x10) /* multiple sector access */
#define FDCCMDADD_E (0x04) /* head settling flag */
#define FDCCMDADD_P (0x02) /* precompensation off */
#define FDCCMDADD_A0 (0x01) /* DAM flag */
/* status register bits */
#define FDCSTAT_MOTORON (0x80) /* motor on */
#define FDCSTAT_WPROT (0x40) /* write protected (FDCCMD_WR*) */
#define FDCSTAT_SPINUP (0x20) /* motor speed stable (Type I) */
#define FDCSTAT_DELDAM (0x20) /* sector has deleted DAM (Type II+III) */
#define FDCSTAT_RECNF (0x10) /* record not found */
#define FDCSTAT_CRC (0x08) /* CRC error */
#define FDCSTAT_TR00 (0x04) /* Track 00 flag (Type I) */
#define FDCSTAT_LOST (0x04) /* Lost Data (Type II+III) */
#define FDCSTAT_IDX (0x02) /* Index status (Type I) */
#define FDCSTAT_DRQ (0x02) /* DRQ status (Type II+III) */
#define FDCSTAT_BUSY (0x01) /* FDC is busy */
/* PSG Port A Bit Nr 0 .. Side Sel .. 0 -> Side 1 1 -> Side 2 */
#define DSKSIDE (0x01)
#define DSKDRVNONE (0x06)
#define DSKDRV0 (0x02)
#define DSKDRV1 (0x04)
/* step rates */
#define FDCSTEP_6 0x00
#define FDCSTEP_12 0x01
#define FDCSTEP_2 0x02
#define FDCSTEP_3 0x03
struct atari_format_descr {
int track; /* to be formatted */
int head; /* "" "" */
int sect_offset; /* offset of first sector */
};
/* Disk types: DD, HD, ED */
static struct atari_disk_type {
@@ -221,6 +299,7 @@ static struct atari_floppy_struct {
struct gendisk *disk;
int ref;
int type;
struct blk_mq_tag_set tag_set;
} unit[FD_MAX_UNITS];
#define UD unit[drive]
@@ -300,9 +379,6 @@ static int IsFormatting = 0, FormatError;
static int UserSteprate[FD_MAX_UNITS] = { -1, -1 };
module_param_array(UserSteprate, int, NULL, 0);
/* Synchronization of FDC access. */
static volatile int fdc_busy = 0;
static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
static DECLARE_COMPLETION(format_wait);
static unsigned long changed_floppies = 0xff, fake_change = 0;
@@ -362,7 +438,6 @@ static void fd_times_out(struct timer_list *unused);
static void finish_fdc( void );
static void finish_fdc_done( int dummy );
static void setup_req_params( int drive );
static void redo_fd_request( void);
static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
cmd, unsigned long param);
static void fd_probe( int drive );
@@ -380,8 +455,11 @@ static DEFINE_TIMER(fd_timer, check_change);
static void fd_end_request_cur(blk_status_t err)
{
if (!__blk_end_request_cur(fd_request, err))
if (!blk_update_request(fd_request, err,
blk_rq_cur_bytes(fd_request))) {
__blk_mq_end_request(fd_request, err);
fd_request = NULL;
}
}
static inline void start_motor_off_timer(void)
@@ -627,7 +705,6 @@ static void fd_error( void )
if (SelectedDrive != -1)
SUD.track = -1;
}
redo_fd_request();
}
@@ -645,14 +722,15 @@ static void fd_error( void )
static int do_format(int drive, int type, struct atari_format_descr *desc)
{
struct request_queue *q = unit[drive].disk->queue;
unsigned char *p;
int sect, nsect;
unsigned long flags;
int ret;
DPRINT(("do_format( dr=%d tr=%d he=%d offs=%d )\n",
drive, desc->track, desc->head, desc->sect_offset ));
blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
wait_event(fdc_wait, cmpxchg(&fdc_busy, 0, 1) == 0);
local_irq_save(flags);
stdma_lock(floppy_irq, NULL);
atari_turnon_irq( IRQ_MFP_FDC ); /* should be already, just to be sure */
@@ -661,16 +739,16 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
if (type) {
if (--type >= NUM_DISK_MINORS ||
minor2disktype[type].drive_types > DriveType) {
redo_fd_request();
return -EINVAL;
ret = -EINVAL;
goto out;
}
type = minor2disktype[type].index;
UDT = &atari_disk_type[type];
}
if (!UDT || desc->track >= UDT->blocks/UDT->spt/2 || desc->head >= 2) {
redo_fd_request();
return -EINVAL;
ret = -EINVAL;
goto out;
}
nsect = UDT->spt;
@@ -709,8 +787,11 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
wait_for_completion(&format_wait);
redo_fd_request();
return( FormatError ? -EIO : 0 );
ret = FormatError ? -EIO : 0;
out:
blk_mq_unquiesce_queue(q);
blk_mq_unfreeze_queue(q);
return ret;
}
@@ -740,7 +821,6 @@ static void do_fd_action( int drive )
else {
/* all sectors finished */
fd_end_request_cur(BLK_STS_OK);
redo_fd_request();
return;
}
}
@@ -1145,7 +1225,6 @@ static void fd_rwsec_done1(int status)
else {
/* all sectors finished */
fd_end_request_cur(BLK_STS_OK);
redo_fd_request();
}
return;
@@ -1303,8 +1382,6 @@ static void finish_fdc_done( int dummy )
local_irq_save(flags);
stdma_release();
fdc_busy = 0;
wake_up( &fdc_wait );
local_irq_restore(flags);
DPRINT(("finish_fdc() finished\n"));
@@ -1394,59 +1471,34 @@ static void setup_req_params( int drive )
ReqTrack, ReqSector, (unsigned long)ReqData ));
}
/*
* Round-robin between our available drives, doing one request from each
*/
static struct request *set_next_request(void)
static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct request_queue *q;
int old_pos = fdc_queue;
struct request *rq = NULL;
struct atari_floppy_struct *floppy = bd->rq->rq_disk->private_data;
int drive = floppy - unit;
int type = floppy->type;
do {
q = unit[fdc_queue].disk->queue;
if (++fdc_queue == FD_MAX_UNITS)
fdc_queue = 0;
if (q) {
rq = blk_fetch_request(q);
if (rq) {
rq->error_count = 0;
break;
}
}
} while (fdc_queue != old_pos);
spin_lock_irq(&ataflop_lock);
if (fd_request) {
spin_unlock_irq(&ataflop_lock);
return BLK_STS_DEV_RESOURCE;
}
if (!stdma_try_lock(floppy_irq, NULL)) {
spin_unlock_irq(&ataflop_lock);
return BLK_STS_RESOURCE;
}
fd_request = bd->rq;
blk_mq_start_request(fd_request);
return rq;
}
static void redo_fd_request(void)
{
int drive, type;
struct atari_floppy_struct *floppy;
DPRINT(("redo_fd_request: fd_request=%p dev=%s fd_request->sector=%ld\n",
fd_request, fd_request ? fd_request->rq_disk->disk_name : "",
fd_request ? blk_rq_pos(fd_request) : 0 ));
atari_disable_irq( IRQ_MFP_FDC );
IsFormatting = 0;
repeat:
if (!fd_request) {
fd_request = set_next_request();
if (!fd_request)
goto the_end;
}
floppy = fd_request->rq_disk->private_data;
drive = floppy - unit;
type = floppy->type;
if (!UD.connected) {
/* drive not connected */
printk(KERN_ERR "Unknown Device: fd%d\n", drive );
fd_end_request_cur(BLK_STS_IOERR);
goto repeat;
goto out;
}
if (type == 0) {
@@ -1462,23 +1514,18 @@ repeat:
if (--type >= NUM_DISK_MINORS) {
printk(KERN_WARNING "fd%d: invalid disk format", drive );
fd_end_request_cur(BLK_STS_IOERR);
goto repeat;
goto out;
}
if (minor2disktype[type].drive_types > DriveType) {
printk(KERN_WARNING "fd%d: unsupported disk format", drive );
fd_end_request_cur(BLK_STS_IOERR);
goto repeat;
goto out;
}
type = minor2disktype[type].index;
UDT = &atari_disk_type[type];
set_capacity(floppy->disk, UDT->blocks);
UD.autoprobe = 0;
}
if (blk_rq_pos(fd_request) + 1 > UDT->blocks) {
fd_end_request_cur(BLK_STS_IOERR);
goto repeat;
}
/* stop deselect timer */
del_timer( &motor_off_timer );
@@ -1490,22 +1537,13 @@ repeat:
setup_req_params( drive );
do_fd_action( drive );
return;
the_end:
finish_fdc();
}
void do_fd_request(struct request_queue * q)
{
DPRINT(("do_fd_request for pid %d\n",current->pid));
wait_event(fdc_wait, cmpxchg(&fdc_busy, 0, 1) == 0);
stdma_lock(floppy_irq, NULL);
atari_disable_irq( IRQ_MFP_FDC );
redo_fd_request();
if (bd->last)
finish_fdc();
atari_enable_irq( IRQ_MFP_FDC );
out:
spin_unlock_irq(&ataflop_lock);
return BLK_STS_OK;
}
static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
@@ -1583,7 +1621,6 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
/* what if type > 0 here? Overwrite specified entry ? */
if (type) {
/* refuse to re-set a predefined type for now */
redo_fd_request();
return -EINVAL;
}
@@ -1651,10 +1688,8 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
/* sanity check */
if (setprm.track != dtp->blocks/dtp->spt/2 ||
setprm.head != 2) {
redo_fd_request();
setprm.head != 2)
return -EINVAL;
}
UDT = dtp;
set_capacity(floppy->disk, UDT->blocks);
@@ -1910,6 +1945,10 @@ static const struct block_device_operations floppy_fops = {
.revalidate_disk= floppy_revalidate,
};
static const struct blk_mq_ops ataflop_mq_ops = {
.queue_rq = ataflop_queue_rq,
};
static struct kobject *floppy_find(dev_t dev, int *part, void *data)
{
int drive = *part & 3;
@@ -1923,6 +1962,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
static int __init atari_floppy_init (void)
{
int i;
int ret;
if (!MACH_IS_ATARI)
/* Amiga, Mac, ... don't have Atari-compatible floppy :-) */
@@ -1933,8 +1973,19 @@ static int __init atari_floppy_init (void)
for (i = 0; i < FD_MAX_UNITS; i++) {
unit[i].disk = alloc_disk(1);
if (!unit[i].disk)
goto Enomem;
if (!unit[i].disk) {
ret = -ENOMEM;
goto err;
}
unit[i].disk->queue = blk_mq_init_sq_queue(&unit[i].tag_set,
&ataflop_mq_ops, 2,
BLK_MQ_F_SHOULD_MERGE);
if (IS_ERR(unit[i].disk->queue)) {
ret = PTR_ERR(unit[i].disk->queue);
unit[i].disk->queue = NULL;
goto err;
}
}
if (UseTrackbuffer < 0)
@@ -1951,7 +2002,8 @@ static int __init atari_floppy_init (void)
DMABuffer = atari_stram_alloc(BUFFER_SIZE+512, "ataflop");
if (!DMABuffer) {
printk(KERN_ERR "atari_floppy_init: cannot get dma buffer\n");
goto Enomem;
ret = -ENOMEM;
goto err;
}
TrackBuffer = DMABuffer + 512;
PhysDMABuffer = atari_stram_to_phys(DMABuffer);
@@ -1966,10 +2018,6 @@ static int __init atari_floppy_init (void)
sprintf(unit[i].disk->disk_name, "fd%d", i);
unit[i].disk->fops = &floppy_fops;
unit[i].disk->private_data = &unit[i];
unit[i].disk->queue = blk_init_queue(do_fd_request,
&ataflop_lock);
if (!unit[i].disk->queue)
goto Enomem;
set_capacity(unit[i].disk, MAX_DISK_SIZE * 2);
add_disk(unit[i].disk);
}
@@ -1983,17 +2031,23 @@ static int __init atari_floppy_init (void)
config_types();
return 0;
Enomem:
while (i--) {
struct request_queue *q = unit[i].disk->queue;
put_disk(unit[i].disk);
if (q)
blk_cleanup_queue(q);
}
err:
do {
struct gendisk *disk = unit[i].disk;
if (disk) {
if (disk->queue) {
blk_cleanup_queue(disk->queue);
disk->queue = NULL;
}
blk_mq_free_tag_set(&unit[i].tag_set);
put_disk(unit[i].disk);
}
} while (i--);
unregister_blkdev(FLOPPY_MAJOR, "fd");
return -ENOMEM;
return ret;
}
#ifndef MODULE
@@ -2040,11 +2094,10 @@ static void __exit atari_floppy_exit(void)
int i;
blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
for (i = 0; i < FD_MAX_UNITS; i++) {
struct request_queue *q = unit[i].disk->queue;
del_gendisk(unit[i].disk);
blk_cleanup_queue(unit[i].disk->queue);
blk_mq_free_tag_set(&unit[i].tag_set);
put_disk(unit[i].disk);
blk_cleanup_queue(q);
}
unregister_blkdev(FLOPPY_MAJOR, "fd");
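
ataflop_queue_rq above distinguishes the two resource-shortage returns:
BLK_STS_DEV_RESOURCE when the driver itself guarantees a queue re-run
(here, when the in-flight fd_request completes), and BLK_STS_RESOURCE
when it cannot (the shared ST-DMA lock is held elsewhere), in which case
blk-mq retries after a delay. A sketch of that split, reflecting my
reading of the 4.20 semantics, with hypothetical mydrv_* names:

#include <linux/blk-mq.h>

/* hypothetical stand-ins for ataflop's fd_request and ST-DMA checks */
static bool mydrv_busy;
static bool mydrv_try_lock_controller(void)
{
        return true;
}

static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
                                   const struct blk_mq_queue_data *bd)
{
        if (mydrv_busy)
                /* the driver re-runs the queue when the current request
                 * completes, so blk-mq need not retry on its own */
                return BLK_STS_DEV_RESOURCE;
        if (!mydrv_try_lock_controller())
                /* nobody promises a re-run; blk-mq retries after a delay */
                return BLK_STS_RESOURCE;
        blk_mq_start_request(bd->rq);
        /* ... */
        return BLK_STS_OK;
}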

diff --git a/drivers/block/drbd/Kconfig b/drivers/block/drbd/Kconfig

@@ -11,7 +11,6 @@ config BLK_DEV_DRBD
depends on PROC_FS && INET
select LRU_CACHE
select LIBCRC32C
default n
help
NOTE: In order to authenticate connections you have to select

diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h

@@ -429,7 +429,7 @@ enum {
__EE_CALL_AL_COMPLETE_IO,
__EE_MAY_SET_IN_SYNC,
/* is this a TRIM aka REQ_DISCARD? */
/* is this a TRIM aka REQ_OP_DISCARD? */
__EE_IS_TRIM,
/* In case a barrier failed,
@@ -724,10 +724,10 @@ struct drbd_connection {
struct list_head transfer_log; /* all requests not yet fully processed */
struct crypto_shash *cram_hmac_tfm;
struct crypto_ahash *integrity_tfm; /* checksums we compute, updates protected by connection->data->mutex */
struct crypto_ahash *peer_integrity_tfm; /* checksums we verify, only accessed from receiver thread */
struct crypto_ahash *csums_tfm;
struct crypto_ahash *verify_tfm;
struct crypto_shash *integrity_tfm; /* checksums we compute, updates protected by connection->data->mutex */
struct crypto_shash *peer_integrity_tfm; /* checksums we verify, only accessed from receiver thread */
struct crypto_shash *csums_tfm;
struct crypto_shash *verify_tfm;
void *int_dig_in;
void *int_dig_vv;
@@ -1531,8 +1531,9 @@ static inline void ov_out_of_sync_print(struct drbd_device *device)
}
extern void drbd_csum_bio(struct crypto_ahash *, struct bio *, void *);
extern void drbd_csum_ee(struct crypto_ahash *, struct drbd_peer_request *, void *);
extern void drbd_csum_bio(struct crypto_shash *, struct bio *, void *);
extern void drbd_csum_ee(struct crypto_shash *, struct drbd_peer_request *,
void *);
/* worker callbacks */
extern int w_e_end_data_req(struct drbd_work *, int);
extern int w_e_end_rsdata_req(struct drbd_work *, int);

diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c

@@ -1377,7 +1377,7 @@ void drbd_send_ack_dp(struct drbd_peer_device *peer_device, enum drbd_packet cmd
struct p_data *dp, int data_size)
{
if (peer_device->connection->peer_integrity_tfm)
data_size -= crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm);
data_size -= crypto_shash_digestsize(peer_device->connection->peer_integrity_tfm);
_drbd_send_ack(peer_device, cmd, dp->sector, cpu_to_be32(data_size),
dp->block_id);
}
@@ -1673,7 +1673,7 @@ static u32 bio_flags_to_wire(struct drbd_connection *connection,
return bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0;
}
/* Used to send write or TRIM aka REQ_DISCARD requests
/* Used to send write or TRIM aka REQ_OP_DISCARD requests
* R_PRIMARY -> Peer (P_DATA, P_TRIM)
*/
int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *req)
@@ -1690,7 +1690,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
sock = &peer_device->connection->data;
p = drbd_prepare_command(peer_device, sock);
digest_size = peer_device->connection->integrity_tfm ?
crypto_ahash_digestsize(peer_device->connection->integrity_tfm) : 0;
crypto_shash_digestsize(peer_device->connection->integrity_tfm) : 0;
if (!p)
return -EIO;
@@ -1796,7 +1796,7 @@ int drbd_send_block(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
p = drbd_prepare_command(peer_device, sock);
digest_size = peer_device->connection->integrity_tfm ?
crypto_ahash_digestsize(peer_device->connection->integrity_tfm) : 0;
crypto_shash_digestsize(peer_device->connection->integrity_tfm) : 0;
if (!p)
return -EIO;
@@ -2557,11 +2557,11 @@ void conn_free_crypto(struct drbd_connection *connection)
{
drbd_free_sock(connection);
crypto_free_ahash(connection->csums_tfm);
crypto_free_ahash(connection->verify_tfm);
crypto_free_shash(connection->csums_tfm);
crypto_free_shash(connection->verify_tfm);
crypto_free_shash(connection->cram_hmac_tfm);
crypto_free_ahash(connection->integrity_tfm);
crypto_free_ahash(connection->peer_integrity_tfm);
crypto_free_shash(connection->integrity_tfm);
crypto_free_shash(connection->peer_integrity_tfm);
kfree(connection->int_dig_in);
kfree(connection->int_dig_vv);

diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c

@@ -2303,10 +2303,10 @@ check_net_options(struct drbd_connection *connection, struct net_conf *new_net_c
}
struct crypto {
struct crypto_ahash *verify_tfm;
struct crypto_ahash *csums_tfm;
struct crypto_shash *verify_tfm;
struct crypto_shash *csums_tfm;
struct crypto_shash *cram_hmac_tfm;
struct crypto_ahash *integrity_tfm;
struct crypto_shash *integrity_tfm;
};
static int
@@ -2324,36 +2324,21 @@ alloc_shash(struct crypto_shash **tfm, char *tfm_name, int err_alg)
return NO_ERROR;
}
static int
alloc_ahash(struct crypto_ahash **tfm, char *tfm_name, int err_alg)
{
if (!tfm_name[0])
return NO_ERROR;
*tfm = crypto_alloc_ahash(tfm_name, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(*tfm)) {
*tfm = NULL;
return err_alg;
}
return NO_ERROR;
}
static enum drbd_ret_code
alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf)
{
char hmac_name[CRYPTO_MAX_ALG_NAME];
enum drbd_ret_code rv;
rv = alloc_ahash(&crypto->csums_tfm, new_net_conf->csums_alg,
rv = alloc_shash(&crypto->csums_tfm, new_net_conf->csums_alg,
ERR_CSUMS_ALG);
if (rv != NO_ERROR)
return rv;
rv = alloc_ahash(&crypto->verify_tfm, new_net_conf->verify_alg,
rv = alloc_shash(&crypto->verify_tfm, new_net_conf->verify_alg,
ERR_VERIFY_ALG);
if (rv != NO_ERROR)
return rv;
rv = alloc_ahash(&crypto->integrity_tfm, new_net_conf->integrity_alg,
rv = alloc_shash(&crypto->integrity_tfm, new_net_conf->integrity_alg,
ERR_INTEGRITY_ALG);
if (rv != NO_ERROR)
return rv;
@@ -2371,9 +2356,9 @@ alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf)
static void free_crypto(struct crypto *crypto)
{
crypto_free_shash(crypto->cram_hmac_tfm);
crypto_free_ahash(crypto->integrity_tfm);
crypto_free_ahash(crypto->csums_tfm);
crypto_free_ahash(crypto->verify_tfm);
crypto_free_shash(crypto->integrity_tfm);
crypto_free_shash(crypto->csums_tfm);
crypto_free_shash(crypto->verify_tfm);
}
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
@@ -2450,17 +2435,17 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
rcu_assign_pointer(connection->net_conf, new_net_conf);
if (!rsr) {
crypto_free_ahash(connection->csums_tfm);
crypto_free_shash(connection->csums_tfm);
connection->csums_tfm = crypto.csums_tfm;
crypto.csums_tfm = NULL;
}
if (!ovr) {
crypto_free_ahash(connection->verify_tfm);
crypto_free_shash(connection->verify_tfm);
connection->verify_tfm = crypto.verify_tfm;
crypto.verify_tfm = NULL;
}
crypto_free_ahash(connection->integrity_tfm);
crypto_free_shash(connection->integrity_tfm);
connection->integrity_tfm = crypto.integrity_tfm;
if (connection->cstate >= C_WF_REPORT_PARAMS && connection->agreed_pro_version >= 100)
/* Do this without trying to take connection->data.mutex again. */

diff --git a/drivers/block/drbd/drbd_protocol.h b/drivers/block/drbd/drbd_protocol.h

@@ -57,7 +57,7 @@ enum drbd_packet {
P_PROTOCOL_UPDATE = 0x2d, /* data sock: is used in established connections */
/* 0x2e to 0x30 reserved, used in drbd 9 */
/* REQ_DISCARD. We used "discard" in different contexts before,
/* REQ_OP_DISCARD. We used "discard" in different contexts before,
* which is why I chose TRIM here, to disambiguate. */
P_TRIM = 0x31,
@@ -126,7 +126,7 @@ struct p_header100 {
#define DP_UNPLUG 8 /* not used anymore */
#define DP_FUA 16 /* equals REQ_FUA */
#define DP_FLUSH 32 /* equals REQ_PREFLUSH */
#define DP_DISCARD 64 /* equals REQ_DISCARD */
#define DP_DISCARD 64 /* equals REQ_OP_DISCARD */
#define DP_SEND_RECEIVE_ACK 128 /* This is a proto B write request */
#define DP_SEND_WRITE_ACK 256 /* This is a proto C write request */
#define DP_WSAME 512 /* equiv. REQ_WRITE_SAME */

diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c

@@ -1732,7 +1732,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
}
/* quick wrapper in case payload size != request_size (write same) */
static void drbd_csum_ee_size(struct crypto_ahash *h,
static void drbd_csum_ee_size(struct crypto_shash *h,
struct drbd_peer_request *r, void *d,
unsigned int payload_size)
{
@@ -1769,7 +1769,7 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
digest_size = 0;
if (!trim && peer_device->connection->peer_integrity_tfm) {
digest_size = crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm);
digest_size = crypto_shash_digestsize(peer_device->connection->peer_integrity_tfm);
/*
* FIXME: Receive the incoming digest into the receive buffer
* here, together with its struct p_data?
@@ -1905,7 +1905,7 @@ static int recv_dless_read(struct drbd_peer_device *peer_device, struct drbd_req
digest_size = 0;
if (peer_device->connection->peer_integrity_tfm) {
digest_size = crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm);
digest_size = crypto_shash_digestsize(peer_device->connection->peer_integrity_tfm);
err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size);
if (err)
return err;
@@ -3542,7 +3542,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
int p_proto, p_discard_my_data, p_two_primaries, cf;
struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
char integrity_alg[SHARED_SECRET_MAX] = "";
struct crypto_ahash *peer_integrity_tfm = NULL;
struct crypto_shash *peer_integrity_tfm = NULL;
void *int_dig_in = NULL, *int_dig_vv = NULL;
p_proto = be32_to_cpu(p->protocol);
@@ -3623,7 +3623,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
* change.
*/
peer_integrity_tfm = crypto_alloc_ahash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
peer_integrity_tfm = crypto_alloc_shash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(peer_integrity_tfm)) {
peer_integrity_tfm = NULL;
drbd_err(connection, "peer data-integrity-alg %s not supported\n",
@@ -3631,7 +3631,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
goto disconnect;
}
hash_size = crypto_ahash_digestsize(peer_integrity_tfm);
hash_size = crypto_shash_digestsize(peer_integrity_tfm);
int_dig_in = kmalloc(hash_size, GFP_KERNEL);
int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
if (!(int_dig_in && int_dig_vv)) {
@@ -3661,7 +3661,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
mutex_unlock(&connection->resource->conf_update);
mutex_unlock(&connection->data.mutex);
crypto_free_ahash(connection->peer_integrity_tfm);
crypto_free_shash(connection->peer_integrity_tfm);
kfree(connection->int_dig_in);
kfree(connection->int_dig_vv);
connection->peer_integrity_tfm = peer_integrity_tfm;
@@ -3679,7 +3679,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
disconnect_rcu_unlock:
rcu_read_unlock();
disconnect:
crypto_free_ahash(peer_integrity_tfm);
crypto_free_shash(peer_integrity_tfm);
kfree(int_dig_in);
kfree(int_dig_vv);
conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
@@ -3691,15 +3691,16 @@ disconnect:
* return: NULL (alg name was "")
* ERR_PTR(error) if something goes wrong
* or the crypto hash ptr, if it worked out ok. */
static struct crypto_ahash *drbd_crypto_alloc_digest_safe(const struct drbd_device *device,
static struct crypto_shash *drbd_crypto_alloc_digest_safe(
const struct drbd_device *device,
const char *alg, const char *name)
{
struct crypto_ahash *tfm;
struct crypto_shash *tfm;
if (!alg[0])
return NULL;
tfm = crypto_alloc_ahash(alg, 0, CRYPTO_ALG_ASYNC);
tfm = crypto_alloc_shash(alg, 0, 0);
if (IS_ERR(tfm)) {
drbd_err(device, "Can not allocate \"%s\" as %s (reason: %ld)\n",
alg, name, PTR_ERR(tfm));
@@ -3752,8 +3753,8 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
struct drbd_device *device;
struct p_rs_param_95 *p;
unsigned int header_size, data_size, exp_max_sz;
struct crypto_ahash *verify_tfm = NULL;
struct crypto_ahash *csums_tfm = NULL;
struct crypto_shash *verify_tfm = NULL;
struct crypto_shash *csums_tfm = NULL;
struct net_conf *old_net_conf, *new_net_conf = NULL;
struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
const int apv = connection->agreed_pro_version;
@@ -3900,14 +3901,14 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
if (verify_tfm) {
strcpy(new_net_conf->verify_alg, p->verify_alg);
new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
crypto_free_ahash(peer_device->connection->verify_tfm);
crypto_free_shash(peer_device->connection->verify_tfm);
peer_device->connection->verify_tfm = verify_tfm;
drbd_info(device, "using verify-alg: \"%s\"\n", p->verify_alg);
}
if (csums_tfm) {
strcpy(new_net_conf->csums_alg, p->csums_alg);
new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
crypto_free_ahash(peer_device->connection->csums_tfm);
crypto_free_shash(peer_device->connection->csums_tfm);
peer_device->connection->csums_tfm = csums_tfm;
drbd_info(device, "using csums-alg: \"%s\"\n", p->csums_alg);
}
@@ -3951,9 +3952,9 @@ disconnect:
mutex_unlock(&connection->resource->conf_update);
/* just for completeness: actually not needed,
* as this is not reached if csums_tfm was ok. */
crypto_free_ahash(csums_tfm);
crypto_free_shash(csums_tfm);
/* but free the verify_tfm again, if csums_tfm did not work out */
crypto_free_ahash(verify_tfm);
crypto_free_shash(verify_tfm);
conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
return -EIO;
}

diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c

@@ -650,7 +650,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
case DISCARD_COMPLETED_NOTSUPP:
case DISCARD_COMPLETED_WITH_ERROR:
/* I'd rather not detach from local disk just because it
* failed a REQ_DISCARD. */
* failed a REQ_OP_DISCARD. */
mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
break;

diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c

@@ -152,7 +152,7 @@ void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(l
do_wake = list_empty(block_id == ID_SYNCER ? &device->sync_ee : &device->active_ee);
/* FIXME do we want to detach for failed REQ_DISCARD?
/* FIXME do we want to detach for failed REQ_OP_DISCARD?
* ((peer_req->flags & (EE_WAS_ERROR|EE_IS_TRIM)) == EE_WAS_ERROR) */
if (peer_req->flags & EE_WAS_ERROR)
__drbd_chk_io_error(device, DRBD_WRITE_ERROR);
@@ -295,60 +295,61 @@ void drbd_request_endio(struct bio *bio)
complete_master_bio(device, &m);
}
void drbd_csum_ee(struct crypto_ahash *tfm, struct drbd_peer_request *peer_req, void *digest)
void drbd_csum_ee(struct crypto_shash *tfm, struct drbd_peer_request *peer_req, void *digest)
{
AHASH_REQUEST_ON_STACK(req, tfm);
struct scatterlist sg;
SHASH_DESC_ON_STACK(desc, tfm);
struct page *page = peer_req->pages;
struct page *tmp;
unsigned len;
void *src;
ahash_request_set_tfm(req, tfm);
ahash_request_set_callback(req, 0, NULL, NULL);
desc->tfm = tfm;
desc->flags = 0;
sg_init_table(&sg, 1);
crypto_ahash_init(req);
crypto_shash_init(desc);
src = kmap_atomic(page);
while ((tmp = page_chain_next(page))) {
/* all but the last page will be fully used */
sg_set_page(&sg, page, PAGE_SIZE, 0);
ahash_request_set_crypt(req, &sg, NULL, sg.length);
crypto_ahash_update(req);
crypto_shash_update(desc, src, PAGE_SIZE);
kunmap_atomic(src);
page = tmp;
src = kmap_atomic(page);
}
/* and now the last, possibly only partially used page */
len = peer_req->i.size & (PAGE_SIZE - 1);
sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
ahash_request_set_crypt(req, &sg, digest, sg.length);
crypto_ahash_finup(req);
ahash_request_zero(req);
crypto_shash_update(desc, src, len ?: PAGE_SIZE);
kunmap_atomic(src);
crypto_shash_final(desc, digest);
shash_desc_zero(desc);
}
void drbd_csum_bio(struct crypto_ahash *tfm, struct bio *bio, void *digest)
void drbd_csum_bio(struct crypto_shash *tfm, struct bio *bio, void *digest)
{
AHASH_REQUEST_ON_STACK(req, tfm);
struct scatterlist sg;
SHASH_DESC_ON_STACK(desc, tfm);
struct bio_vec bvec;
struct bvec_iter iter;
ahash_request_set_tfm(req, tfm);
ahash_request_set_callback(req, 0, NULL, NULL);
desc->tfm = tfm;
desc->flags = 0;
sg_init_table(&sg, 1);
crypto_ahash_init(req);
crypto_shash_init(desc);
bio_for_each_segment(bvec, bio, iter) {
sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
ahash_request_set_crypt(req, &sg, NULL, sg.length);
crypto_ahash_update(req);
u8 *src;
src = kmap_atomic(bvec.bv_page);
crypto_shash_update(desc, src + bvec.bv_offset, bvec.bv_len);
kunmap_atomic(src);
/* REQ_OP_WRITE_SAME has only one segment,
* checksum the payload only once. */
if (bio_op(bio) == REQ_OP_WRITE_SAME)
break;
}
ahash_request_set_crypt(req, NULL, digest, 0);
crypto_ahash_final(req);
ahash_request_zero(req);
crypto_shash_final(desc, digest);
shash_desc_zero(desc);
}
/* MAYBE merge common code with w_e_end_ov_req */
@@ -367,7 +368,7 @@ static int w_e_send_csum(struct drbd_work *w, int cancel)
if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0))
goto out;
digest_size = crypto_ahash_digestsize(peer_device->connection->csums_tfm);
digest_size = crypto_shash_digestsize(peer_device->connection->csums_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (digest) {
sector_t sector = peer_req->i.sector;
@@ -1205,7 +1206,7 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
* a real fix would be much more involved,
* introducing more locking mechanisms */
if (peer_device->connection->csums_tfm) {
digest_size = crypto_ahash_digestsize(peer_device->connection->csums_tfm);
digest_size = crypto_shash_digestsize(peer_device->connection->csums_tfm);
D_ASSERT(device, digest_size == di->digest_size);
digest = kmalloc(digest_size, GFP_NOIO);
}
@@ -1255,7 +1256,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
if (unlikely(cancel))
goto out;
digest_size = crypto_ahash_digestsize(peer_device->connection->verify_tfm);
digest_size = crypto_shash_digestsize(peer_device->connection->verify_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (!digest) {
err = 1; /* terminate the connection in case the allocation failed */
@@ -1327,7 +1328,7 @@ int w_e_end_ov_reply(struct drbd_work *w, int cancel)
di = peer_req->digest;
if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
digest_size = crypto_ahash_digestsize(peer_device->connection->verify_tfm);
digest_size = crypto_shash_digestsize(peer_device->connection->verify_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (digest) {
drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest);
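
The drbd hunks above convert from the ahash API (scatterlists plus
AHASH_REQUEST_ON_STACK) to the synchronous shash API, which takes a
stack descriptor and linear buffers directly. A self-contained sketch of
the shash pattern as of 4.20, using the hypothetical helper
hash_buffer() rather than drbd's own functions:

#include <crypto/hash.h>
#include <linux/err.h>

static int hash_buffer(const char *alg, const void *data, unsigned int len,
                       u8 *digest)
{
        struct crypto_shash *tfm;
        int err;

        tfm = crypto_alloc_shash(alg, 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        /* digest must hold crypto_shash_digestsize(tfm) bytes */
        {
                SHASH_DESC_ON_STACK(desc, tfm);

                desc->tfm = tfm;
                desc->flags = 0;        /* field still present in 4.20 */

                err = crypto_shash_init(desc) ?:
                      crypto_shash_update(desc, data, len) ?:
                      crypto_shash_final(desc, digest);
                shash_desc_zero(desc);
        }

        crypto_free_shash(tfm);
        return err;
}

Because shash consumes virtual addresses, the drbd_csum_ee()/
drbd_csum_bio() conversions above kmap each page or bvec before calling
crypto_shash_update(), instead of building a scatterlist.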

diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c

@@ -252,13 +252,13 @@ static int allowed_drive_mask = 0x33;
static int irqdma_allocated;
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blkpg.h>
#include <linux/cdrom.h> /* for the compatibility eject ioctl */
#include <linux/completion.h>
static LIST_HEAD(floppy_reqs);
static struct request *current_req;
static void do_fd_request(struct request_queue *q);
static int set_next_request(void);
#ifndef fd_get_dma_residue
@@ -414,10 +414,10 @@ static struct floppy_drive_struct drive_state[N_DRIVE];
static struct floppy_write_errors write_errors[N_DRIVE];
static struct timer_list motor_off_timer[N_DRIVE];
static struct gendisk *disks[N_DRIVE];
static struct blk_mq_tag_set tag_sets[N_DRIVE];
static struct block_device *opened_bdev[N_DRIVE];
static DEFINE_MUTEX(open_lock);
static struct floppy_raw_cmd *raw_cmd, default_raw_cmd;
static int fdc_queue;
/*
* This struct defines the different floppy types.
@@ -2216,8 +2216,9 @@ static void floppy_end_request(struct request *req, blk_status_t error)
/* current_count_sectors can be zero if transfer failed */
if (error)
nr_sectors = blk_rq_cur_sectors(req);
if (__blk_end_request(req, error, nr_sectors << 9))
if (blk_update_request(req, error, nr_sectors << 9))
return;
__blk_mq_end_request(req, error);
/* We're done with the request */
floppy_off(drive);
@@ -2797,27 +2798,14 @@ static int make_raw_rw_request(void)
return 2;
}
/*
* Round-robin between our available drives, doing one request from each
*/
static int set_next_request(void)
{
struct request_queue *q;
int old_pos = fdc_queue;
do {
q = disks[fdc_queue]->queue;
if (++fdc_queue == N_DRIVE)
fdc_queue = 0;
if (q) {
current_req = blk_fetch_request(q);
if (current_req) {
current_req->error_count = 0;
break;
}
}
} while (fdc_queue != old_pos);
current_req = list_first_entry_or_null(&floppy_reqs, struct request,
queuelist);
if (current_req) {
current_req->error_count = 0;
list_del_init(&current_req->queuelist);
}
return current_req != NULL;
}
@@ -2901,29 +2889,38 @@ static void process_fd_request(void)
schedule_bh(redo_fd_request);
}
static void do_fd_request(struct request_queue *q)
static blk_status_t floppy_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
blk_mq_start_request(bd->rq);
if (WARN(max_buffer_sectors == 0,
"VFS: %s called on non-open device\n", __func__))
return;
return BLK_STS_IOERR;
if (WARN(atomic_read(&usage_count) == 0,
"warning: usage count=0, current_req=%p sect=%ld flags=%llx\n",
current_req, (long)blk_rq_pos(current_req),
(unsigned long long) current_req->cmd_flags))
return;
return BLK_STS_IOERR;
spin_lock_irq(&floppy_lock);
list_add_tail(&bd->rq->queuelist, &floppy_reqs);
spin_unlock_irq(&floppy_lock);
if (test_and_set_bit(0, &fdc_busy)) {
/* fdc busy, this new request will be treated when the
current one is done */
is_alive(__func__, "old request running");
return;
return BLK_STS_OK;
}
command_status = FD_COMMAND_NONE;
__reschedule_timeout(MAXTIMEOUT, "fd_request");
set_fdc(0);
process_fd_request();
is_alive(__func__, "");
return BLK_STS_OK;
}
static const struct cont_t poll_cont = {
@@ -4486,6 +4483,10 @@ static struct platform_driver floppy_driver = {
},
};
static const struct blk_mq_ops floppy_mq_ops = {
.queue_rq = floppy_queue_rq,
};
static struct platform_device floppy_device[N_DRIVE];
static bool floppy_available(int drive)
@@ -4533,9 +4534,12 @@ static int __init do_floppy_init(void)
goto out_put_disk;
}
disks[drive]->queue = blk_init_queue(do_fd_request, &floppy_lock);
if (!disks[drive]->queue) {
err = -ENOMEM;
disks[drive]->queue = blk_mq_init_sq_queue(&tag_sets[drive],
&floppy_mq_ops, 2,
BLK_MQ_F_SHOULD_MERGE);
if (IS_ERR(disks[drive]->queue)) {
err = PTR_ERR(disks[drive]->queue);
disks[drive]->queue = NULL;
goto out_put_disk;
}
@@ -4679,7 +4683,7 @@ static int __init do_floppy_init(void)
/* to be cleaned up... */
disks[drive]->private_data = (void *)(long)drive;
disks[drive]->flags |= GENHD_FL_REMOVABLE;
device_add_disk(&floppy_device[drive].dev, disks[drive]);
device_add_disk(&floppy_device[drive].dev, disks[drive], NULL);
}
return 0;
@@ -4708,6 +4712,7 @@ out_put_disk:
del_timer_sync(&motor_off_timer[drive]);
blk_cleanup_queue(disks[drive]->queue);
disks[drive]->queue = NULL;
blk_mq_free_tag_set(&tag_sets[drive]);
}
put_disk(disks[drive]);
}
@@ -4935,6 +4940,7 @@ static void __exit floppy_module_exit(void)
platform_device_unregister(&floppy_device[drive]);
}
blk_cleanup_queue(disks[drive]->queue);
blk_mq_free_tag_set(&tag_sets[drive]);
/*
* These disks have not called add_disk(). Don't put down

diff --git a/drivers/block/loop.c b/drivers/block/loop.c

@@ -77,6 +77,7 @@
#include <linux/falloc.h>
#include <linux/uio.h>
#include <linux/ioprio.h>
#include <linux/blk-cgroup.h>
#include "loop.h"
@@ -1760,8 +1761,8 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
/* always use the first bio's css */
#ifdef CONFIG_BLK_CGROUP
if (cmd->use_aio && rq->bio && rq->bio->bi_css) {
cmd->css = rq->bio->bi_css;
if (cmd->use_aio && rq->bio && rq->bio->bi_blkg) {
cmd->css = &bio_blkcg(rq->bio)->css;
css_get(cmd->css);
} else
#endif
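
After the blkcg ref-count rework in this pull, a bio carries its cgroup
association in bio->bi_blkg, and bio_blkcg() resolves the owning blkcg
from it; loop above now takes a css reference through that path instead
of the removed bi_css pointer. A sketch (my reading of the 4.20 helpers,
with the hypothetical rq_blkcg_css()):

#include <linux/blk-cgroup.h>
#include <linux/blkdev.h>

static struct cgroup_subsys_state *rq_blkcg_css(struct request *rq)
{
        if (!rq->bio || !rq->bio->bi_blkg)
                return NULL;
        /* pin the css, as loop does before handing work to its helper */
        css_get(&bio_blkcg(rq->bio)->css);
        return &bio_blkcg(rq->bio)->css;
}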

diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c

@@ -1862,11 +1862,9 @@ static int exec_drive_taskfile(struct driver_data *dd,
if (IS_ERR(outbuf))
return PTR_ERR(outbuf);
outbuf_dma = pci_map_single(dd->pdev,
outbuf,
taskout,
DMA_TO_DEVICE);
if (pci_dma_mapping_error(dd->pdev, outbuf_dma)) {
outbuf_dma = dma_map_single(&dd->pdev->dev, outbuf,
taskout, DMA_TO_DEVICE);
if (dma_mapping_error(&dd->pdev->dev, outbuf_dma)) {
err = -ENOMEM;
goto abort;
}
@@ -1880,10 +1878,9 @@ static int exec_drive_taskfile(struct driver_data *dd,
inbuf = NULL;
goto abort;
}
inbuf_dma = pci_map_single(dd->pdev,
inbuf,
taskin, DMA_FROM_DEVICE);
if (pci_dma_mapping_error(dd->pdev, inbuf_dma)) {
inbuf_dma = dma_map_single(&dd->pdev->dev, inbuf,
taskin, DMA_FROM_DEVICE);
if (dma_mapping_error(&dd->pdev->dev, inbuf_dma)) {
err = -ENOMEM;
goto abort;
}
@@ -2002,11 +1999,11 @@ static int exec_drive_taskfile(struct driver_data *dd,
/* reclaim the DMA buffers.*/
if (inbuf_dma)
pci_unmap_single(dd->pdev, inbuf_dma,
taskin, DMA_FROM_DEVICE);
dma_unmap_single(&dd->pdev->dev, inbuf_dma, taskin,
DMA_FROM_DEVICE);
if (outbuf_dma)
pci_unmap_single(dd->pdev, outbuf_dma,
taskout, DMA_TO_DEVICE);
dma_unmap_single(&dd->pdev->dev, outbuf_dma, taskout,
DMA_TO_DEVICE);
inbuf_dma = 0;
outbuf_dma = 0;
@@ -2053,11 +2050,11 @@ static int exec_drive_taskfile(struct driver_data *dd,
}
abort:
if (inbuf_dma)
pci_unmap_single(dd->pdev, inbuf_dma,
taskin, DMA_FROM_DEVICE);
dma_unmap_single(&dd->pdev->dev, inbuf_dma, taskin,
DMA_FROM_DEVICE);
if (outbuf_dma)
pci_unmap_single(dd->pdev, outbuf_dma,
taskout, DMA_TO_DEVICE);
dma_unmap_single(&dd->pdev->dev, outbuf_dma, taskout,
DMA_TO_DEVICE);
kfree(outbuf);
kfree(inbuf);
@@ -3861,7 +3858,7 @@ skip_create_disk:
set_capacity(dd->disk, capacity);
/* Enable the block device and add it to /dev */
device_add_disk(&dd->pdev->dev, dd->disk);
device_add_disk(&dd->pdev->dev, dd->disk, NULL);
dd->bdev = bdget_disk(dd->disk, 0);
/*
@@ -4216,18 +4213,10 @@ static int mtip_pci_probe(struct pci_dev *pdev,
goto iomap_err;
}
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
rv = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (rv) {
rv = pci_set_consistent_dma_mask(pdev,
DMA_BIT_MASK(32));
if (rv) {
dev_warn(&pdev->dev,
"64-bit DMA enable failed\n");
goto setmask_err;
}
}
rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rv) {
dev_warn(&pdev->dev, "64-bit DMA enable failed\n");
goto setmask_err;
}
/* Copy the info we may need later into the private data structure. */

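The mask setup above is the whole mtip32xx conversion in miniature: a single dma_set_mask_and_coherent() call replaces the paired pci_set_dma_mask()/pci_set_consistent_dma_mask() calls. A driver that still wants a 32-bit fallback (as skd does later in this series) needs only one more line; a sketch, assuming the hardware tolerates either mask:

#include <linux/dma-mapping.h>

rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rv)
	rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rv) {
	dev_warn(&pdev->dev, "no usable DMA configuration\n");
	goto setmask_err;	/* error-path label is illustrative */
}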
@@ -606,20 +606,12 @@ static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
static void end_cmd(struct nullb_cmd *cmd)
{
struct request_queue *q = NULL;
int queue_mode = cmd->nq->dev->queue_mode;
if (cmd->rq)
q = cmd->rq->q;
switch (queue_mode) {
case NULL_Q_MQ:
blk_mq_end_request(cmd->rq, cmd->error);
return;
case NULL_Q_RQ:
INIT_LIST_HEAD(&cmd->rq->queuelist);
blk_end_request_all(cmd->rq, cmd->error);
break;
case NULL_Q_BIO:
cmd->bio->bi_status = cmd->error;
bio_endio(cmd->bio);
@@ -627,15 +619,6 @@ static void end_cmd(struct nullb_cmd *cmd)
}
free_cmd(cmd);
/* Restart queue if needed, as we are freeing a tag */
if (queue_mode == NULL_Q_RQ && blk_queue_stopped(q)) {
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
blk_start_queue_async(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
}
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
@@ -1136,25 +1119,14 @@ static void null_stop_queue(struct nullb *nullb)
if (nullb->dev->queue_mode == NULL_Q_MQ)
blk_mq_stop_hw_queues(q);
else {
spin_lock_irq(q->queue_lock);
blk_stop_queue(q);
spin_unlock_irq(q->queue_lock);
}
}
static void null_restart_queue_async(struct nullb *nullb)
{
struct request_queue *q = nullb->q;
unsigned long flags;
if (nullb->dev->queue_mode == NULL_Q_MQ)
blk_mq_start_stopped_hw_queues(q, true);
else {
spin_lock_irqsave(q->queue_lock, flags);
blk_start_queue_async(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
}
static bool cmd_report_zone(struct nullb *nullb, struct nullb_cmd *cmd)
@@ -1197,17 +1169,8 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
/* race with timer */
if (atomic_long_read(&nullb->cur_bytes) > 0)
null_restart_queue_async(nullb);
if (dev->queue_mode == NULL_Q_RQ) {
struct request_queue *q = nullb->q;
spin_lock_irq(q->queue_lock);
rq->rq_flags |= RQF_DONTPREP;
blk_requeue_request(q, rq);
spin_unlock_irq(q->queue_lock);
return BLK_STS_OK;
} else
/* requeue request */
return BLK_STS_DEV_RESOURCE;
/* requeue request */
return BLK_STS_DEV_RESOURCE;
}
}
@@ -1278,9 +1241,6 @@ out:
case NULL_Q_MQ:
blk_mq_complete_request(cmd->rq);
break;
case NULL_Q_RQ:
blk_complete_request(cmd->rq);
break;
case NULL_Q_BIO:
/*
* XXX: no proper submitting cpu information available.
@@ -1349,30 +1309,6 @@ static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
return BLK_QC_T_NONE;
}
static enum blk_eh_timer_return null_rq_timed_out_fn(struct request *rq)
{
pr_info("null: rq %p timed out\n", rq);
__blk_complete_request(rq);
return BLK_EH_DONE;
}
static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
struct nullb *nullb = q->queuedata;
struct nullb_queue *nq = nullb_to_queue(nullb);
struct nullb_cmd *cmd;
cmd = alloc_cmd(nq, 0);
if (cmd) {
cmd->rq = req;
req->special = cmd;
return BLKPREP_OK;
}
blk_stop_queue(q);
return BLKPREP_DEFER;
}
static bool should_timeout_request(struct request *rq)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
@@ -1391,27 +1327,6 @@ static bool should_requeue_request(struct request *rq)
return false;
}
static void null_request_fn(struct request_queue *q)
{
struct request *rq;
while ((rq = blk_fetch_request(q)) != NULL) {
struct nullb_cmd *cmd = rq->special;
/* just ignore the request */
if (should_timeout_request(rq))
continue;
if (should_requeue_request(rq)) {
blk_requeue_request(q, rq);
continue;
}
spin_unlock_irq(q->queue_lock);
null_handle_cmd(cmd);
spin_lock_irq(q->queue_lock);
}
}
static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
{
pr_info("null: rq %p timed out\n", rq);
@@ -1766,24 +1681,6 @@ static int null_add_dev(struct nullb_device *dev)
rv = init_driver_queues(nullb);
if (rv)
goto out_cleanup_blk_queue;
} else {
nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock,
dev->home_node);
if (!nullb->q) {
rv = -ENOMEM;
goto out_cleanup_queues;
}
if (!null_setup_fault())
goto out_cleanup_blk_queue;
blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
blk_queue_rq_timed_out(nullb->q, null_rq_timed_out_fn);
nullb->q->rq_timeout = 5 * HZ;
rv = init_driver_queues(nullb);
if (rv)
goto out_cleanup_blk_queue;
}
if (dev->mbps) {
@@ -1865,6 +1762,10 @@ static int __init null_init(void)
return -EINVAL;
}
if (g_queue_mode == NULL_Q_RQ) {
pr_err("null_blk: legacy IO path no longer available\n");
return -EINVAL;
}
if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
if (g_submit_queues != nr_online_nodes) {
pr_warn("null_blk: submit_queues param is set to %u.\n",

@@ -137,7 +137,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_DLY};
#include <linux/delay.h>
#include <linux/cdrom.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
@@ -186,7 +186,8 @@ static int pcd_packet(struct cdrom_device_info *cdi,
static int pcd_detect(void);
static void pcd_probe_capabilities(void);
static void do_pcd_read_drq(void);
static void do_pcd_request(struct request_queue * q);
static blk_status_t pcd_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd);
static void do_pcd_read(void);
struct pcd_unit {
@@ -199,6 +200,8 @@ struct pcd_unit {
char *name; /* pcd0, pcd1, etc */
struct cdrom_device_info info; /* uniform cdrom interface */
struct gendisk *disk;
struct blk_mq_tag_set tag_set;
struct list_head rq_list;
};
static struct pcd_unit pcd[PCD_UNITS];
@@ -292,6 +295,10 @@ static const struct cdrom_device_ops pcd_dops = {
CDC_CD_RW,
};
static const struct blk_mq_ops pcd_mq_ops = {
.queue_rq = pcd_queue_rq,
};
static void pcd_init_units(void)
{
struct pcd_unit *cd;
@@ -300,13 +307,19 @@ static void pcd_init_units(void)
pcd_drive_count = 0;
for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
struct gendisk *disk = alloc_disk(1);
if (!disk)
continue;
disk->queue = blk_init_queue(do_pcd_request, &pcd_lock);
if (!disk->queue) {
put_disk(disk);
disk->queue = blk_mq_init_sq_queue(&cd->tag_set, &pcd_mq_ops,
1, BLK_MQ_F_SHOULD_MERGE);
if (IS_ERR(disk->queue)) {
disk->queue = NULL;
continue;
}
INIT_LIST_HEAD(&cd->rq_list);
disk->queue->queuedata = cd;
blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
cd->disk = disk;
cd->pi = &cd->pia;
@@ -748,18 +761,18 @@ static int pcd_queue;
static int set_next_request(void)
{
struct pcd_unit *cd;
struct request_queue *q;
int old_pos = pcd_queue;
do {
cd = &pcd[pcd_queue];
q = cd->present ? cd->disk->queue : NULL;
if (++pcd_queue == PCD_UNITS)
pcd_queue = 0;
if (q) {
pcd_req = blk_fetch_request(q);
if (pcd_req)
break;
if (cd->present && !list_empty(&cd->rq_list)) {
pcd_req = list_first_entry(&cd->rq_list, struct request,
queuelist);
list_del_init(&pcd_req->queuelist);
blk_mq_start_request(pcd_req);
break;
}
} while (pcd_queue != old_pos);
@@ -768,33 +781,41 @@ static int set_next_request(void)
static void pcd_request(void)
{
struct pcd_unit *cd;
if (pcd_busy)
return;
while (1) {
if (!pcd_req && !set_next_request())
return;
if (rq_data_dir(pcd_req) == READ) {
struct pcd_unit *cd = pcd_req->rq_disk->private_data;
if (cd != pcd_current)
pcd_bufblk = -1;
pcd_current = cd;
pcd_sector = blk_rq_pos(pcd_req);
pcd_count = blk_rq_cur_sectors(pcd_req);
pcd_buf = bio_data(pcd_req->bio);
pcd_busy = 1;
ps_set_intr(do_pcd_read, NULL, 0, nice);
return;
} else {
__blk_end_request_all(pcd_req, BLK_STS_IOERR);
pcd_req = NULL;
}
}
if (!pcd_req && !set_next_request())
return;
cd = pcd_req->rq_disk->private_data;
if (cd != pcd_current)
pcd_bufblk = -1;
pcd_current = cd;
pcd_sector = blk_rq_pos(pcd_req);
pcd_count = blk_rq_cur_sectors(pcd_req);
pcd_buf = bio_data(pcd_req->bio);
pcd_busy = 1;
ps_set_intr(do_pcd_read, NULL, 0, nice);
}
static void do_pcd_request(struct request_queue *q)
static blk_status_t pcd_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct pcd_unit *cd = hctx->queue->queuedata;
if (rq_data_dir(bd->rq) != READ) {
blk_mq_start_request(bd->rq);
return BLK_STS_IOERR;
}
spin_lock_irq(&pcd_lock);
list_add_tail(&bd->rq->queuelist, &cd->rq_list);
pcd_request();
spin_unlock_irq(&pcd_lock);
return BLK_STS_OK;
}
static inline void next_request(blk_status_t err)
@@ -802,8 +823,10 @@ static inline void next_request(blk_status_t err)
unsigned long saved_flags;
spin_lock_irqsave(&pcd_lock, saved_flags);
if (!__blk_end_request_cur(pcd_req, err))
if (!blk_update_request(pcd_req, err, blk_rq_cur_bytes(pcd_req))) {
__blk_mq_end_request(pcd_req, err);
pcd_req = NULL;
}
pcd_busy = 0;
pcd_request();
spin_unlock_irqrestore(&pcd_lock, saved_flags);
@@ -1011,6 +1034,7 @@ static void __exit pcd_exit(void)
unregister_cdrom(&cd->info);
}
blk_cleanup_queue(cd->disk->queue);
blk_mq_free_tag_set(&cd->tag_set);
put_disk(cd->disk);
}
unregister_blkdev(major, name);

@@ -151,7 +151,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h> /* for the eject ioctl */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blkpg.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
@@ -236,6 +236,8 @@ struct pd_unit {
int alt_geom;
char name[PD_NAMELEN]; /* pda, pdb, etc ... */
struct gendisk *gd;
struct blk_mq_tag_set tag_set;
struct list_head rq_list;
};
static struct pd_unit pd[PD_UNITS];
@@ -399,9 +401,17 @@ static int set_next_request(void)
if (++pd_queue == PD_UNITS)
pd_queue = 0;
if (q) {
pd_req = blk_fetch_request(q);
if (pd_req)
break;
struct pd_unit *disk = q->queuedata;
if (list_empty(&disk->rq_list))
continue;
pd_req = list_first_entry(&disk->rq_list,
struct request,
queuelist);
list_del_init(&pd_req->queuelist);
blk_mq_start_request(pd_req);
break;
}
} while (pd_queue != old_pos);
@@ -412,7 +422,6 @@ static void run_fsm(void)
{
while (1) {
enum action res;
unsigned long saved_flags;
int stop = 0;
if (!phase) {
@@ -433,19 +442,24 @@ static void run_fsm(void)
}
switch(res = phase()) {
case Ok: case Fail:
case Ok: case Fail: {
blk_status_t err;
err = res == Ok ? 0 : BLK_STS_IOERR;
pi_disconnect(pi_current);
pd_claimed = 0;
phase = NULL;
spin_lock_irqsave(&pd_lock, saved_flags);
if (!__blk_end_request_cur(pd_req,
res == Ok ? 0 : BLK_STS_IOERR)) {
if (!set_next_request())
stop = 1;
spin_lock_irq(&pd_lock);
if (!blk_update_request(pd_req, err,
blk_rq_cur_bytes(pd_req))) {
__blk_mq_end_request(pd_req, err);
pd_req = NULL;
stop = !set_next_request();
}
spin_unlock_irqrestore(&pd_lock, saved_flags);
spin_unlock_irq(&pd_lock);
if (stop)
return;
}
/* fall through */
case Hold:
schedule_fsm();
@@ -505,11 +519,17 @@ static int pd_next_buf(void)
if (pd_count)
return 0;
spin_lock_irqsave(&pd_lock, saved_flags);
__blk_end_request_cur(pd_req, 0);
pd_count = blk_rq_cur_sectors(pd_req);
pd_buf = bio_data(pd_req->bio);
if (!blk_update_request(pd_req, 0, blk_rq_cur_bytes(pd_req))) {
__blk_mq_end_request(pd_req, 0);
pd_req = NULL;
pd_count = 0;
pd_buf = NULL;
} else {
pd_count = blk_rq_cur_sectors(pd_req);
pd_buf = bio_data(pd_req->bio);
}
spin_unlock_irqrestore(&pd_lock, saved_flags);
return 0;
return !pd_count;
}
static unsigned long pd_timeout;
@@ -726,15 +746,21 @@ static enum action pd_identify(struct pd_unit *disk)
/* end of io request engine */
static void do_pd_request(struct request_queue * q)
static blk_status_t pd_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
if (pd_req)
return;
pd_req = blk_fetch_request(q);
if (!pd_req)
return;
struct pd_unit *disk = hctx->queue->queuedata;
schedule_fsm();
spin_lock_irq(&pd_lock);
if (!pd_req) {
pd_req = bd->rq;
blk_mq_start_request(pd_req);
} else
list_add_tail(&bd->rq->queuelist, &disk->rq_list);
spin_unlock_irq(&pd_lock);
run_fsm();
return BLK_STS_OK;
}
static int pd_special_command(struct pd_unit *disk,
@@ -847,23 +873,33 @@ static const struct block_device_operations pd_fops = {
/* probing */
static const struct blk_mq_ops pd_mq_ops = {
.queue_rq = pd_queue_rq,
};
static void pd_probe_drive(struct pd_unit *disk)
{
struct gendisk *p = alloc_disk(1 << PD_BITS);
struct gendisk *p;
p = alloc_disk(1 << PD_BITS);
if (!p)
return;
strcpy(p->disk_name, disk->name);
p->fops = &pd_fops;
p->major = major;
p->first_minor = (disk - pd) << PD_BITS;
disk->gd = p;
p->private_data = disk;
p->queue = blk_init_queue(do_pd_request, &pd_lock);
if (!p->queue) {
disk->gd = NULL;
put_disk(p);
p->queue = blk_mq_init_sq_queue(&disk->tag_set, &pd_mq_ops, 2,
BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
if (IS_ERR(p->queue)) {
p->queue = NULL;
return;
}
p->queue->queuedata = disk;
blk_queue_max_hw_sectors(p->queue, cluster);
blk_queue_bounce_limit(p->queue, BLK_BOUNCE_HIGH);
@@ -895,6 +931,7 @@ static int pd_detect(void)
disk->standby = parm[D_SBY];
if (parm[D_PRT])
pd_drive_count++;
INIT_LIST_HEAD(&disk->rq_list);
}
par_drv = pi_register_driver(name);
@@ -972,6 +1009,7 @@ static void __exit pd_exit(void)
disk->gd = NULL;
del_gendisk(p);
blk_cleanup_queue(p->queue);
blk_mq_free_tag_set(&disk->tag_set);
put_disk(p);
pi_release(disk->pi);
}

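pcd, pd, and pf all end requests through the same open-coded idiom, because blk-mq has no drop-in replacement for __blk_end_request_cur(): blk_update_request() retires the bytes of the current segment and returns true while the request still has more to do. In effect each driver regrows the helper below (a sketch; the name is made up):

/*
 * Complete the current segment of @req with @err.  Returns true when
 * the request is fully done and must no longer be referenced.
 */
static bool drv_end_cur_segment(struct request *req, blk_status_t err)
{
	if (blk_update_request(req, err, blk_rq_cur_bytes(req)))
		return false;		/* more segments pending */
	__blk_mq_end_request(req, err);
	return true;
}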
@@ -152,7 +152,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_LUN, D_DLY};
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blkpg.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
@@ -206,7 +206,8 @@ module_param_array(drive3, int, NULL, 0);
#define ATAPI_WRITE_10 0x2a
static int pf_open(struct block_device *bdev, fmode_t mode);
static void do_pf_request(struct request_queue * q);
static blk_status_t pf_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd);
static int pf_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg);
static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo);
@@ -238,6 +239,8 @@ struct pf_unit {
int present; /* device present ? */
char name[PF_NAMELEN]; /* pf0, pf1, ... */
struct gendisk *disk;
struct blk_mq_tag_set tag_set;
struct list_head rq_list;
};
static struct pf_unit units[PF_UNITS];
@@ -277,6 +280,10 @@ static const struct block_device_operations pf_fops = {
.check_events = pf_check_events,
};
static const struct blk_mq_ops pf_mq_ops = {
.queue_rq = pf_queue_rq,
};
static void __init pf_init_units(void)
{
struct pf_unit *pf;
@@ -284,14 +291,22 @@ static void __init pf_init_units(void)
pf_drive_count = 0;
for (unit = 0, pf = units; unit < PF_UNITS; unit++, pf++) {
struct gendisk *disk = alloc_disk(1);
struct gendisk *disk;
disk = alloc_disk(1);
if (!disk)
continue;
disk->queue = blk_init_queue(do_pf_request, &pf_spin_lock);
if (!disk->queue) {
disk->queue = blk_mq_init_sq_queue(&pf->tag_set, &pf_mq_ops,
1, BLK_MQ_F_SHOULD_MERGE);
if (IS_ERR(disk->queue)) {
put_disk(disk);
return;
disk->queue = NULL;
continue;
}
INIT_LIST_HEAD(&pf->rq_list);
disk->queue->queuedata = pf;
blk_queue_max_segments(disk->queue, cluster);
blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
pf->disk = disk;
@@ -784,18 +799,18 @@ static int pf_queue;
static int set_next_request(void)
{
struct pf_unit *pf;
struct request_queue *q;
int old_pos = pf_queue;
do {
pf = &units[pf_queue];
q = pf->present ? pf->disk->queue : NULL;
if (++pf_queue == PF_UNITS)
pf_queue = 0;
if (q) {
pf_req = blk_fetch_request(q);
if (pf_req)
break;
if (pf->present && !list_empty(&pf->rq_list)) {
pf_req = list_first_entry(&pf->rq_list, struct request,
queuelist);
list_del_init(&pf_req->queuelist);
blk_mq_start_request(pf_req);
break;
}
} while (pf_queue != old_pos);
@@ -804,8 +819,12 @@ static int set_next_request(void)
static void pf_end_request(blk_status_t err)
{
if (pf_req && !__blk_end_request_cur(pf_req, err))
if (!pf_req)
return;
if (!blk_update_request(pf_req, err, blk_rq_cur_bytes(pf_req))) {
__blk_mq_end_request(pf_req, err);
pf_req = NULL;
}
}
static void pf_request(void)
@@ -842,9 +861,17 @@ repeat:
}
}
static void do_pf_request(struct request_queue *q)
static blk_status_t pf_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct pf_unit *pf = hctx->queue->queuedata;
spin_lock_irq(&pf_spin_lock);
list_add_tail(&bd->rq->queuelist, &pf->rq_list);
pf_request();
spin_unlock_irq(&pf_spin_lock);
return BLK_STS_OK;
}
static int pf_next_buf(void)
@@ -1024,6 +1051,7 @@ static void __exit pf_exit(void)
continue;
del_gendisk(pf->disk);
blk_cleanup_queue(pf->disk->queue);
blk_mq_free_tag_set(&pf->tag_set);
put_disk(pf->disk);
pi_release(pf->pi);
}

@@ -2645,7 +2645,7 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
*/
if (pd->refcnt == 1)
pkt_lock_door(pd, 0);
/* fallthru */
/* fall through */
/*
* forward selected CDROM ioctls to CD-ROM, for UDF
*/

@@ -19,7 +19,7 @@
*/
#include <linux/ata.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -42,6 +42,7 @@
struct ps3disk_private {
spinlock_t lock; /* Request queue spinlock */
struct request_queue *queue;
struct blk_mq_tag_set tag_set;
struct gendisk *gendisk;
unsigned int blocking_factor;
struct request *req;
@@ -118,8 +119,8 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
}
}
static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
struct request *req)
static blk_status_t ps3disk_submit_request_sg(struct ps3_storage_device *dev,
struct request *req)
{
struct ps3disk_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
int write = rq_data_dir(req), res;
@@ -158,16 +159,15 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
if (res) {
dev_err(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__,
__LINE__, op, res);
__blk_end_request_all(req, BLK_STS_IOERR);
return 0;
return BLK_STS_IOERR;
}
priv->req = req;
return 1;
return BLK_STS_OK;
}
static int ps3disk_submit_flush_request(struct ps3_storage_device *dev,
struct request *req)
static blk_status_t ps3disk_submit_flush_request(struct ps3_storage_device *dev,
struct request *req)
{
struct ps3disk_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
u64 res;
@@ -180,50 +180,45 @@ static int ps3disk_submit_flush_request(struct ps3_storage_device *dev,
if (res) {
dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n",
__func__, __LINE__, res);
__blk_end_request_all(req, BLK_STS_IOERR);
return 0;
return BLK_STS_IOERR;
}
priv->req = req;
return 1;
return BLK_STS_OK;
}
static void ps3disk_do_request(struct ps3_storage_device *dev,
struct request_queue *q)
static blk_status_t ps3disk_do_request(struct ps3_storage_device *dev,
struct request *req)
{
struct request *req;
dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
while ((req = blk_fetch_request(q))) {
switch (req_op(req)) {
case REQ_OP_FLUSH:
if (ps3disk_submit_flush_request(dev, req))
return;
break;
case REQ_OP_READ:
case REQ_OP_WRITE:
if (ps3disk_submit_request_sg(dev, req))
return;
break;
default:
blk_dump_rq_flags(req, DEVICE_NAME " bad request");
__blk_end_request_all(req, BLK_STS_IOERR);
}
switch (req_op(req)) {
case REQ_OP_FLUSH:
return ps3disk_submit_flush_request(dev, req);
case REQ_OP_READ:
case REQ_OP_WRITE:
return ps3disk_submit_request_sg(dev, req);
default:
blk_dump_rq_flags(req, DEVICE_NAME " bad request");
return BLK_STS_IOERR;
}
}
static void ps3disk_request(struct request_queue *q)
static blk_status_t ps3disk_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct request_queue *q = hctx->queue;
struct ps3_storage_device *dev = q->queuedata;
struct ps3disk_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
blk_status_t ret;
if (priv->req) {
dev_dbg(&dev->sbd.core, "%s:%u busy\n", __func__, __LINE__);
return;
}
blk_mq_start_request(bd->rq);
ps3disk_do_request(dev, q);
spin_lock_irq(&priv->lock);
ret = ps3disk_do_request(dev, bd->rq);
spin_unlock_irq(&priv->lock);
return ret;
}
static irqreturn_t ps3disk_interrupt(int irq, void *data)
@@ -280,11 +275,11 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
}
spin_lock(&priv->lock);
__blk_end_request_all(req, error);
priv->req = NULL;
ps3disk_do_request(dev, priv->queue);
blk_mq_end_request(req, error);
spin_unlock(&priv->lock);
blk_mq_run_hw_queues(priv->queue, true);
return IRQ_HANDLED;
}
@@ -404,6 +399,10 @@ static unsigned long ps3disk_mask;
static DEFINE_MUTEX(ps3disk_mask_mutex);
static const struct blk_mq_ops ps3disk_mq_ops = {
.queue_rq = ps3disk_queue_rq,
};
static int ps3disk_probe(struct ps3_system_bus_device *_dev)
{
struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
@@ -454,11 +453,12 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
ps3disk_identify(dev);
queue = blk_init_queue(ps3disk_request, &priv->lock);
if (!queue) {
dev_err(&dev->sbd.core, "%s:%u: blk_init_queue failed\n",
queue = blk_mq_init_sq_queue(&priv->tag_set, &ps3disk_mq_ops, 1,
BLK_MQ_F_SHOULD_MERGE);
if (IS_ERR(queue)) {
dev_err(&dev->sbd.core, "%s:%u: blk_mq_init_queue failed\n",
__func__, __LINE__);
error = -ENOMEM;
error = PTR_ERR(queue);
goto fail_teardown;
}
@@ -500,11 +500,12 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
gendisk->disk_name, priv->model, priv->raw_capacity >> 11,
get_capacity(gendisk) >> 11);
device_add_disk(&dev->sbd.core, gendisk);
device_add_disk(&dev->sbd.core, gendisk, NULL);
return 0;
fail_cleanup_queue:
blk_cleanup_queue(queue);
blk_mq_free_tag_set(&priv->tag_set);
fail_teardown:
ps3stor_teardown(dev);
fail_free_bounce:
@@ -530,6 +531,7 @@ static int ps3disk_remove(struct ps3_system_bus_device *_dev)
mutex_unlock(&ps3disk_mask_mutex);
del_gendisk(priv->gendisk);
blk_cleanup_queue(priv->queue);
blk_mq_free_tag_set(&priv->tag_set);
put_disk(priv->gendisk);
dev_notice(&dev->sbd.core, "Synchronizing disk cache\n");
ps3disk_sync_cache(dev);

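ps3disk shows the completion side of a depth-1 conversion: with a single-queue tag set of depth 1, blk-mq itself guarantees at most one request in flight, so the old priv->req busy test in the request_fn can simply go away. The interrupt handler retires the request under the lock and then re-runs the hardware queue; roughly:

/* device interrupt: end the request, then resume dispatch */
spin_lock(&priv->lock);
priv->req = NULL;			/* device is free again */
blk_mq_end_request(req, error);
spin_unlock(&priv->lock);

blk_mq_run_hw_queues(priv->queue, true);	/* true = run async */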
@@ -769,7 +769,7 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev)
dev_info(&dev->core, "%s: Using %lu MiB of GPU memory\n",
gendisk->disk_name, get_capacity(gendisk) >> 11);
device_add_disk(&dev->core, gendisk);
device_add_disk(&dev->core, gendisk, NULL);
return 0;
fail_cleanup_queue:

@@ -782,7 +782,7 @@ static int rsxx_pci_probe(struct pci_dev *dev,
pci_set_master(dev);
pci_set_dma_max_seg_size(dev, RSXX_HW_BLK_SIZE);
st = pci_set_dma_mask(dev, DMA_BIT_MASK(64));
st = dma_set_mask(&dev->dev, DMA_BIT_MASK(64));
if (st) {
dev_err(CARD_TO_DEV(card),
"No usable DMA configuration,aborting\n");

@@ -276,7 +276,7 @@ static void creg_cmd_done(struct work_struct *work)
st = -EIO;
}
if ((cmd->op == CREG_OP_READ)) {
if (cmd->op == CREG_OP_READ) {
unsigned int cnt8 = ioread32(card->regmap + CREG_CNT);
/* Paranoid Sanity Checks */

@@ -226,7 +226,7 @@ int rsxx_attach_dev(struct rsxx_cardinfo *card)
set_capacity(card->gendisk, card->size8 >> 9);
else
set_capacity(card->gendisk, 0);
device_add_disk(CARD_TO_DEV(card), card->gendisk);
device_add_disk(CARD_TO_DEV(card), card->gendisk, NULL);
card->bdev_attached = 1;
}

@@ -224,12 +224,12 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
static void rsxx_free_dma(struct rsxx_dma_ctrl *ctrl, struct rsxx_dma *dma)
{
if (dma->cmd != HW_CMD_BLK_DISCARD) {
if (!pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) {
pci_unmap_page(ctrl->card->dev, dma->dma_addr,
if (!dma_mapping_error(&ctrl->card->dev->dev, dma->dma_addr)) {
dma_unmap_page(&ctrl->card->dev->dev, dma->dma_addr,
get_dma_size(dma),
dma->cmd == HW_CMD_BLK_WRITE ?
PCI_DMA_TODEVICE :
PCI_DMA_FROMDEVICE);
DMA_TO_DEVICE :
DMA_FROM_DEVICE);
}
}
@@ -438,23 +438,23 @@ static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
if (dma->cmd != HW_CMD_BLK_DISCARD) {
if (dma->cmd == HW_CMD_BLK_WRITE)
dir = PCI_DMA_TODEVICE;
dir = DMA_TO_DEVICE;
else
dir = PCI_DMA_FROMDEVICE;
dir = DMA_FROM_DEVICE;
/*
* The function pci_map_page is placed here because we
* The function dma_map_page is placed here because we
* can only, by design, issue up to 255 commands to the
* hardware at one time per DMA channel. So the maximum
* amount of mapped memory would be 255 * 4 channels *
* 4096 Bytes which is less than 2GB, the limit of a x8
* Non-HWWD PCIe slot. This way the pci_map_page
* Non-HWWD PCIe slot. This way the dma_map_page
* function should never fail because of a lack of
* mappable memory.
*/
dma->dma_addr = pci_map_page(ctrl->card->dev, dma->page,
dma->dma_addr = dma_map_page(&ctrl->card->dev->dev, dma->page,
dma->pg_off, dma->sub_page.cnt << 9, dir);
if (pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) {
if (dma_mapping_error(&ctrl->card->dev->dev, dma->dma_addr)) {
push_tracker(ctrl->trackers, tag);
rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
continue;
@@ -776,10 +776,10 @@ bvec_err:
/*----------------- DMA Engine Initialization & Setup -------------------*/
int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl)
{
ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
&ctrl->status.dma_addr);
ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
&ctrl->cmd.dma_addr);
ctrl->status.buf = dma_alloc_coherent(&dev->dev, STATUS_BUFFER_SIZE8,
&ctrl->status.dma_addr, GFP_KERNEL);
ctrl->cmd.buf = dma_alloc_coherent(&dev->dev, COMMAND_BUFFER_SIZE8,
&ctrl->cmd.dma_addr, GFP_KERNEL);
if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
return -ENOMEM;
@@ -962,12 +962,12 @@ failed_dma_setup:
vfree(ctrl->trackers);
if (ctrl->status.buf)
pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
ctrl->status.buf,
ctrl->status.dma_addr);
dma_free_coherent(&card->dev->dev, STATUS_BUFFER_SIZE8,
ctrl->status.buf,
ctrl->status.dma_addr);
if (ctrl->cmd.buf)
pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
ctrl->cmd.buf, ctrl->cmd.dma_addr);
dma_free_coherent(&card->dev->dev, COMMAND_BUFFER_SIZE8,
ctrl->cmd.buf, ctrl->cmd.dma_addr);
}
return st;
@@ -1023,10 +1023,10 @@ void rsxx_dma_destroy(struct rsxx_cardinfo *card)
vfree(ctrl->trackers);
pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
ctrl->status.buf, ctrl->status.dma_addr);
pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
ctrl->cmd.buf, ctrl->cmd.dma_addr);
dma_free_coherent(&card->dev->dev, STATUS_BUFFER_SIZE8,
ctrl->status.buf, ctrl->status.dma_addr);
dma_free_coherent(&card->dev->dev, COMMAND_BUFFER_SIZE8,
ctrl->cmd.buf, ctrl->cmd.dma_addr);
}
}
@@ -1059,11 +1059,11 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
card->ctrl[i].stats.reads_issued--;
if (dma->cmd != HW_CMD_BLK_DISCARD) {
pci_unmap_page(card->dev, dma->dma_addr,
dma_unmap_page(&card->dev->dev, dma->dma_addr,
get_dma_size(dma),
dma->cmd == HW_CMD_BLK_WRITE ?
PCI_DMA_TODEVICE :
PCI_DMA_FROMDEVICE);
DMA_TO_DEVICE :
DMA_FROM_DEVICE);
}
list_add_tail(&dma->list, &issued_dmas[i]);

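The rsxx hunks double as a translation table for the generic DMA API conversions in this pull: pci_map_page()/pci_unmap_page() become dma_map_page()/dma_unmap_page() on &pdev->dev, PCI_DMA_{TO,FROM}DEVICE becomes DMA_{TO,FROM}_DEVICE, mapping failures are caught with dma_mapping_error(), and pci_alloc_consistent() becomes dma_alloc_coherent() with an explicit GFP mask. A self-contained sketch (the helper name is made up):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Map one page for a write-out; a failed mapping must never be used. */
static int drv_map_one(struct pci_dev *pdev, struct page *page,
		       unsigned int off, unsigned int len,
		       dma_addr_t *out)
{
	*out = dma_map_page(&pdev->dev, page, off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, *out))
		return -ENOMEM;
	return 0;
}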
@@ -632,7 +632,7 @@ static bool skd_preop_sg_list(struct skd_device *skdev,
* Map scatterlist to PCI bus addresses.
* Note PCI might change the number of entries.
*/
n_sg = pci_map_sg(skdev->pdev, sgl, n_sg, skreq->data_dir);
n_sg = dma_map_sg(&skdev->pdev->dev, sgl, n_sg, skreq->data_dir);
if (n_sg <= 0)
return false;
@@ -682,7 +682,8 @@ static void skd_postop_sg_list(struct skd_device *skdev,
skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
skreq->sksg_dma_address +
((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, skreq->data_dir);
dma_unmap_sg(&skdev->pdev->dev, &skreq->sg[0], skreq->n_sg,
skreq->data_dir);
}
/*
@@ -1416,7 +1417,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
case SKD_CHECK_STATUS_BUSY_IMMINENT:
skd_log_skreq(skdev, skreq, "retry(busy)");
blk_requeue_request(skdev->queue, req);
blk_mq_requeue_request(req, true);
dev_info(&skdev->pdev->dev, "drive BUSY imminent\n");
skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
skdev->timer_countdown = SKD_TIMER_MINUTES(20);
@@ -1426,7 +1427,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
case SKD_CHECK_STATUS_REQUEUE_REQUEST:
if ((unsigned long) ++req->special < SKD_MAX_RETRIES) {
skd_log_skreq(skdev, skreq, "retry");
blk_requeue_request(skdev->queue, req);
blk_mq_requeue_request(req, true);
break;
}
/* fall through */
@@ -2632,8 +2633,8 @@ static int skd_cons_skcomp(struct skd_device *skdev)
"comp pci_alloc, total bytes %zd entries %d\n",
SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY);
skcomp = pci_zalloc_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
&skdev->cq_dma_address);
skcomp = dma_zalloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
&skdev->cq_dma_address, GFP_KERNEL);
if (skcomp == NULL) {
rc = -ENOMEM;
@@ -2674,10 +2675,10 @@ static int skd_cons_skmsg(struct skd_device *skdev)
skmsg->id = i + SKD_ID_FIT_MSG;
skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
SKD_N_FITMSG_BYTES,
&skmsg->mb_dma_address);
skmsg->msg_buf = dma_alloc_coherent(&skdev->pdev->dev,
SKD_N_FITMSG_BYTES,
&skmsg->mb_dma_address,
GFP_KERNEL);
if (skmsg->msg_buf == NULL) {
rc = -ENOMEM;
goto err_out;
@@ -2971,8 +2972,8 @@ err_out:
static void skd_free_skcomp(struct skd_device *skdev)
{
if (skdev->skcomp_table)
pci_free_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
skdev->skcomp_table, skdev->cq_dma_address);
dma_free_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
skdev->skcomp_table, skdev->cq_dma_address);
skdev->skcomp_table = NULL;
skdev->cq_dma_address = 0;
@@ -2991,8 +2992,8 @@ static void skd_free_skmsg(struct skd_device *skdev)
skmsg = &skdev->skmsg_table[i];
if (skmsg->msg_buf != NULL) {
pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
skmsg->msg_buf,
dma_free_coherent(&skdev->pdev->dev, SKD_N_FITMSG_BYTES,
skmsg->msg_buf,
skmsg->mb_dma_address);
}
skmsg->msg_buf = NULL;
@@ -3104,7 +3105,7 @@ static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
{
dev_dbg(&skdev->pdev->dev, "add_disk\n");
device_add_disk(parent, skdev->disk);
device_add_disk(parent, skdev->disk, NULL);
return 0;
}
@@ -3172,18 +3173,12 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = pci_request_regions(pdev, DRV_NAME);
if (rc)
goto err_out;
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (!rc) {
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
dev_err(&pdev->dev, "consistent DMA mask error %d\n",
rc);
}
} else {
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
dev_err(&pdev->dev, "DMA mask error %d\n", rc);
goto err_out_regions;
}
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc)
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc) {
dev_err(&pdev->dev, "DMA mask error %d\n", rc);
goto err_out_regions;
}
if (!skd_major) {
@@ -3367,20 +3362,12 @@ static int skd_pci_resume(struct pci_dev *pdev)
rc = pci_request_regions(pdev, DRV_NAME);
if (rc)
goto err_out;
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (!rc) {
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
dev_err(&pdev->dev, "consistent DMA mask error %d\n",
rc);
}
} else {
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
dev_err(&pdev->dev, "DMA mask error %d\n", rc);
goto err_out_regions;
}
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc)
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc) {
dev_err(&pdev->dev, "DMA mask error %d\n", rc);
goto err_out_regions;
}
pci_set_master(pdev);

@@ -857,7 +857,7 @@ static int probe_disk(struct vdc_port *port)
port->vdisk_size, (port->vdisk_size >> (20 - 9)),
port->vio.ver.major, port->vio.ver.minor);
device_add_disk(&port->vio.vdev->dev, g);
device_add_disk(&port->vio.vdev->dev, g, NULL);
return 0;
}

@@ -19,7 +19,7 @@
#include <linux/module.h>
#include <linux/fd.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/mutex.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
@@ -190,6 +190,7 @@ struct floppy_state {
int ref_count;
struct gendisk *disk;
struct blk_mq_tag_set tag_set;
/* parent controller */
@@ -211,7 +212,6 @@ enum head {
struct swim_priv {
struct swim __iomem *base;
spinlock_t lock;
int fdc_queue;
int floppy_count;
struct floppy_state unit[FD_MAX_UNIT];
};
@@ -525,58 +525,36 @@ static blk_status_t floppy_read_sectors(struct floppy_state *fs,
return 0;
}
static struct request *swim_next_request(struct swim_priv *swd)
static blk_status_t swim_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct request_queue *q;
struct request *rq;
int old_pos = swd->fdc_queue;
struct floppy_state *fs = hctx->queue->queuedata;
struct swim_priv *swd = fs->swd;
struct request *req = bd->rq;
blk_status_t err;
if (!spin_trylock_irq(&swd->lock))
return BLK_STS_DEV_RESOURCE;
blk_mq_start_request(req);
if (!fs->disk_in || rq_data_dir(req) == WRITE) {
err = BLK_STS_IOERR;
goto out;
}
do {
q = swd->unit[swd->fdc_queue].disk->queue;
if (++swd->fdc_queue == swd->floppy_count)
swd->fdc_queue = 0;
if (q) {
rq = blk_fetch_request(q);
if (rq)
return rq;
}
} while (swd->fdc_queue != old_pos);
err = floppy_read_sectors(fs, blk_rq_pos(req),
blk_rq_cur_sectors(req),
bio_data(req->bio));
} while (blk_update_request(req, err, blk_rq_cur_bytes(req)));
__blk_mq_end_request(req, err);
return NULL;
}
err = BLK_STS_OK;
out:
spin_unlock_irq(&swd->lock);
return err;
static void do_fd_request(struct request_queue *q)
{
struct swim_priv *swd = q->queuedata;
struct request *req;
struct floppy_state *fs;
req = swim_next_request(swd);
while (req) {
blk_status_t err = BLK_STS_IOERR;
fs = req->rq_disk->private_data;
if (blk_rq_pos(req) >= fs->total_secs)
goto done;
if (!fs->disk_in)
goto done;
if (rq_data_dir(req) == WRITE && fs->write_protected)
goto done;
switch (rq_data_dir(req)) {
case WRITE:
/* NOT IMPLEMENTED */
break;
case READ:
err = floppy_read_sectors(fs, blk_rq_pos(req),
blk_rq_cur_sectors(req),
bio_data(req->bio));
break;
}
done:
if (!__blk_end_request_cur(req, err))
req = swim_next_request(swd);
}
}
static struct floppy_struct floppy_type[4] = {
@@ -823,6 +801,10 @@ static int swim_add_floppy(struct swim_priv *swd, enum drive_location location)
return 0;
}
static const struct blk_mq_ops swim_mq_ops = {
.queue_rq = swim_queue_rq,
};
static int swim_floppy_init(struct swim_priv *swd)
{
int err;
@@ -852,20 +834,25 @@ static int swim_floppy_init(struct swim_priv *swd)
spin_lock_init(&swd->lock);
for (drive = 0; drive < swd->floppy_count; drive++) {
struct request_queue *q;
swd->unit[drive].disk = alloc_disk(1);
if (swd->unit[drive].disk == NULL) {
err = -ENOMEM;
goto exit_put_disks;
}
swd->unit[drive].disk->queue = blk_init_queue(do_fd_request,
&swd->lock);
if (!swd->unit[drive].disk->queue) {
err = -ENOMEM;
q = blk_mq_init_sq_queue(&swd->unit[drive].tag_set, &swim_mq_ops,
2, BLK_MQ_F_SHOULD_MERGE);
if (IS_ERR(q)) {
err = PTR_ERR(q);
goto exit_put_disks;
}
swd->unit[drive].disk->queue = q;
blk_queue_bounce_limit(swd->unit[drive].disk->queue,
BLK_BOUNCE_HIGH);
swd->unit[drive].disk->queue->queuedata = swd;
swd->unit[drive].disk->queue->queuedata = &swd->unit[drive];
swd->unit[drive].swd = swd;
}
@@ -887,8 +874,18 @@ static int swim_floppy_init(struct swim_priv *swd)
exit_put_disks:
unregister_blkdev(FLOPPY_MAJOR, "fd");
while (drive--)
put_disk(swd->unit[drive].disk);
do {
struct gendisk *disk = swd->unit[drive].disk;
if (disk) {
if (disk->queue) {
blk_cleanup_queue(disk->queue);
disk->queue = NULL;
}
blk_mq_free_tag_set(&swd->unit[drive].tag_set);
put_disk(disk);
}
} while (drive--);
return err;
}
@@ -961,6 +958,7 @@ static int swim_remove(struct platform_device *dev)
for (drive = 0; drive < swd->floppy_count; drive++) {
del_gendisk(swd->unit[drive].disk);
blk_cleanup_queue(swd->unit[drive].disk->queue);
blk_mq_free_tag_set(&swd->unit[drive].tag_set);
put_disk(swd->unit[drive].disk);
}

@@ -25,7 +25,7 @@
#include <linux/delay.h>
#include <linux/fd.h>
#include <linux/ioctl.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/module.h>
@@ -206,6 +206,7 @@ struct floppy_state {
char dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)];
int index;
struct request *cur_req;
struct blk_mq_tag_set tag_set;
};
#define swim3_err(fmt, arg...) dev_err(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
@@ -260,16 +261,15 @@ static int floppy_revalidate(struct gendisk *disk);
static bool swim3_end_request(struct floppy_state *fs, blk_status_t err, unsigned int nr_bytes)
{
struct request *req = fs->cur_req;
int rc;
swim3_dbg(" end request, err=%d nr_bytes=%d, cur_req=%p\n",
err, nr_bytes, req);
if (err)
nr_bytes = blk_rq_cur_bytes(req);
rc = __blk_end_request(req, err, nr_bytes);
if (rc)
if (blk_update_request(req, err, nr_bytes))
return true;
__blk_mq_end_request(req, err);
fs->cur_req = NULL;
return false;
}
@@ -309,86 +309,58 @@ static int swim3_readbit(struct floppy_state *fs, int bit)
return (stat & DATA) == 0;
}
static void start_request(struct floppy_state *fs)
static blk_status_t swim3_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct request *req;
struct floppy_state *fs = hctx->queue->queuedata;
struct request *req = bd->rq;
unsigned long x;
swim3_dbg("start request, initial state=%d\n", fs->state);
if (fs->state == idle && fs->wanted) {
fs->state = available;
wake_up(&fs->wait);
return;
spin_lock_irq(&swim3_lock);
if (fs->cur_req || fs->state != idle) {
spin_unlock_irq(&swim3_lock);
return BLK_STS_DEV_RESOURCE;
}
while (fs->state == idle) {
swim3_dbg("start request, idle loop, cur_req=%p\n", fs->cur_req);
if (!fs->cur_req) {
fs->cur_req = blk_fetch_request(disks[fs->index]->queue);
swim3_dbg(" fetched request %p\n", fs->cur_req);
if (!fs->cur_req)
break;
}
req = fs->cur_req;
if (fs->mdev->media_bay &&
check_media_bay(fs->mdev->media_bay) != MB_FD) {
swim3_dbg("%s", " media bay absent, dropping req\n");
swim3_end_request(fs, BLK_STS_IOERR, 0);
continue;
}
#if 0 /* This is really too verbose */
swim3_dbg("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
req->rq_disk->disk_name, req->cmd,
(long)blk_rq_pos(req), blk_rq_sectors(req),
bio_data(req->bio));
swim3_dbg(" current_nr_sectors=%u\n",
blk_rq_cur_sectors(req));
#endif
if (blk_rq_pos(req) >= fs->total_secs) {
swim3_dbg(" pos out of bounds (%ld, max is %ld)\n",
(long)blk_rq_pos(req), (long)fs->total_secs);
swim3_end_request(fs, BLK_STS_IOERR, 0);
continue;
}
if (fs->ejected) {
swim3_dbg("%s", " disk ejected\n");
swim3_end_request(fs, BLK_STS_IOERR, 0);
continue;
}
if (rq_data_dir(req) == WRITE) {
if (fs->write_prot < 0)
fs->write_prot = swim3_readbit(fs, WRITE_PROT);
if (fs->write_prot) {
swim3_dbg("%s", " try to write, disk write protected\n");
swim3_end_request(fs, BLK_STS_IOERR, 0);
continue;
}
}
/* Do not remove the cast. blk_rq_pos(req) is now a
* sector_t and can be 64 bits, but it will never go
* past 32 bits for this driver anyway, so we can
* safely cast it down and not have to do a 64/32
* division
*/
fs->req_cyl = ((long)blk_rq_pos(req)) / fs->secpercyl;
x = ((long)blk_rq_pos(req)) % fs->secpercyl;
fs->head = x / fs->secpertrack;
fs->req_sector = x % fs->secpertrack + 1;
fs->state = do_transfer;
fs->retries = 0;
act(fs);
blk_mq_start_request(req);
fs->cur_req = req;
if (fs->mdev->media_bay &&
check_media_bay(fs->mdev->media_bay) != MB_FD) {
swim3_dbg("%s", " media bay absent, dropping req\n");
swim3_end_request(fs, BLK_STS_IOERR, 0);
goto out;
}
if (fs->ejected) {
swim3_dbg("%s", " disk ejected\n");
swim3_end_request(fs, BLK_STS_IOERR, 0);
goto out;
}
if (rq_data_dir(req) == WRITE) {
if (fs->write_prot < 0)
fs->write_prot = swim3_readbit(fs, WRITE_PROT);
if (fs->write_prot) {
swim3_dbg("%s", " try to write, disk write protected\n");
swim3_end_request(fs, BLK_STS_IOERR, 0);
goto out;
}
}
}
static void do_fd_request(struct request_queue * q)
{
start_request(q->queuedata);
/*
* Do not remove the cast. blk_rq_pos(req) is now a sector_t and can be
* 64 bits, but it will never go past 32 bits for this driver anyway, so
* we can safely cast it down and not have to do a 64/32 division
*/
fs->req_cyl = ((long)blk_rq_pos(req)) / fs->secpercyl;
x = ((long)blk_rq_pos(req)) % fs->secpercyl;
fs->head = x / fs->secpertrack;
fs->req_sector = x % fs->secpertrack + 1;
fs->state = do_transfer;
fs->retries = 0;
act(fs);
out:
spin_unlock_irq(&swim3_lock);
return BLK_STS_OK;
}
static void set_timeout(struct floppy_state *fs, int nticks,
@@ -585,7 +557,6 @@ static void scan_timeout(struct timer_list *t)
if (fs->retries > 5) {
swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
start_request(fs);
} else {
fs->state = jogging;
act(fs);
@@ -609,7 +580,6 @@ static void seek_timeout(struct timer_list *t)
swim3_err("%s", "Seek timeout\n");
swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
start_request(fs);
spin_unlock_irqrestore(&swim3_lock, flags);
}
@@ -638,7 +608,6 @@ static void settle_timeout(struct timer_list *t)
swim3_err("%s", "Seek settle timeout\n");
swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
start_request(fs);
unlock:
spin_unlock_irqrestore(&swim3_lock, flags);
}
@@ -667,7 +636,6 @@ static void xfer_timeout(struct timer_list *t)
(long)blk_rq_pos(fs->cur_req));
swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
start_request(fs);
spin_unlock_irqrestore(&swim3_lock, flags);
}
@@ -704,7 +672,6 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
if (fs->retries > 5) {
swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
start_request(fs);
} else {
fs->state = jogging;
act(fs);
@@ -796,7 +763,6 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
fs->state, rq_data_dir(req), intr, err);
swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
start_request(fs);
break;
}
fs->retries = 0;
@@ -813,8 +779,6 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
} else
fs->state = idle;
}
if (fs->state == idle)
start_request(fs);
break;
default:
swim3_err("Don't know what to do in state %d\n", fs->state);
@@ -862,14 +826,19 @@ static int grab_drive(struct floppy_state *fs, enum swim_state state,
static void release_drive(struct floppy_state *fs)
{
struct request_queue *q = disks[fs->index]->queue;
unsigned long flags;
swim3_dbg("%s", "-> release drive\n");
spin_lock_irqsave(&swim3_lock, flags);
fs->state = idle;
start_request(fs);
spin_unlock_irqrestore(&swim3_lock, flags);
blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
blk_mq_unquiesce_queue(q);
blk_mq_unfreeze_queue(q);
}
static int fd_eject(struct floppy_state *fs)
@@ -1089,6 +1058,10 @@ static const struct block_device_operations floppy_fops = {
.revalidate_disk= floppy_revalidate,
};
static const struct blk_mq_ops swim3_mq_ops = {
.queue_rq = swim3_queue_rq,
};
static void swim3_mb_event(struct macio_dev* mdev, int mb_state)
{
struct floppy_state *fs = macio_get_drvdata(mdev);
@@ -1202,47 +1175,63 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
static int swim3_attach(struct macio_dev *mdev,
const struct of_device_id *match)
{
struct floppy_state *fs;
struct gendisk *disk;
int index, rc;
int rc;
index = floppy_count++;
if (index >= MAX_FLOPPIES)
if (floppy_count >= MAX_FLOPPIES)
return -ENXIO;
/* Add the drive */
rc = swim3_add_device(mdev, index);
if (rc)
return rc;
/* Now register that disk. Same comment about failure handling */
disk = disks[index] = alloc_disk(1);
if (disk == NULL)
return -ENOMEM;
disk->queue = blk_init_queue(do_fd_request, &swim3_lock);
if (disk->queue == NULL) {
put_disk(disk);
return -ENOMEM;
if (floppy_count == 0) {
rc = register_blkdev(FLOPPY_MAJOR, "fd");
if (rc)
return rc;
}
fs = &floppy_states[floppy_count];
disk = alloc_disk(1);
if (disk == NULL) {
rc = -ENOMEM;
goto out_unregister;
}
disk->queue = blk_mq_init_sq_queue(&fs->tag_set, &swim3_mq_ops, 2,
BLK_MQ_F_SHOULD_MERGE);
if (IS_ERR(disk->queue)) {
rc = PTR_ERR(disk->queue);
disk->queue = NULL;
goto out_put_disk;
}
blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
disk->queue->queuedata = &floppy_states[index];
disk->queue->queuedata = fs;
if (index == 0) {
/* If we failed, there isn't much we can do as the driver is still
* too dumb to remove the device, just bail out
*/
if (register_blkdev(FLOPPY_MAJOR, "fd"))
return 0;
}
rc = swim3_add_device(mdev, floppy_count);
if (rc)
goto out_cleanup_queue;
disk->major = FLOPPY_MAJOR;
disk->first_minor = index;
disk->first_minor = floppy_count;
disk->fops = &floppy_fops;
disk->private_data = &floppy_states[index];
disk->private_data = fs;
disk->flags |= GENHD_FL_REMOVABLE;
sprintf(disk->disk_name, "fd%d", index);
sprintf(disk->disk_name, "fd%d", floppy_count);
set_capacity(disk, 2880);
add_disk(disk);
disks[floppy_count++] = disk;
return 0;
out_cleanup_queue:
blk_cleanup_queue(disk->queue);
disk->queue = NULL;
blk_mq_free_tag_set(&fs->tag_set);
out_put_disk:
put_disk(disk);
out_unregister:
if (floppy_count == 0)
unregister_blkdev(FLOPPY_MAJOR, "fd");
return rc;
}
static const struct of_device_id swim3_match[] =

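The swim3 rewrite deletes every start_request() kick from its timeout and interrupt paths: once ->queue_rq() returns BLK_STS_DEV_RESOURCE while the drive is claimed, blk-mq re-drives the queue on its own, and release_drive() only needs to flush whatever is still in flight. The freeze/quiesce pair it uses is the stock blk-mq drain idiom:

/* Drain a queue: wait for in-flight I/O, then for ->queue_rq() itself */
blk_mq_freeze_queue(q);		/* blocks until no requests are in flight */
blk_mq_quiesce_queue(q);	/* blocks until all queue_rq calls return */
blk_mq_unquiesce_queue(q);
blk_mq_unfreeze_queue(q);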
@@ -16,7 +16,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
@@ -197,7 +197,6 @@ enum {
FL_NON_RAID = FW_VER_NON_RAID,
FL_4PORT = FW_VER_4PORT,
FL_FW_VER_MASK = (FW_VER_NON_RAID | FW_VER_4PORT),
FL_DAC = (1 << 16),
FL_DYN_MAJOR = (1 << 17),
};
@@ -244,6 +243,7 @@ struct carm_port {
unsigned int port_no;
struct gendisk *disk;
struct carm_host *host;
struct blk_mq_tag_set tag_set;
/* attached device characteristics */
u64 capacity;
@@ -279,6 +279,7 @@ struct carm_host {
unsigned int state;
u32 fw_ver;
struct blk_mq_tag_set tag_set;
struct request_queue *oob_q;
unsigned int n_oob;
@@ -750,7 +751,7 @@ static inline void carm_end_request_queued(struct carm_host *host,
struct request *req = crq->rq;
int rc;
__blk_end_request_all(req, error);
blk_mq_end_request(req, error);
rc = carm_put_request(host, crq);
assert(rc == 0);
@@ -760,7 +761,7 @@ static inline void carm_push_q (struct carm_host *host, struct request_queue *q)
{
unsigned int idx = host->wait_q_prod % CARM_MAX_WAIT_Q;
blk_stop_queue(q);
blk_mq_stop_hw_queues(q);
VPRINTK("STOPPED QUEUE %p\n", q);
host->wait_q[idx] = q;
@@ -785,7 +786,7 @@ static inline void carm_round_robin(struct carm_host *host)
{
struct request_queue *q = carm_pop_q(host);
if (q) {
blk_start_queue(q);
blk_mq_start_hw_queues(q);
VPRINTK("STARTED QUEUE %p\n", q);
}
}
@@ -802,82 +803,86 @@ static inline void carm_end_rq(struct carm_host *host, struct carm_request *crq,
}
}
static void carm_oob_rq_fn(struct request_queue *q)
static blk_status_t carm_oob_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct request_queue *q = hctx->queue;
struct carm_host *host = q->queuedata;
struct carm_request *crq;
struct request *rq;
int rc;
while (1) {
DPRINTK("get req\n");
rq = blk_fetch_request(q);
if (!rq)
break;
blk_mq_start_request(bd->rq);
crq = rq->special;
assert(crq != NULL);
assert(crq->rq == rq);
spin_lock_irq(&host->lock);
crq->n_elem = 0;
crq = bd->rq->special;
assert(crq != NULL);
assert(crq->rq == bd->rq);
DPRINTK("send req\n");
rc = carm_send_msg(host, crq);
if (rc) {
blk_requeue_request(q, rq);
carm_push_q(host, q);
return; /* call us again later, eventually */
}
crq->n_elem = 0;
DPRINTK("send req\n");
rc = carm_send_msg(host, crq);
if (rc) {
carm_push_q(host, q);
spin_unlock_irq(&host->lock);
return BLK_STS_DEV_RESOURCE;
}
spin_unlock_irq(&host->lock);
return BLK_STS_OK;
}
static void carm_rq_fn(struct request_queue *q)
static blk_status_t carm_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct request_queue *q = hctx->queue;
struct carm_port *port = q->queuedata;
struct carm_host *host = port->host;
struct carm_msg_rw *msg;
struct carm_request *crq;
struct request *rq;
struct request *rq = bd->rq;
struct scatterlist *sg;
int writing = 0, pci_dir, i, n_elem, rc;
u32 tmp;
unsigned int msg_size;
queue_one_request:
VPRINTK("get req\n");
rq = blk_peek_request(q);
if (!rq)
return;
blk_mq_start_request(rq);
spin_lock_irq(&host->lock);
crq = carm_get_request(host);
if (!crq) {
carm_push_q(host, q);
return; /* call us again later, eventually */
spin_unlock_irq(&host->lock);
return BLK_STS_DEV_RESOURCE;
}
crq->rq = rq;
blk_start_request(rq);
if (rq_data_dir(rq) == WRITE) {
writing = 1;
pci_dir = PCI_DMA_TODEVICE;
pci_dir = DMA_TO_DEVICE;
} else {
pci_dir = PCI_DMA_FROMDEVICE;
pci_dir = DMA_FROM_DEVICE;
}
/* get scatterlist from block layer */
sg = &crq->sg[0];
n_elem = blk_rq_map_sg(q, rq, sg);
if (n_elem <= 0) {
/* request with no s/g entries? */
carm_end_rq(host, crq, BLK_STS_IOERR);
return; /* request with no s/g entries? */
spin_unlock_irq(&host->lock);
return BLK_STS_IOERR;
}
/* map scatterlist to PCI bus addresses */
n_elem = pci_map_sg(host->pdev, sg, n_elem, pci_dir);
n_elem = dma_map_sg(&host->pdev->dev, sg, n_elem, pci_dir);
if (n_elem <= 0) {
/* request with no s/g entries? */
carm_end_rq(host, crq, BLK_STS_IOERR);
return; /* request with no s/g entries? */
spin_unlock_irq(&host->lock);
return BLK_STS_IOERR;
}
crq->n_elem = n_elem;
crq->port = port;
@@ -927,12 +932,13 @@ queue_one_request:
rc = carm_send_msg(host, crq);
if (rc) {
carm_put_request(host, crq);
blk_requeue_request(q, rq);
carm_push_q(host, q);
return; /* call us again later, eventually */
spin_unlock_irq(&host->lock);
return BLK_STS_DEV_RESOURCE;
}
goto queue_one_request;
spin_unlock_irq(&host->lock);
return BLK_STS_OK;
}
static void carm_handle_array_info(struct carm_host *host,
@@ -1052,11 +1058,11 @@ static inline void carm_handle_rw(struct carm_host *host,
VPRINTK("ENTER\n");
if (rq_data_dir(crq->rq) == WRITE)
pci_dir = PCI_DMA_TODEVICE;
pci_dir = DMA_TO_DEVICE;
else
pci_dir = PCI_DMA_FROMDEVICE;
pci_dir = DMA_FROM_DEVICE;
pci_unmap_sg(host->pdev, &crq->sg[0], crq->n_elem, pci_dir);
dma_unmap_sg(&host->pdev->dev, &crq->sg[0], crq->n_elem, pci_dir);
carm_end_rq(host, crq, error);
}
@@ -1485,6 +1491,14 @@ static int carm_init_host(struct carm_host *host)
return 0;
}
static const struct blk_mq_ops carm_oob_mq_ops = {
.queue_rq = carm_oob_queue_rq,
};
static const struct blk_mq_ops carm_mq_ops = {
.queue_rq = carm_queue_rq,
};
static int carm_init_disks(struct carm_host *host)
{
unsigned int i;
@@ -1513,9 +1527,10 @@ static int carm_init_disks(struct carm_host *host)
disk->fops = &carm_bd_ops;
disk->private_data = port;
q = blk_init_queue(carm_rq_fn, &host->lock);
if (!q) {
rc = -ENOMEM;
q = blk_mq_init_sq_queue(&port->tag_set, &carm_mq_ops,
max_queue, BLK_MQ_F_SHOULD_MERGE);
if (IS_ERR(q)) {
rc = PTR_ERR(q);
break;
}
disk->queue = q;
@@ -1533,14 +1548,18 @@ static void carm_free_disks(struct carm_host *host)
unsigned int i;
for (i = 0; i < CARM_MAX_PORTS; i++) {
struct gendisk *disk = host->port[i].disk;
struct carm_port *port = &host->port[i];
struct gendisk *disk = port->disk;
if (disk) {
struct request_queue *q = disk->queue;
if (disk->flags & GENHD_FL_UP)
del_gendisk(disk);
if (q)
if (q) {
blk_mq_free_tag_set(&port->tag_set);
blk_cleanup_queue(q);
}
put_disk(disk);
}
}
@@ -1548,8 +1567,8 @@ static void carm_free_disks(struct carm_host *host)
static int carm_init_shm(struct carm_host *host)
{
host->shm = pci_alloc_consistent(host->pdev, CARM_SHM_SIZE,
&host->shm_dma);
host->shm = dma_alloc_coherent(&host->pdev->dev, CARM_SHM_SIZE,
&host->shm_dma, GFP_KERNEL);
if (!host->shm)
return -ENOMEM;
@@ -1565,7 +1584,6 @@ static int carm_init_shm(struct carm_host *host)
static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct carm_host *host;
unsigned int pci_dac;
int rc;
struct request_queue *q;
unsigned int i;
@@ -1580,28 +1598,12 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto err_out;
#ifdef IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (!rc) {
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (rc) {
printk(KERN_ERR DRV_NAME "(%s): consistent DMA mask failure\n",
pci_name(pdev));
goto err_out_regions;
}
pci_dac = 1;
} else {
#endif
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
printk(KERN_ERR DRV_NAME "(%s): DMA mask failure\n",
pci_name(pdev));
goto err_out_regions;
}
pci_dac = 0;
#ifdef IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (rc) {
printk(KERN_ERR DRV_NAME "(%s): DMA mask failure\n",
pci_name(pdev));
goto err_out_regions;
}
#endif
host = kzalloc(sizeof(*host), GFP_KERNEL);
if (!host) {
@@ -1612,7 +1614,6 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
}
host->pdev = pdev;
host->flags = pci_dac ? FL_DAC : 0;
spin_lock_init(&host->lock);
INIT_WORK(&host->fsm_task, carm_fsm_task);
init_completion(&host->probe_comp);
@@ -1636,12 +1637,13 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_iounmap;
}
q = blk_init_queue(carm_oob_rq_fn, &host->lock);
if (!q) {
q = blk_mq_init_sq_queue(&host->tag_set, &carm_oob_mq_ops, 1,
BLK_MQ_F_NO_SCHED);
if (IS_ERR(q)) {
printk(KERN_ERR DRV_NAME "(%s): OOB queue alloc failure\n",
pci_name(pdev));
rc = -ENOMEM;
goto err_out_pci_free;
rc = PTR_ERR(q);
goto err_out_dma_free;
}
host->oob_q = q;
q->queuedata = host;
@@ -1705,8 +1707,9 @@ err_out_free_majors:
else if (host->major == 161)
clear_bit(1, &carm_major_alloc);
blk_cleanup_queue(host->oob_q);
err_out_pci_free:
pci_free_consistent(pdev, CARM_SHM_SIZE, host->shm, host->shm_dma);
blk_mq_free_tag_set(&host->tag_set);
err_out_dma_free:
dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma);
err_out_iounmap:
iounmap(host->mmio);
err_out_kfree:
@@ -1736,7 +1739,8 @@ static void carm_remove_one (struct pci_dev *pdev)
else if (host->major == 161)
clear_bit(1, &carm_major_alloc);
blk_cleanup_queue(host->oob_q);
pci_free_consistent(pdev, CARM_SHM_SIZE, host->shm, host->shm_dma);
blk_mq_free_tag_set(&host->tag_set);
dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma);
iounmap(host->mmio);
kfree(host);
pci_release_regions(pdev);

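sx8 keeps its round-robin backpressure, just translated: where the legacy driver called blk_stop_queue() and blk_requeue_request(), the blk-mq version parks the hardware queue with blk_mq_stop_hw_queues(), returns BLK_STS_DEV_RESOURCE so the request stays owned by blk-mq, and restarts the queue from the completion path. A skeleton under those assumptions (the slot check and send helper are invented):

static blk_status_t drv_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct request_queue *q = hctx->queue;

	blk_mq_start_request(bd->rq);

	spin_lock_irq(&host_lock);
	if (!drv_hw_slot_free()) {		/* hypothetical: out of msg slots */
		blk_mq_stop_hw_queues(q);	/* park until a slot completes */
		spin_unlock_irq(&host_lock);
		return BLK_STS_DEV_RESOURCE;	/* blk-mq will redispatch */
	}
	drv_send_msg(bd->rq);			/* hypothetical issue path */
	spin_unlock_irq(&host_lock);
	return BLK_STS_OK;
}

/* completion side, once a slot frees up: */
blk_mq_start_hw_queues(q);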
@@ -363,12 +363,12 @@ static int add_bio(struct cardinfo *card)
 
        vec = bio_iter_iovec(bio, card->current_iter);
 
-       dma_handle = pci_map_page(card->dev,
+       dma_handle = dma_map_page(&card->dev->dev,
                                  vec.bv_page,
                                  vec.bv_offset,
                                  vec.bv_len,
                                  bio_op(bio) == REQ_OP_READ ?
-                                 PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
+                                 DMA_FROM_DEVICE : DMA_TO_DEVICE);
 
        p = &card->mm_pages[card->Ready];
        desc = &p->desc[p->cnt];
@@ -421,7 +421,7 @@ static void process_page(unsigned long data)
        struct cardinfo *card = (struct cardinfo *)data;
        unsigned int dma_status = card->dma_status;
 
-       spin_lock_bh(&card->lock);
+       spin_lock(&card->lock);
        if (card->Active < 0)
                goto out_unlock;
        page = &card->mm_pages[card->Active];
@@ -448,10 +448,10 @@ static void process_page(unsigned long data)
                                page->iter = page->bio->bi_iter;
                }
 
-               pci_unmap_page(card->dev, desc->data_dma_handle,
+               dma_unmap_page(&card->dev->dev, desc->data_dma_handle,
                               vec.bv_len,
                               (control & DMASCR_TRANSFER_READ) ?
-                              PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
+                              DMA_TO_DEVICE : DMA_FROM_DEVICE);
                if (control & DMASCR_HARD_ERROR) {
                        /* error */
                        bio->bi_status = BLK_STS_IOERR;
@@ -496,7 +496,7 @@ static void process_page(unsigned long data)
                mm_start_io(card);
        }
 out_unlock:
-       spin_unlock_bh(&card->lock);
+       spin_unlock(&card->lock);
 
        while (return_bio) {
                struct bio *bio = return_bio;
@@ -817,8 +817,8 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
        dev_printk(KERN_INFO, &dev->dev,
                   "Micro Memory(tm) controller found (PCI Mem Module (Battery Backup))\n");
 
-       if (pci_set_dma_mask(dev, DMA_BIT_MASK(64)) &&
-           pci_set_dma_mask(dev, DMA_BIT_MASK(32))) {
+       if (dma_set_mask(&dev->dev, DMA_BIT_MASK(64)) &&
+           dma_set_mask(&dev->dev, DMA_BIT_MASK(32))) {
                dev_printk(KERN_WARNING, &dev->dev, "NO suitable DMA found\n");
                return -ENOMEM;
        }
@@ -871,12 +871,10 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
                goto failed_magic;
        }
 
-       card->mm_pages[0].desc = pci_alloc_consistent(card->dev,
-                                                     PAGE_SIZE * 2,
-                                                     &card->mm_pages[0].page_dma);
-       card->mm_pages[1].desc = pci_alloc_consistent(card->dev,
-                                                     PAGE_SIZE * 2,
-                                                     &card->mm_pages[1].page_dma);
+       card->mm_pages[0].desc = dma_alloc_coherent(&card->dev->dev,
+                       PAGE_SIZE * 2, &card->mm_pages[0].page_dma, GFP_KERNEL);
+       card->mm_pages[1].desc = dma_alloc_coherent(&card->dev->dev,
+                       PAGE_SIZE * 2, &card->mm_pages[1].page_dma, GFP_KERNEL);
        if (card->mm_pages[0].desc == NULL ||
            card->mm_pages[1].desc == NULL) {
                dev_printk(KERN_ERR, &card->dev->dev, "alloc failed\n");
@@ -1002,13 +1000,13 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 failed_req_irq:
 failed_alloc:
        if (card->mm_pages[0].desc)
-               pci_free_consistent(card->dev, PAGE_SIZE*2,
-                                   card->mm_pages[0].desc,
-                                   card->mm_pages[0].page_dma);
+               dma_free_coherent(&card->dev->dev, PAGE_SIZE * 2,
+                                 card->mm_pages[0].desc,
+                                 card->mm_pages[0].page_dma);
        if (card->mm_pages[1].desc)
-               pci_free_consistent(card->dev, PAGE_SIZE*2,
-                                   card->mm_pages[1].desc,
-                                   card->mm_pages[1].page_dma);
+               dma_free_coherent(&card->dev->dev, PAGE_SIZE * 2,
+                                 card->mm_pages[1].desc,
+                                 card->mm_pages[1].page_dma);
 failed_magic:
        iounmap(card->csr_remap);
 failed_remap_csr:
@@ -1027,11 +1025,11 @@ static void mm_pci_remove(struct pci_dev *dev)
        iounmap(card->csr_remap);
 
        if (card->mm_pages[0].desc)
-               pci_free_consistent(card->dev, PAGE_SIZE*2,
+               dma_free_coherent(&card->dev->dev, PAGE_SIZE * 2,
                                    card->mm_pages[0].desc,
                                    card->mm_pages[0].page_dma);
        if (card->mm_pages[1].desc)
-               pci_free_consistent(card->dev, PAGE_SIZE*2,
+               dma_free_coherent(&card->dev->dev, PAGE_SIZE * 2,
                                    card->mm_pages[1].desc,
                                    card->mm_pages[1].page_dma);
        blk_cleanup_queue(card->queue);
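
For the streaming mappings above, the generic calls pair exactly like their
PCI wrappers did; a minimal sketch (illustrative name; umem maps in add_bio()
and unmaps in process_page(), and, like the old code, does not check for
mapping failures -- the dma_mapping_error() check below is an addition):

    static int example_dma_transfer(struct device *dev, struct page *page,
                                    unsigned int offset, unsigned int len)
    {
            dma_addr_t handle;

            handle = dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, handle))
                    return -ENOMEM;

            /* ... point the hardware at 'handle' and wait for the DMA ... */

            /* Unmap with the same size and direction used for mapping. */
            dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
            return 0;
    }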

diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c

@@ -351,8 +351,8 @@ static int minor_to_index(int minor)
        return minor >> PART_BITS;
 }
 
-static ssize_t virtblk_serial_show(struct device *dev,
-                               struct device_attribute *attr, char *buf)
+static ssize_t serial_show(struct device *dev,
+                          struct device_attribute *attr, char *buf)
 {
        struct gendisk *disk = dev_to_disk(dev);
        int err;
@@ -371,7 +371,7 @@ static ssize_t virtblk_serial_show(struct device *dev,
 
        return err;
 }
-static DEVICE_ATTR(serial, 0444, virtblk_serial_show, NULL);
+static DEVICE_ATTR_RO(serial);
 
 /* The queue's logical block size must be set before calling this */
 static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
@@ -545,8 +545,8 @@ static const char *const virtblk_cache_types[] = {
 };
 
 static ssize_t
-virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
-                        const char *buf, size_t count)
+cache_type_store(struct device *dev, struct device_attribute *attr,
+                const char *buf, size_t count)
 {
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
@@ -564,8 +564,7 @@ virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
 }
 
 static ssize_t
-virtblk_cache_type_show(struct device *dev, struct device_attribute *attr,
-                       char *buf)
+cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
@@ -575,12 +574,38 @@ virtblk_cache_type_show(struct device *dev, struct device_attribute *attr,
        return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
 }
 
-static const struct device_attribute dev_attr_cache_type_ro =
-       __ATTR(cache_type, 0444,
-              virtblk_cache_type_show, NULL);
-static const struct device_attribute dev_attr_cache_type_rw =
-       __ATTR(cache_type, 0644,
-              virtblk_cache_type_show, virtblk_cache_type_store);
+static DEVICE_ATTR_RW(cache_type);
+
+static struct attribute *virtblk_attrs[] = {
+       &dev_attr_serial.attr,
+       &dev_attr_cache_type.attr,
+       NULL,
+};
+
+static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
+               struct attribute *a, int n)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct gendisk *disk = dev_to_disk(dev);
+       struct virtio_blk *vblk = disk->private_data;
+       struct virtio_device *vdev = vblk->vdev;
+
+       if (a == &dev_attr_cache_type.attr &&
+           !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
+               return S_IRUGO;
+
+       return a->mode;
+}
+
+static const struct attribute_group virtblk_attr_group = {
+       .attrs = virtblk_attrs,
+       .is_visible = virtblk_attrs_are_visible,
+};
+
+static const struct attribute_group *virtblk_attr_groups[] = {
+       &virtblk_attr_group,
+       NULL,
+};
 
 static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
                unsigned int hctx_idx, unsigned int numa_node)
@@ -780,24 +805,9 @@ static int virtblk_probe(struct virtio_device *vdev)
        virtblk_update_capacity(vblk, false);
        virtio_device_ready(vdev);
 
-       device_add_disk(&vdev->dev, vblk->disk);
-
-       err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
-       if (err)
-               goto out_del_disk;
-
-       if (virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
-               err = device_create_file(disk_to_dev(vblk->disk),
-                               &dev_attr_cache_type_rw);
-       else
-               err = device_create_file(disk_to_dev(vblk->disk),
-                               &dev_attr_cache_type_ro);
-       if (err)
-               goto out_del_disk;
-
+       device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
        return 0;
 
-out_del_disk:
-       del_gendisk(vblk->disk);
-       blk_cleanup_queue(vblk->disk->queue);
 out_free_tags:
        blk_mq_free_tag_set(&vblk->tag_set);
 out_put_disk:
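
The renames above are what let DEVICE_ATTR_RO()/DEVICE_ATTR_RW() apply: those
helpers derive the callback names from the attribute name (serial_show(),
cache_type_show(), cache_type_store()). A stripped-down sketch of the
resulting pattern (illustrative names, not the virtio_blk code):

    static ssize_t example_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
    {
            return sprintf(buf, "example\n");
    }
    static DEVICE_ATTR_RO(example);    /* declares dev_attr_example, mode 0444 */

    static struct attribute *example_attrs[] = {
            &dev_attr_example.attr,
            NULL,
    };

    static umode_t example_attrs_visible(struct kobject *kobj,
                                         struct attribute *a, int n)
    {
            /* Return 0 to hide an attribute or a reduced mode to restrict
             * it; virtio_blk drops cache_type to read-only when the device
             * lacks VIRTIO_BLK_F_CONFIG_WCE. */
            return a->mode;
    }

    static const struct attribute_group example_group = {
            .attrs      = example_attrs,
            .is_visible = example_attrs_visible,
    };

Handing such groups to device_add_disk() registers them before the disk's
KOBJ_ADD uevent is emitted, which is the point of the series: udev can no
longer observe the disk without its attributes.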

diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c

@@ -2420,7 +2420,7 @@ static void blkfront_connect(struct blkfront_info *info)
        for (i = 0; i < info->nr_rings; i++)
                kick_pending_request_queues(&info->rinfo[i]);
 
-       device_add_disk(&info->xbdev->dev, info->gd);
+       device_add_disk(&info->xbdev->dev, info->gd, NULL);
 
        info->is_ready = 1;
        return;
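
Callers with no extra attributes simply pass NULL for the new third argument;
as of this series the signature is:

    void device_add_disk(struct device *parent, struct gendisk *disk,
                         const struct attribute_group **groups);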

diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c

@@ -88,7 +88,7 @@
 #include <linux/kernel.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
 #include <linux/mutex.h>
 #include <linux/ata.h>
 #include <linux/hdreg.h>
@@ -209,6 +209,8 @@ struct ace_device {
        struct device *dev;
        struct request_queue *queue;
        struct gendisk *gd;
+       struct blk_mq_tag_set tag_set;
+       struct list_head rq_list;
 
        /* Inserted CF card parameters */
        u16 cf_id[ATA_ID_WORDS];
@@ -462,18 +464,26 @@ static inline void ace_fsm_yieldirq(struct ace_device *ace)
        ace->fsm_continue_flag = 0;
 }
 
+static bool ace_has_next_request(struct request_queue *q)
+{
+       struct ace_device *ace = q->queuedata;
+
+       return !list_empty(&ace->rq_list);
+}
+
 /* Get the next read/write request; ending requests that we don't handle */
 static struct request *ace_get_next_request(struct request_queue *q)
 {
-       struct request *req;
+       struct ace_device *ace = q->queuedata;
+       struct request *rq;
 
-       while ((req = blk_peek_request(q)) != NULL) {
-               if (!blk_rq_is_passthrough(req))
-                       break;
-               blk_start_request(req);
-               __blk_end_request_all(req, BLK_STS_IOERR);
+       rq = list_first_entry_or_null(&ace->rq_list, struct request, queuelist);
+       if (rq) {
+               list_del_init(&rq->queuelist);
+               blk_mq_start_request(rq);
        }
-       return req;
+
+       return NULL;
 }
 
 static void ace_fsm_dostate(struct ace_device *ace)
@@ -499,11 +509,11 @@ static void ace_fsm_dostate(struct ace_device *ace)
 
                /* Drop all in-flight and pending requests */
                if (ace->req) {
-                       __blk_end_request_all(ace->req, BLK_STS_IOERR);
+                       blk_mq_end_request(ace->req, BLK_STS_IOERR);
                        ace->req = NULL;
                }
-               while ((req = blk_fetch_request(ace->queue)) != NULL)
-                       __blk_end_request_all(req, BLK_STS_IOERR);
+               while ((req = ace_get_next_request(ace->queue)) != NULL)
+                       blk_mq_end_request(req, BLK_STS_IOERR);
 
                /* Drop back to IDLE state and notify waiters */
                ace->fsm_state = ACE_FSM_STATE_IDLE;
@@ -517,7 +527,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
        switch (ace->fsm_state) {
        case ACE_FSM_STATE_IDLE:
                /* See if there is anything to do */
-               if (ace->id_req_count || ace_get_next_request(ace->queue)) {
+               if (ace->id_req_count || ace_has_next_request(ace->queue)) {
                        ace->fsm_iter_num++;
                        ace->fsm_state = ACE_FSM_STATE_REQ_LOCK;
                        mod_timer(&ace->stall_timer, jiffies + HZ);
@@ -651,7 +661,6 @@ static void ace_fsm_dostate(struct ace_device *ace)
                        ace->fsm_state = ACE_FSM_STATE_IDLE;
                        break;
                }
-               blk_start_request(req);
 
                /* Okay, it's a data request, set it up for transfer */
                dev_dbg(ace->dev,
@@ -728,7 +737,8 @@ static void ace_fsm_dostate(struct ace_device *ace)
                }
 
                /* bio finished; is there another one? */
-               if (__blk_end_request_cur(ace->req, BLK_STS_OK)) {
+               if (blk_update_request(ace->req, BLK_STS_OK,
+                   blk_rq_cur_bytes(ace->req))) {
                        /* dev_dbg(ace->dev, "next block; h=%u c=%u\n",
                         *      blk_rq_sectors(ace->req),
                         *      blk_rq_cur_sectors(ace->req));
@@ -854,17 +864,23 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
 /* ---------------------------------------------------------------------
  * Block ops
  */
-static void ace_request(struct request_queue * q)
+static blk_status_t ace_queue_rq(struct blk_mq_hw_ctx *hctx,
+                                const struct blk_mq_queue_data *bd)
 {
-       struct request *req;
-       struct ace_device *ace;
+       struct ace_device *ace = hctx->queue->queuedata;
+       struct request *req = bd->rq;
 
-       req = ace_get_next_request(q);
-       if (req) {
-               ace = req->rq_disk->private_data;
-               tasklet_schedule(&ace->fsm_tasklet);
+       if (blk_rq_is_passthrough(req)) {
+               blk_mq_start_request(req);
+               return BLK_STS_IOERR;
        }
+
+       spin_lock_irq(&ace->lock);
+       list_add_tail(&req->queuelist, &ace->rq_list);
+       spin_unlock_irq(&ace->lock);
+
+       tasklet_schedule(&ace->fsm_tasklet);
+       return BLK_STS_OK;
 }
 
 static unsigned int ace_check_events(struct gendisk *gd, unsigned int clearing)
@@ -957,6 +973,10 @@ static const struct block_device_operations ace_fops = {
        .getgeo = ace_getgeo,
 };
 
+static const struct blk_mq_ops ace_mq_ops = {
+       .queue_rq = ace_queue_rq,
+};
+
 /* --------------------------------------------------------------------
  * SystemACE device setup/teardown code
  */
@@ -972,6 +992,7 @@ static int ace_setup(struct ace_device *ace)
 
        spin_lock_init(&ace->lock);
        init_completion(&ace->id_completion);
+       INIT_LIST_HEAD(&ace->rq_list);
 
        /*
         * Map the device
@@ -989,9 +1010,15 @@ static int ace_setup(struct ace_device *ace)
        /*
         * Initialize the request queue
         */
-       ace->queue = blk_init_queue(ace_request, &ace->lock);
-       if (ace->queue == NULL)
+       ace->queue = blk_mq_init_sq_queue(&ace->tag_set, &ace_mq_ops, 2,
+                                         BLK_MQ_F_SHOULD_MERGE);
+       if (IS_ERR(ace->queue)) {
+               rc = PTR_ERR(ace->queue);
+               ace->queue = NULL;
                goto err_blk_initq;
+       }
        ace->queue->queuedata = ace;
 
        blk_queue_logical_block_size(ace->queue, 512);
        blk_queue_bounce_limit(ace->queue, BLK_BOUNCE_HIGH);
@@ -1066,6 +1093,7 @@ err_read:
        put_disk(ace->gd);
 err_alloc_disk:
        blk_cleanup_queue(ace->queue);
+       blk_mq_free_tag_set(&ace->tag_set);
 err_blk_initq:
        iounmap(ace->baseaddr);
 err_ioremap:
@@ -1081,8 +1109,10 @@ static void ace_teardown(struct ace_device *ace)
                put_disk(ace->gd);
        }
 
-       if (ace->queue)
+       if (ace->queue) {
                blk_cleanup_queue(ace->queue);
+               blk_mq_free_tag_set(&ace->tag_set);
+       }
 
        tasklet_kill(&ace->fsm_tasklet);
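
The shape of this conversion: ->queue_rq() no longer performs I/O; it only
parks the request on a driver-private list and kicks the existing FSM tasklet,
which later dequeues with blk_mq_start_request() and completes with
blk_mq_end_request(). A minimal sketch of that deferral pattern (illustrative
names, not the xsysace code):

    struct example_dev {
            spinlock_t lock;
            struct list_head rq_list;
            struct tasklet_struct tasklet;
    };

    static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
                                         const struct blk_mq_queue_data *bd)
    {
            struct example_dev *edev = hctx->queue->queuedata;

            spin_lock_irq(&edev->lock);
            list_add_tail(&bd->rq->queuelist, &edev->rq_list);
            spin_unlock_irq(&edev->lock);

            /* The tasklet must call blk_mq_start_request() when it takes a
             * request off rq_list, before it completes that request. */
            tasklet_schedule(&edev->tasklet);
            return BLK_STS_OK;
    }

    static const struct blk_mq_ops example_mq_ops = {
            .queue_rq = example_queue_rq,
    };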

diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c

@@ -31,7 +31,7 @@
 #include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
 #include <linux/bitops.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
@@ -66,43 +66,44 @@ static DEFINE_SPINLOCK(z2ram_lock);
 
 static struct gendisk *z2ram_gendisk;
 
-static void do_z2_request(struct request_queue *q)
+static blk_status_t z2_queue_rq(struct blk_mq_hw_ctx *hctx,
+                               const struct blk_mq_queue_data *bd)
 {
-       struct request *req;
-
-       req = blk_fetch_request(q);
-       while (req) {
-               unsigned long start = blk_rq_pos(req) << 9;
-               unsigned long len  = blk_rq_cur_bytes(req);
-               blk_status_t err = BLK_STS_OK;
-
-               if (start + len > z2ram_size) {
-                       pr_err(DEVICE_NAME ": bad access: block=%llu, "
-                              "count=%u\n",
-                              (unsigned long long)blk_rq_pos(req),
-                              blk_rq_cur_sectors(req));
-                       err = BLK_STS_IOERR;
-                       goto done;
-               }
-               while (len) {
-                       unsigned long addr = start & Z2RAM_CHUNKMASK;
-                       unsigned long size = Z2RAM_CHUNKSIZE - addr;
-                       void *buffer = bio_data(req->bio);
-
-                       if (len < size)
-                               size = len;
-                       addr += z2ram_map[ start >> Z2RAM_CHUNKSHIFT ];
-                       if (rq_data_dir(req) == READ)
-                               memcpy(buffer, (char *)addr, size);
-                       else
-                               memcpy((char *)addr, buffer, size);
-                       start += size;
-                       len -= size;
-               }
-       done:
-               if (!__blk_end_request_cur(req, err))
-                       req = blk_fetch_request(q);
-       }
+       struct request *req = bd->rq;
+       unsigned long start = blk_rq_pos(req) << 9;
+       unsigned long len  = blk_rq_cur_bytes(req);
+
+       blk_mq_start_request(req);
+
+       if (start + len > z2ram_size) {
+               pr_err(DEVICE_NAME ": bad access: block=%llu, "
+                      "count=%u\n",
+                      (unsigned long long)blk_rq_pos(req),
+                      blk_rq_cur_sectors(req));
+               return BLK_STS_IOERR;
+       }
+
+       spin_lock_irq(&z2ram_lock);
+       while (len) {
+               unsigned long addr = start & Z2RAM_CHUNKMASK;
+               unsigned long size = Z2RAM_CHUNKSIZE - addr;
+               void *buffer = bio_data(req->bio);
+
+               if (len < size)
+                       size = len;
+               addr += z2ram_map[ start >> Z2RAM_CHUNKSHIFT ];
+               if (rq_data_dir(req) == READ)
+                       memcpy(buffer, (char *)addr, size);
+               else
+                       memcpy((char *)addr, buffer, size);
+               start += size;
+               len -= size;
+       }
+       spin_unlock_irq(&z2ram_lock);
+
+       blk_mq_end_request(req, BLK_STS_OK);
+       return BLK_STS_OK;
 }
 
 static void
@@ -337,6 +338,11 @@ static struct kobject *z2_find(dev_t dev, int *part, void *data)
 }
 
 static struct request_queue *z2_queue;
+static struct blk_mq_tag_set tag_set;
+
+static const struct blk_mq_ops z2_mq_ops = {
+       .queue_rq = z2_queue_rq,
+};
 
 static int __init
 z2_init(void)
@@ -355,9 +361,13 @@ z2_init(void)
        if (!z2ram_gendisk)
                goto out_disk;
 
-       z2_queue = blk_init_queue(do_z2_request, &z2ram_lock);
-       if (!z2_queue)
+       z2_queue = blk_mq_init_sq_queue(&tag_set, &z2_mq_ops, 16,
+                                       BLK_MQ_F_SHOULD_MERGE);
+       if (IS_ERR(z2_queue)) {
+               ret = PTR_ERR(z2_queue);
+               z2_queue = NULL;
                goto out_queue;
+       }
 
        z2ram_gendisk->major = Z2RAM_MAJOR;
        z2ram_gendisk->first_minor = 0;
@@ -387,6 +397,7 @@ static void __exit z2_exit(void)
        del_gendisk(z2ram_gendisk);
        put_disk(z2ram_gendisk);
        blk_cleanup_queue(z2_queue);
+       blk_mq_free_tag_set(&tag_set);
 
        if ( current_device != -1 )
        {
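
blk_mq_init_sq_queue() bundles tag-set allocation and single-hardware-queue
creation, which is why every conversion in this series pairs the existing
blk_cleanup_queue() with a new blk_mq_free_tag_set() on teardown. The
pairing, sketched with illustrative names:

    static struct request_queue *example_q;
    static struct blk_mq_tag_set example_tag_set;

    static int example_init(const struct blk_mq_ops *ops)
    {
            example_q = blk_mq_init_sq_queue(&example_tag_set, ops, 16,
                                             BLK_MQ_F_SHOULD_MERGE);
            if (IS_ERR(example_q)) {
                    int ret = PTR_ERR(example_q);

                    example_q = NULL;
                    return ret;
            }
            return 0;
    }

    static void example_exit(void)
    {
            blk_cleanup_queue(example_q);           /* drain, free the queue */
            blk_mq_free_tag_set(&example_tag_set);  /* then release the tags */
    }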

diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig

@@ -3,7 +3,6 @@ config ZRAM
        tristate "Compressed RAM block device support"
        depends on BLOCK && SYSFS && ZSMALLOC && CRYPTO
        select CRYPTO_LZO
-       default n
        help
          Creates virtual block devices called /dev/zramX (X = 0, 1, ...).
          Pages written to these disks are compressed and stored in memory
@@ -18,7 +17,6 @@ config ZRAM
 config ZRAM_WRITEBACK
        bool "Write back incompressible page to backing device"
        depends on ZRAM
-       default n
        help
          With incompressible page, there is no memory saving to keep it
          in memory. Instead, write it out to backing device.

diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c

@@ -1636,6 +1636,11 @@ static const struct attribute_group zram_disk_attr_group = {
        .attrs = zram_disk_attrs,
 };
 
+static const struct attribute_group *zram_disk_attr_groups[] = {
+       &zram_disk_attr_group,
+       NULL,
+};
+
 /*
  * Allocate and initialize new zram device. the function returns
  * '>= 0' device_id upon success, and negative value otherwise.
@@ -1716,24 +1721,14 @@ static int zram_add(void)
        zram->disk->queue->backing_dev_info->capabilities |=
                        (BDI_CAP_STABLE_WRITES | BDI_CAP_SYNCHRONOUS_IO);
-       add_disk(zram->disk);
+       device_add_disk(NULL, zram->disk, zram_disk_attr_groups);
 
-       ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
-                               &zram_disk_attr_group);
-       if (ret < 0) {
-               pr_err("Error creating sysfs group for device %d\n",
-                               device_id);
-               goto out_free_disk;
-       }
        strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
 
        zram_debugfs_register(zram);
        pr_info("Added device: %s\n", zram->disk->disk_name);
        return device_id;
 
-out_free_disk:
-       del_gendisk(zram->disk);
-       put_disk(zram->disk);
 out_free_queue:
        blk_cleanup_queue(queue);
 out_free_idr:
@@ -1762,15 +1757,6 @@ static int zram_remove(struct zram *zram)
        mutex_unlock(&bdev->bd_mutex);
 
        zram_debugfs_unregister(zram);
-       /*
-        * Remove sysfs first, so no one will perform a disksize
-        * store while we destroy the devices. This also helps during
-        * hot_remove -- zram_reset_device() is the last holder of
-        * ->init_lock, no later/concurrent disksize_store() or any
-        * other sysfs handlers are possible.
-        */
-       sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
-                       &zram_disk_attr_group);
 
        /* Make sure all the pending I/O are finished */
        fsync_bdev(bdev);
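
zram gets the same treatment as virtio_blk: with zram_disk_attr_groups passed
at add time, the group is created before the disk uevent and torn down by the
driver core when the disk goes away, so both the manual sysfs_create_group()
call and the careful removal comment become unnecessary. In outline
(illustrative names; zram passes a NULL parent because it has no backing
struct device):

    static int example_register(struct gendisk *disk,
                                const struct attribute_group **groups)
    {
            /* Groups are added before the KOBJ_ADD uevent fires... */
            device_add_disk(NULL, disk, groups);
            return 0;
    }

    static void example_unregister(struct gendisk *disk)
    {
            /* ...and removed automatically here; no sysfs_remove_group(). */
            del_gendisk(disk);
            put_disk(disk);
    }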