Merge ra.kernel.org:/pub/scm/linux/kernel/git/davem/net

Version bump conflict in batman-adv, take what's in net-next.

iavf conflict, adjustment of netdev_ops in net-next conflicting with
poll controller method removal in net.

Signed-off-by: David S. Miller <davem@davemloft.net>

@@ -3474,11 +3474,10 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
 					vscsi->dds.window[LOCAL].liobn,
 					vscsi->dds.window[REMOTE].liobn);
 
-	strcpy(vscsi->eye, "VSCSI ");
-	strncat(vscsi->eye, vdev->name, MAX_EYE);
+	snprintf(vscsi->eye, sizeof(vscsi->eye), "VSCSI %s", vdev->name);
 
 	vscsi->dds.unit_id = vdev->unit_address;
-	strncpy(vscsi->dds.partition_name, partition_name,
+	strscpy(vscsi->dds.partition_name, partition_name,
 		sizeof(vscsi->dds.partition_name));
 	vscsi->dds.partition_num = partition_number;
 
@@ -3335,64 +3335,19 @@ static void ipr_release_dump(struct kref *kref)
 	LEAVE;
 }
 
-/**
- * ipr_worker_thread - Worker thread
- * @work: ioa config struct
- *
- * Called at task level from a work thread. This function takes care
- * of adding and removing device from the mid-layer as configuration
- * changes are detected by the adapter.
- *
- * Return value:
- * nothing
- **/
-static void ipr_worker_thread(struct work_struct *work)
+static void ipr_add_remove_thread(struct work_struct *work)
 {
 	unsigned long lock_flags;
 	struct ipr_resource_entry *res;
 	struct scsi_device *sdev;
-	struct ipr_dump *dump;
 	struct ipr_ioa_cfg *ioa_cfg =
-		container_of(work, struct ipr_ioa_cfg, work_q);
+		container_of(work, struct ipr_ioa_cfg, scsi_add_work_q);
 	u8 bus, target, lun;
 	int did_work;
 
 	ENTER;
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 
-	if (ioa_cfg->sdt_state == READ_DUMP) {
-		dump = ioa_cfg->dump;
-		if (!dump) {
-			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-			return;
-		}
-		kref_get(&dump->kref);
-		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-		ipr_get_ioa_dump(ioa_cfg, dump);
-		kref_put(&dump->kref, ipr_release_dump);
-
-		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-		if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
-			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
-		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-		return;
-	}
-
-	if (ioa_cfg->scsi_unblock) {
-		ioa_cfg->scsi_unblock = 0;
-		ioa_cfg->scsi_blocked = 0;
-		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-		scsi_unblock_requests(ioa_cfg->host);
-		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-		if (ioa_cfg->scsi_blocked)
-			scsi_block_requests(ioa_cfg->host);
-	}
-
-	if (!ioa_cfg->scan_enabled) {
-		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-		return;
-	}
-
 restart:
 	do {
 		did_work = 0;
@@ -3439,6 +3394,66 @@ restart:
 	LEAVE;
 }
 
+/**
+ * ipr_worker_thread - Worker thread
+ * @work: ioa config struct
+ *
+ * Called at task level from a work thread. This function takes care
+ * of adding and removing device from the mid-layer as configuration
+ * changes are detected by the adapter.
+ *
+ * Return value:
+ * nothing
+ **/
+static void ipr_worker_thread(struct work_struct *work)
+{
+	unsigned long lock_flags;
+	struct ipr_dump *dump;
+	struct ipr_ioa_cfg *ioa_cfg =
+		container_of(work, struct ipr_ioa_cfg, work_q);
+
+	ENTER;
+	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+
+	if (ioa_cfg->sdt_state == READ_DUMP) {
+		dump = ioa_cfg->dump;
+		if (!dump) {
+			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+			return;
+		}
+		kref_get(&dump->kref);
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+		ipr_get_ioa_dump(ioa_cfg, dump);
+		kref_put(&dump->kref, ipr_release_dump);
+
+		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+		if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
+			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+		return;
+	}
+
+	if (ioa_cfg->scsi_unblock) {
+		ioa_cfg->scsi_unblock = 0;
+		ioa_cfg->scsi_blocked = 0;
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+		scsi_unblock_requests(ioa_cfg->host);
+		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+		if (ioa_cfg->scsi_blocked)
+			scsi_block_requests(ioa_cfg->host);
+	}
+
+	if (!ioa_cfg->scan_enabled) {
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+		return;
+	}
+
+	schedule_work(&ioa_cfg->scsi_add_work_q);
+
+	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+	LEAVE;
+}
+
 #ifdef CONFIG_SCSI_IPR_TRACE
 /**
  * ipr_read_trace - Dump the adapter trace
@@ -9933,6 +9948,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
 	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
 	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
 	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
+	INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
 	init_waitqueue_head(&ioa_cfg->reset_wait_q);
 	init_waitqueue_head(&ioa_cfg->msi_wait_q);
 	init_waitqueue_head(&ioa_cfg->eeh_wait_q);
@@ -1575,6 +1575,7 @@ struct ipr_ioa_cfg {
 	u8 saved_mode_page_len;
 
 	struct work_struct work_q;
+	struct work_struct scsi_add_work_q;
 	struct workqueue_struct *reset_work_q;
 
 	wait_queue_head_t reset_wait_q;
@@ -360,12 +360,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 		goto buffer_done;
 
 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+		nrport = NULL;
+		spin_lock(&vport->phba->hbalock);
 		rport = lpfc_ndlp_get_nrport(ndlp);
-		if (!rport)
-			continue;
-
-		/* local short-hand pointer. */
-		nrport = rport->remoteport;
+		if (rport)
+			nrport = rport->remoteport;
+		spin_unlock(&vport->phba->hbalock);
 		if (!nrport)
 			continue;
 
@@ -3386,6 +3386,7 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
 	struct lpfc_nodelist *ndlp;
 #if (IS_ENABLED(CONFIG_NVME_FC))
 	struct lpfc_nvme_rport *rport;
+	struct nvme_fc_remote_port *remoteport = NULL;
 #endif
 
 	shost = lpfc_shost_from_vport(vport);
@@ -3396,8 +3397,12 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
 		if (ndlp->rport)
 			ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
 #if (IS_ENABLED(CONFIG_NVME_FC))
+		spin_lock(&vport->phba->hbalock);
 		rport = lpfc_ndlp_get_nrport(ndlp);
 		if (rport)
+			remoteport = rport->remoteport;
+		spin_unlock(&vport->phba->hbalock);
+		if (remoteport)
 			nvme_fc_set_remoteport_devloss(rport->remoteport,
 						       vport->cfg_devloss_tmo);
 #endif
@@ -551,7 +551,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
 	unsigned char *statep;
 	struct nvme_fc_local_port *localport;
 	struct lpfc_nvmet_tgtport *tgtp;
-	struct nvme_fc_remote_port *nrport;
+	struct nvme_fc_remote_port *nrport = NULL;
 	struct lpfc_nvme_rport *rport;
 
 	cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE);
@@ -696,11 +696,11 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
 	len += snprintf(buf + len, size - len, "\tRport List:\n");
 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
 		/* local short-hand pointer. */
+		spin_lock(&phba->hbalock);
 		rport = lpfc_ndlp_get_nrport(ndlp);
-		if (!rport)
-			continue;
-
-		nrport = rport->remoteport;
+		if (rport)
+			nrport = rport->remoteport;
+		spin_unlock(&phba->hbalock);
 		if (!nrport)
 			continue;
 
@@ -2725,7 +2725,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
 	rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
 
+	spin_lock_irq(&vport->phba->hbalock);
 	oldrport = lpfc_ndlp_get_nrport(ndlp);
+	spin_unlock_irq(&vport->phba->hbalock);
 	if (!oldrport)
 		lpfc_nlp_get(ndlp);
 
@@ -2840,7 +2842,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	struct nvme_fc_local_port *localport;
 	struct lpfc_nvme_lport *lport;
 	struct lpfc_nvme_rport *rport;
-	struct nvme_fc_remote_port *remoteport;
+	struct nvme_fc_remote_port *remoteport = NULL;
 
 	localport = vport->localport;
 
@@ -2854,11 +2856,14 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	if (!lport)
 		goto input_err;
 
+	spin_lock_irq(&vport->phba->hbalock);
 	rport = lpfc_ndlp_get_nrport(ndlp);
-	if (!rport)
+	if (rport)
+		remoteport = rport->remoteport;
+	spin_unlock_irq(&vport->phba->hbalock);
+	if (!remoteport)
 		goto input_err;
 
-	remoteport = rport->remoteport;
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
 			 "6033 Unreg nvme remoteport %p, portname x%llx, "
 			 "port_id x%06x, portstate x%x port type x%x\n",
@@ -374,8 +374,8 @@ struct atio_from_isp {
 static inline int fcpcmd_is_corrupted(struct atio *atio)
 {
 	if (atio->entry_type == ATIO_TYPE7 &&
-	    (le16_to_cpu(atio->attr_n_length & FCP_CMD_LENGTH_MASK) <
-	    FCP_CMD_LENGTH_MIN))
+	    ((le16_to_cpu(atio->attr_n_length) & FCP_CMD_LENGTH_MASK) <
+	     FCP_CMD_LENGTH_MIN))
 		return 1;
 	else
 		return 0;
@@ -1276,7 +1276,8 @@ static int sd_init_command(struct scsi_cmnd *cmd)
 	case REQ_OP_ZONE_RESET:
 		return sd_zbc_setup_reset_cmnd(cmd);
 	default:
-		BUG();
+		WARN_ON_ONCE(1);
+		return BLKPREP_KILL;
 	}
 }
 
@@ -2959,6 +2960,9 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
 	if (rot == 1) {
 		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
+	} else {
+		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
+		blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
 	}
 
 	if (sdkp->device->type == TYPE_ZBC) {
@@ -7940,6 +7940,13 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
 		err = -ENOMEM;
 		goto out_error;
 	}
+
+	/*
+	 * Do not use blk-mq at this time because blk-mq does not support
+	 * runtime pm.
+	 */
+	host->use_blk_mq = false;
+
 	hba = shost_priv(host);
 	hba->host = host;
 	hba->dev = dev;