@@ -3098,7 +3098,7 @@ static int srp_sdev_count(struct Scsi_Host *host)
struct scsi_device *sdev;
int c = 0;
- shost_for_each_device(sdev, host)
+ shost_for_each_device(sdev, host, 1)
c++;
return c;
@@ -1310,7 +1310,7 @@ mptctl_getiocinfo (MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
*/
karg->numDevices = 0;
if (ioc->sh) {
- shost_for_each_device(sdev, ioc->sh) {
+ shost_for_each_device(sdev, ioc->sh, 1) {
vdevice = sdev->hostdata;
if (vdevice == NULL || vdevice->vtarget == NULL)
continue;
@@ -1416,7 +1416,7 @@ mptctl_gettargetinfo (MPT_ADAPTER *ioc, unsigned long arg)
/* Get number of devices
*/
if (ioc->sh){
- shost_for_each_device(sdev, ioc->sh) {
+ shost_for_each_device(sdev, ioc->sh, 1) {
if (!maxWordsLeft)
continue;
vdevice = sdev->hostdata;
@@ -1889,7 +1889,7 @@ mptctl_do_mpt_command (MPT_ADAPTER *ioc, struct mpt_ioctl_command karg, void __u
cpu_to_le32(ioc->sense_buf_low_dma
+ (req_idx * MPT_SENSE_BUFFER_ALLOC));
- shost_for_each_device(sdev, ioc->sh) {
+ shost_for_each_device(sdev, ioc->sh, 1) {
struct scsi_target *starget = scsi_target(sdev);
VirtTarget *vtarget = starget->hostdata;
@@ -623,7 +623,7 @@ mptsas_add_device_component(MPT_ADAPTER *ioc, u8 channel, u8 id,
/*
* Set OS mapping
*/
- shost_for_each_device(sdev, ioc->sh) {
+ shost_for_each_device(sdev, ioc->sh, 1) {
starget = scsi_target(sdev);
rphy = dev_to_rphy(starget->dev.parent);
if (rphy->identify.sas_address == sas_address) {
@@ -996,7 +996,7 @@ mptsas_find_vtarget(MPT_ADAPTER *ioc, u8 channel, u8 id)
VirtDevice *vdevice;
VirtTarget *vtarget = NULL;
- shost_for_each_device(sdev, ioc->sh) {
+ shost_for_each_device(sdev, ioc->sh, 1) {
vdevice = sdev->hostdata;
if ((vdevice == NULL) ||
(vdevice->vtarget == NULL))
@@ -3768,7 +3768,7 @@ mptsas_send_link_status_event(struct fw_event_work *fw_event)
ioc->name, phy_info->attached.id,
phy_info->attached.channel));
- shost_for_each_device(sdev, ioc->sh) {
+ shost_for_each_device(sdev, ioc->sh, 1) {
vdevice = sdev->hostdata;
if ((vdevice == NULL) ||
(vdevice->vtarget == NULL))
@@ -4101,7 +4101,7 @@ mptsas_handle_queue_full_event(struct fw_event_work *fw_event)
mutex_unlock(&ioc->sas_device_info_mutex);
if (id != -1) {
- shost_for_each_device(sdev, ioc->sh) {
+ shost_for_each_device(sdev, ioc->sh, 1) {
if (sdev->id == id && sdev->channel == channel) {
if (current_depth > sdev->queue_depth) {
sdev_printk(KERN_INFO, sdev,
@@ -1125,7 +1125,7 @@ static void mpt_work_wrapper(struct work_struct *work)
if (!pg3)
return;
- shost_for_each_device(sdev,shost) {
+ shost_for_each_device(sdev, shost, 1) {
struct scsi_target *starget = scsi_target(sdev);
VirtTarget *vtarget = starget->hostdata;
@@ -1267,7 +1267,7 @@ mptspi_dv_renegotiate_work(struct work_struct *work)
kfree(wqw);
if (hd->spi_pending) {
- shost_for_each_device(sdev, ioc->sh) {
+ shost_for_each_device(sdev, ioc->sh, 1) {
if (hd->spi_pending & (1 << sdev->id))
continue;
starget = scsi_target(sdev);
@@ -1278,7 +1278,7 @@ mptspi_dv_renegotiate_work(struct work_struct *work)
mptspi_write_spi_device_pg1(starget, &pg1);
}
} else {
- shost_for_each_device(sdev, ioc->sh)
+ shost_for_each_device(sdev, ioc->sh, 1)
mptspi_dv_device(hd, sdev);
}
}
@@ -2036,7 +2036,7 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
/* can't use generic zfcp_erp_modify_port_status because
* ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
- shost_for_each_device(sdev, port->adapter->scsi_host)
+ shost_for_each_device(sdev, port->adapter->scsi_host, 1)
if (sdev_to_zfcp(sdev)->port == port)
atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
&sdev_to_zfcp(sdev)->status);
@@ -2058,7 +2058,7 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
* ZFCP_STATUS_COMMON_OPEN must not be reset for the port
*/
atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
- shost_for_each_device(sdev, port->adapter->scsi_host)
+ shost_for_each_device(sdev, port->adapter->scsi_host, 1)
if (sdev_to_zfcp(sdev)->port == port)
atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
&sdev_to_zfcp(sdev)->status);
@@ -350,7 +350,7 @@ static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
(struct zfcp_adapter *)shost->hostdata[0];
int ret;
- shost_for_each_device(tmp_sdev, shost) {
+ shost_for_each_device(tmp_sdev, shost, 1) {
if (tmp_sdev->id == starget->id) {
sdev = tmp_sdev;
break;
@@ -2760,7 +2760,7 @@ static int acornscsi_show_info(struct seq_file *m, struct Scsi_Host *instance)
seq_printf(m, "\nAttached devices:\n");
- shost_for_each_device(scd, instance) {
+ shost_for_each_device(scd, instance, 1) {
seq_printf(m, "Device/Lun TaggedQ Sync\n");
seq_printf(m, " %d/%llu ", scd->id, scd->lun);
if (scd->tagged_supported)
@@ -2589,7 +2589,7 @@ int fas216_eh_bus_reset(struct scsi_cmnd *SCpnt)
* all command structures. Leave the running
* command in place.
*/
- shost_for_each_device(SDpnt, info->host) {
+ shost_for_each_device(SDpnt, info->host, 1) {
int i;
if (SDpnt->soft_reset)
@@ -2999,7 +2999,7 @@ void fas216_print_devices(FAS216_Info *info, struct seq_file *m)
seq_puts(m, "Device/Lun TaggedQ Parity Sync\n");
- shost_for_each_device(scd, info->host) {
+ shost_for_each_device(scd, info->host, 1) {
dev = &info->device[scd->id];
seq_printf(m, " %d/%llu ", scd->id, scd->lun);
if (scd->tagged_supported)
@@ -874,7 +874,7 @@ bfad_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
BFA_QUEUE_FULL_RAMP_UP_TIME * HZ) &&
((jiffies - itnim->last_queue_full_time) >
BFA_QUEUE_FULL_RAMP_UP_TIME * HZ)) {
- shost_for_each_device(tmp_sdev, sdev->host) {
+ shost_for_each_device(tmp_sdev, sdev->host, 1) {
if (bfa_lun_queue_depth > tmp_sdev->queue_depth) {
if (tmp_sdev->id != sdev->id)
continue;
@@ -894,7 +894,7 @@ bfad_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
itnim->last_queue_full_time = jiffies;
- shost_for_each_device(tmp_sdev, sdev->host) {
+ shost_for_each_device(tmp_sdev, sdev->host, 1) {
if (tmp_sdev->id != sdev->id)
continue;
scsi_track_queue_full(tmp_sdev, tmp_sdev->queue_depth - 1);
@@ -3012,7 +3012,7 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
unsigned int found;
ENTER;
- shost_for_each_device(sdev, shost) {
+ shost_for_each_device(sdev, shost, 1) {
dev_rport = starget_to_rport(scsi_target(sdev));
if (dev_rport != rport)
continue;
@@ -185,7 +185,7 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
if (vports != NULL)
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
shost = lpfc_shost_from_vport(vports[i]);
- shost_for_each_device(sdev, shost) {
+ shost_for_each_device(sdev, shost, 1) {
new_queue_depth =
sdev->queue_depth * num_rsrc_err /
(num_rsrc_err + num_cmd_success);
@@ -223,7 +223,7 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba)
if (vports != NULL)
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
shost = lpfc_shost_from_vport(vports[i]);
- shost_for_each_device(sdev, shost) {
+ shost_for_each_device(sdev, shost, 1) {
rport = starget_to_rport(scsi_target(sdev));
fc_remote_port_delete(rport);
}
@@ -3444,7 +3444,7 @@ enable_sdev_max_qd_store(struct device *cdev,
else
instance->enable_sdev_max_qd = false;
- shost_for_each_device(sdev, shost) {
+ shost_for_each_device(sdev, shost, 1) {
ret_target_prop = megasas_get_target_prop(instance, sdev);
is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
megasas_set_fw_assisted_qd(sdev, is_target_prop);
@@ -5110,7 +5110,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
&instance->reset_flags);
instance->instancet->enable_intr(instance);
megasas_enable_irq_poll(instance);
- shost_for_each_device(sdev, shost) {
+ shost_for_each_device(sdev, shost, 1) {
if ((instance->tgt_prop) &&
(instance->nvme_page_size))
ret_target_prop = megasas_get_target_prop(instance, sdev);
@@ -3879,7 +3879,7 @@ enable_sdev_max_qd_store(struct device *cdev,
switch (val) {
case 0:
ioc->enable_sdev_max_qd = 0;
- shost_for_each_device(sdev, ioc->shost) {
+ shost_for_each_device(sdev, ioc->shost, 1) {
sas_device_priv_data = sdev->hostdata;
if (!sas_device_priv_data)
continue;
@@ -3922,7 +3922,7 @@ enable_sdev_max_qd_store(struct device *cdev,
break;
case 1:
ioc->enable_sdev_max_qd = 1;
- shost_for_each_device(sdev, ioc->shost)
+ shost_for_each_device(sdev, ioc->shost, 1)
mpt3sas_scsih_change_queue_depth(sdev,
shost->can_queue);
break;
@@ -2888,7 +2888,7 @@ mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
struct scsi_device *sdev;
u8 skip = 0;
- shost_for_each_device(sdev, ioc->shost) {
+ shost_for_each_device(sdev, ioc->shost, 1) {
if (skip)
continue;
sas_device_priv_data = sdev->hostdata;
@@ -2916,7 +2916,7 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
struct scsi_device *sdev;
u8 skip = 0;
- shost_for_each_device(sdev, ioc->shost) {
+ shost_for_each_device(sdev, ioc->shost, 1) {
if (skip)
continue;
sas_device_priv_data = sdev->hostdata;
@@ -3836,7 +3836,7 @@ _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
struct MPT3SAS_DEVICE *sas_device_priv_data;
struct scsi_device *sdev;
- shost_for_each_device(sdev, ioc->shost) {
+ shost_for_each_device(sdev, ioc->shost, 1) {
sas_device_priv_data = sdev->hostdata;
if (!sas_device_priv_data)
continue;
@@ -3866,7 +3866,7 @@ _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc,
struct MPT3SAS_DEVICE *sas_device_priv_data;
struct scsi_device *sdev;
- shost_for_each_device(sdev, ioc->shost) {
+ shost_for_each_device(sdev, ioc->shost, 1) {
sas_device_priv_data = sdev->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
continue;
@@ -3893,7 +3893,7 @@ _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
struct MPT3SAS_DEVICE *sas_device_priv_data;
struct scsi_device *sdev;
- shost_for_each_device(sdev, ioc->shost) {
+ shost_for_each_device(sdev, ioc->shost, 1) {
sas_device_priv_data = sdev->hostdata;
if (!sas_device_priv_data)
continue;
@@ -3925,7 +3925,7 @@ _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
- shost_for_each_device(sdev, ioc->shost) {
+ shost_for_each_device(sdev, ioc->shost, 1) {
sas_device_priv_data = sdev->hostdata;
if (!sas_device_priv_data)
continue;
@@ -9654,7 +9654,7 @@ _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
struct MPT3SAS_DEVICE *sas_device_priv_data;
struct scsi_device *sdev;
- shost_for_each_device(sdev, ioc->shost) {
+ shost_for_each_device(sdev, ioc->shost, 1) {
sas_device_priv_data = sdev->hostdata;
if (sas_device_priv_data && sas_device_priv_data->sas_target)
sas_device_priv_data->sas_target->deleted = 1;
@@ -9676,7 +9676,7 @@ _scsih_update_device_qdepth(struct MPT3SAS_ADAPTER *ioc)
u16 qdepth;
ioc_info(ioc, "Update devices with firmware reported queue depth\n");
- shost_for_each_device(sdev, ioc->shost) {
+ shost_for_each_device(sdev, ioc->shost, 1) {
sas_device_priv_data = sdev->hostdata;
if (sas_device_priv_data && sas_device_priv_data->sas_target) {
sas_target_priv_data = sas_device_priv_data->sas_target;
@@ -357,7 +357,7 @@ static void myrb_get_errtable(struct myrb_hba *cb)
size_t err_table_offset;
struct scsi_device *sdev;
- shost_for_each_device(sdev, cb->host) {
+ shost_for_each_device(sdev, cb->host, 1) {
if (sdev->channel >= myrb_logical_channel(cb->host))
continue;
err_table_offset = sdev->channel * MYRB_MAX_TARGETS
@@ -2141,7 +2141,7 @@ static void myrs_monitor(struct work_struct *work)
info->exp_active != 0) {
struct scsi_device *sdev;
- shost_for_each_device(sdev, shost) {
+ shost_for_each_device(sdev, shost, 1) {
struct myrs_ldev_info *ldev_info;
int ldev_num;
@@ -704,6 +704,23 @@ int scsi_cdl_enable(struct scsi_device *sdev, bool enable)
return 0;
}
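+
+/*
+ * Common helper for scsi_device_get() and __scsi_iterate_devices(): pin the
+ * host driver's module and take a reference on @sdev. If @check_state is
+ * true, devices already in SDEV_DEL or SDEV_CANCEL state are refused.
+ * Returns 0 on success or -ENXIO on failure.
+ */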
+static int __scsi_device_get(struct scsi_device *sdev, bool check_state)
+{
+ if (check_state &&
+ (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL))
+ goto fail;
+ if (!try_module_get(sdev->host->hostt->module))
+ goto fail;
+ if (!get_device(&sdev->sdev_gendev))
+ goto fail_put_module;
+ return 0;
+
+fail_put_module:
+ module_put(sdev->host->hostt->module);
+fail:
+ return -ENXIO;
+}
+
/**
* scsi_device_get - get an additional reference to a scsi_device
* @sdev: device to get a reference to
@@ -717,18 +734,7 @@ int scsi_cdl_enable(struct scsi_device *sdev, bool enable)
*/
int scsi_device_get(struct scsi_device *sdev)
{
- if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL)
- goto fail;
- if (!try_module_get(sdev->host->hostt->module))
- goto fail;
- if (!get_device(&sdev->sdev_gendev))
- goto fail_put_module;
- return 0;
-
-fail_put_module:
- module_put(sdev->host->hostt->module);
-fail:
- return -ENXIO;
+ return __scsi_device_get(sdev, 1);
}
EXPORT_SYMBOL(scsi_device_get);
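For orientation, a minimal caller-side sketch (not part of the patch; the function name is made up) of the contract that scsi_device_get()/scsi_device_put() keep unchanged by this refactor:

#include <scsi/scsi_device.h>	/* scsi_device_get()/scsi_device_put() */

/* Sketch: pin a device so it cannot go away while we use it. */
static int example_use_device(struct scsi_device *sdev)
{
	if (scsi_device_get(sdev))
		return -ENXIO;	/* device being removed or module unloading */

	/* ... sdev and its host driver module are safe to use here ... */

	scsi_device_put(sdev);	/* drop the reference taken above */
	return 0;
}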
@@ -751,7 +757,8 @@ EXPORT_SYMBOL(scsi_device_put);
/* helper for shost_for_each_device, see that for documentation */
struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
- struct scsi_device *prev)
+ struct scsi_device *prev,
+ bool check_state)
{
struct list_head *list = (prev ? &prev->siblings : &shost->__devices);
struct scsi_device *next = NULL;
@@ -761,7 +768,7 @@ struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
while (list->next != &shost->__devices) {
next = list_entry(list->next, struct scsi_device, siblings);
/* skip devices that we can't get a reference to */
- if (!scsi_device_get(next))
+ if (!__scsi_device_get(next, check_state))
break;
next = NULL;
list = list->next;
@@ -790,7 +797,7 @@ void starget_for_each_device(struct scsi_target *starget, void *data,
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct scsi_device *sdev;
- shost_for_each_device(sdev, shost) {
+ shost_for_each_device(sdev, shost, 1) {
if ((sdev->channel == starget->channel) &&
(sdev->id == starget->id))
fn(sdev, data);
@@ -1352,7 +1352,7 @@ static void all_config_cdb_len(void)
mutex_lock(&sdebug_host_list_mutex);
list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
shost = sdbg_host->shost;
- shost_for_each_device(sdev, shost) {
+ shost_for_each_device(sdev, shost, 1) {
config_cdb_len(sdev);
}
}
@@ -407,7 +407,7 @@ static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
int cmd_cancel = 0;
int devices_failed = 0;
- shost_for_each_device(sdev, shost) {
+ shost_for_each_device(sdev, shost, 0) {
list_for_each_entry(scmd, work_q, eh_entry) {
if (scmd->device == sdev) {
++total_failures;
@@ -743,7 +743,7 @@ static void scsi_handle_queue_ramp_up(struct scsi_device *sdev)
* Walk all devices of a target and do
* ramp up on them.
*/
- shost_for_each_device(tmp_sdev, sdev->host) {
+ shost_for_each_device(tmp_sdev, sdev->host, 1) {
if (tmp_sdev->channel != sdev->channel ||
tmp_sdev->id != sdev->id ||
tmp_sdev->queue_depth == sdev->max_queue_depth)
@@ -762,7 +762,7 @@ static void scsi_handle_queue_full(struct scsi_device *sdev)
if (!sht->track_queue_depth)
return;
- shost_for_each_device(tmp_sdev, sdev->host) {
+ shost_for_each_device(tmp_sdev, sdev->host, 1) {
if (tmp_sdev->channel != sdev->channel ||
tmp_sdev->id != sdev->id)
continue;
@@ -1501,7 +1501,7 @@ static int scsi_eh_stu(struct Scsi_Host *shost,
struct scsi_cmnd *scmd, *stu_scmd, *next;
struct scsi_device *sdev;
- shost_for_each_device(sdev, shost) {
+ shost_for_each_device(sdev, shost, 1) {
if (scsi_host_eh_past_deadline(shost)) {
SCSI_LOG_ERROR_RECOVERY(3,
sdev_printk(KERN_INFO, sdev,
@@ -1568,7 +1568,7 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
struct scsi_device *sdev;
enum scsi_disposition rtn;
- shost_for_each_device(sdev, shost) {
+ shost_for_each_device(sdev, shost, 0) {
if (scsi_host_eh_past_deadline(shost)) {
SCSI_LOG_ERROR_RECOVERY(3,
sdev_printk(KERN_INFO, sdev,
@@ -2120,7 +2120,7 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
* onto the head of the SCSI request queue for the device. There
* is no point trying to lock the door of an off-line device.
*/
- shost_for_each_device(sdev, shost) {
+ shost_for_each_device(sdev, shost, 0) {
if (scsi_device_online(sdev) && sdev->was_reset && sdev->locked) {
scsi_eh_lock_door(sdev);
sdev->was_reset = 0;
@@ -470,7 +470,7 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
{
struct scsi_device *sdev;
- shost_for_each_device(sdev, shost)
+ shost_for_each_device(sdev, shost, 1)
scsi_run_queue(sdev->request_queue);
}
@@ -2968,7 +2968,7 @@ scsi_host_block(struct Scsi_Host *shost)
* Call scsi_internal_device_block_nowait so we can avoid
* calling synchronize_rcu() for each LUN.
*/
- shost_for_each_device(sdev, shost) {
+ shost_for_each_device(sdev, shost, 1) {
mutex_lock(&sdev->state_mutex);
ret = scsi_internal_device_block_nowait(sdev);
mutex_unlock(&sdev->state_mutex);
@@ -2991,7 +2991,7 @@ scsi_host_unblock(struct Scsi_Host *shost, int new_state)
struct scsi_device *sdev;
int ret = 0;
- shost_for_each_device(sdev, shost) {
+ shost_for_each_device(sdev, shost, 1) {
ret = scsi_internal_device_unblock(sdev, new_state);
if (ret) {
scsi_device_put(sdev);
@@ -1804,7 +1804,7 @@ int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
{
struct scsi_device *sdev;
- shost_for_each_device(sdev, shost) {
+ shost_for_each_device(sdev, shost, 1) {
/* target removed before the device could be added */
if (sdev->sdev_state == SDEV_DEL)
continue;
@@ -564,7 +564,7 @@ int srp_reconnect_rport(struct srp_rport *rport)
* invoking scsi_target_unblock() won't change the state of
* these devices into running so do that explicitly.
*/
- shost_for_each_device(sdev, shost) {
+ shost_for_each_device(sdev, shost, 1) {
mutex_lock(&sdev->state_mutex);
if (sdev->sdev_state == SDEV_OFFLINE)
sdev->sdev_state = SDEV_RUNNING;
@@ -796,7 +796,7 @@ static int ses_intf_add(struct device *cdev)
/* see if there are any devices matching before
* we found the enclosure */
- shost_for_each_device(tmp_sdev, sdev->host) {
+ shost_for_each_device(tmp_sdev, sdev->host, 1) {
if (tmp_sdev->lun != 0 || scsi_device_enclosure(tmp_sdev))
continue;
ses_match_to_enclosure(edev, tmp_sdev, 0);
@@ -502,7 +502,7 @@ static void storvsc_host_scan(struct work_struct *work)
* may have been removed this way.
*/
mutex_lock(&host->scan_mutex);
- shost_for_each_device(sdev, host)
+ shost_for_each_device(sdev, host, 1)
scsi_test_unit_ready(sdev, 1, 1, NULL);
mutex_unlock(&host->scan_mutex);
/*
@@ -341,7 +341,7 @@ static int virtscsi_rescan_hotunplug(struct virtio_scsi *vscsi)
if (!inq_result)
return -ENOMEM;
- shost_for_each_device(sdev, shost) {
+ shost_for_each_device(sdev, shost, 1) {
inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;
memset(scsi_cmd, 0, sizeof(scsi_cmd));
@@ -6308,7 +6308,7 @@ static void ufshcd_recover_pm_error(struct ufs_hba *hba)
* blk_queue_enter in case there are bios waiting inside it.
*/
if (!ret) {
- shost_for_each_device(sdev, shost) {
+ shost_for_each_device(sdev, shost, 1) {
q = sdev->request_queue;
if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
q->rpm_status == RPM_SUSPENDING))
@@ -10061,7 +10061,7 @@ static void ufshcd_wl_shutdown(struct device *dev)
/* Turn on everything while shutting down */
ufshcd_rpm_get_sync(hba);
scsi_device_quiesce(sdev);
- shost_for_each_device(sdev, hba->host) {
+ shost_for_each_device(sdev, hba->host, 1) {
if (sdev == hba->ufs_device_wlun)
continue;
scsi_device_quiesce(sdev);
@@ -389,21 +389,25 @@ extern void __starget_for_each_device(struct scsi_target *, void *,
/* only exposed to implement shost_for_each_device */
extern struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *,
- struct scsi_device *);
+ struct scsi_device *,
+ bool);
/**
* shost_for_each_device - iterate over all devices of a host
* @sdev: the &struct scsi_device to use as a cursor
* @shost: the &struct scsi_host to iterate over
+ * @check_state: if true, devices in SDEV_DEL or SDEV_CANCEL state are
+ *	skipped; otherwise every device on the host list is visited,
+ *	regardless of its state
*
* Iterator that returns each device attached to @shost. This loop
* takes a reference on each device and releases it at the end. If
* you break out of the loop, you must call scsi_device_put(sdev).
*/
-#define shost_for_each_device(sdev, shost) \
- for ((sdev) = __scsi_iterate_devices((shost), NULL); \
+#define shost_for_each_device(sdev, shost, check_state) \
+	for ((sdev) = __scsi_iterate_devices((shost), NULL, (check_state)); \
(sdev); \
- (sdev) = __scsi_iterate_devices((shost), (sdev)))
+	     (sdev) = __scsi_iterate_devices((shost), (sdev), (check_state)))
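A usage sketch (illustrative only, not part of the patch; the lookup helper is hypothetical): with the extra argument, callers choose whether dying devices are skipped, and breaking out of the loop still leaves them holding the reference that must later be dropped with scsi_device_put():

#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

/* Hypothetical helper: return a referenced scsi_device on LUN 0, or NULL. */
static struct scsi_device *example_find_lun0(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	/*
	 * check_state = 1: skip devices in SDEV_DEL/SDEV_CANCEL;
	 * pass 0 (as the scsi_error.c hunks above do) to visit them as well.
	 */
	shost_for_each_device(sdev, shost, 1) {
		if (sdev->lun == 0)
			return sdev;	/* caller must scsi_device_put(sdev) */
	}
	return NULL;
}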
/**
* __shost_for_each_device - iterate over all devices of a host (UNLOCKED)