The FC transport offloads the connect attempt to a workqueue, so
userspace is not able to wait for the result of the initial connect.

Allow userspace to wait on the connect result by honoring the
'connect_sync' connect option.
Signed-off-by: Daniel Wagner <dwagner@suse.de>
---
drivers/nvme/host/fc.c | 26 +++++++++++++++++++++++++-
1 file changed, 25 insertions(+), 1 deletion(-)
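
Note: the synchronisation added below is the plain completion pattern:
the connect work item (or one of the error/teardown paths) signals a
completion exactly once per attempt, and the initial connect path blocks
on it when connect_sync is set. A minimal stand-alone sketch of that
pattern follows; the demo_* names are made up for illustration and are
not part of this patch.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct demo_ctrl {
	struct work_struct connect_work;
	struct completion connect_completion;	/* fires once the attempt settles */
	bool live;				/* outcome of the attempt */
};

static void demo_connect_work(struct work_struct *work)
{
	struct demo_ctrl *ctrl = container_of(work, struct demo_ctrl,
					      connect_work);

	/* ... try to establish the association ... */
	ctrl->live = true;

	/* Wake up anyone blocked in demo_connect_sync(). */
	complete(&ctrl->connect_completion);
}

static int demo_connect_sync(struct demo_ctrl *ctrl)
{
	init_completion(&ctrl->connect_completion);
	INIT_WORK(&ctrl->connect_work, demo_connect_work);
	schedule_work(&ctrl->connect_work);

	/* Block until the work item reports success or final failure. */
	wait_for_completion(&ctrl->connect_completion);

	return ctrl->live ? 0 : -EIO;
}
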
@@ -169,6 +169,7 @@ struct nvme_fc_ctrl {
struct work_struct ioerr_work;
struct delayed_work connect_work;
+ struct completion connect_completion;
struct kref ref;
unsigned long flags;
@@ -803,6 +804,7 @@ nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: Couldn't schedule reset.\n",
ctrl->cnum);
+ complete(&ctrl->connect_completion);
nvme_fc_ctrl_put(ctrl);
}
break;
@@ -871,6 +873,7 @@ nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: controller connectivity lost.\n",
ctrl->cnum);
+ complete(&ctrl->connect_completion);
nvme_fc_ctrl_put(ctrl);
} else
nvme_fc_ctrl_connectivity_loss(ctrl);
@@ -3308,6 +3311,7 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
ctrl->cnum, min_t(int, portptr->dev_loss_tmo,
(ctrl->ctrl.opts->max_reconnects *
ctrl->ctrl.opts->reconnect_delay)));
+ complete(&ctrl->connect_completion);
nvme_fc_ctrl_put(ctrl);
}
}
@@ -3367,10 +3371,12 @@ nvme_fc_connect_ctrl_work(struct work_struct *work)
ret = nvme_fc_create_association(ctrl);
if (ret)
nvme_fc_reconnect_or_delete(ctrl, ret);
- else
+ else {
dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: controller connect complete\n",
ctrl->cnum);
+ complete(&ctrl->connect_completion);
+ }
}
@@ -3477,6 +3483,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
+ init_completion(&ctrl->connect_completion);
INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work);
spin_lock_init(&ctrl->lock);
@@ -3524,6 +3531,9 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
spin_unlock_irqrestore(&rport->lock, flags);
+ if (opts->connect_sync)
+ nvme_fc_ctrl_get(ctrl);
+
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
dev_err(ctrl->ctrl.device,
@@ -3540,6 +3550,19 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
flush_delayed_work(&ctrl->connect_work);
+ if (opts->connect_sync) {
+ enum nvme_ctrl_state state;
+
+ wait_for_completion(&ctrl->connect_completion);
+ state = nvme_ctrl_state(&ctrl->ctrl);
+ nvme_fc_ctrl_put(ctrl);
+
+ if (state != NVME_CTRL_LIVE) {
+ /* Cleanup is handled by the connect state machine */
+ return ERR_PTR(-EIO);
+ }
+ }
+
dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: new ctrl: NQN \"%s\", hostnqn: %s\n",
ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl), opts->host->nqn);
@@ -3896,6 +3919,7 @@ nvme_fc_delete_controllers(struct nvme_fc_rport *rport)
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: transport unloading: deleting ctrl\n",
ctrl->cnum);
+ complete(&ctrl->connect_completion);
nvme_fc_ctrl_put(ctrl);
}
spin_unlock(&rport->lock);
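
For completeness, a sketch of how userspace could exercise the new
behaviour by writing the fabrics options string directly to
/dev/nvme-fabrics. This assumes the companion fabrics patch in this
series accepts a bare 'connect_sync' token; the NQN and FC addresses
below are placeholders.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	static const char opts[] =
		"transport=fc,"
		"nqn=nqn.2019-08.org.example:subsys1,"
		"traddr=nn-0x200000109b579ef6:pn-0x100000109b579ef6,"
		"host_traddr=nn-0x200000109b579ef5:pn-0x100000109b579ef5,"
		"connect_sync";
	char resp[128];
	ssize_t n;
	int fd;

	fd = open("/dev/nvme-fabrics", O_RDWR);
	if (fd < 0)
		return 1;

	/*
	 * With connect_sync set, the write only returns once the initial
	 * connect attempt has either succeeded or finally failed.
	 */
	if (write(fd, opts, strlen(opts)) < 0) {
		close(fd);
		return 1;
	}

	/* On success the kernel reports the created controller instance. */
	n = read(fd, resp, sizeof(resp) - 1);
	if (n > 0) {
		resp[n] = '\0';
		printf("%s\n", resp);
	}

	close(fd);
	return 0;
}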