for-linus-20181123

-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAlv4RQUQHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgprfJEACnPxJfX+v1nGan+Al5gwdL4/2nU7yOXmb9
 6KLs27fbFfRuB+1HxPM4GOgPyfNUmzwdcwMEt/slrPQ85Zykl2ooDv48xmFh3Orr
 zRdHi00CRoIOhqRcobfXiBak8VwwBxxb8TNp1WbcbCxeaURRETSqylxRhyzVoNYL
 2h5AqcnrnHzDRdYWW7iNmoj5D+opUoV8oiRWeq5t+g3lDFt73w9IAxFM+OD9edSm
 wXvFVcCaijAAGlESww+DjfAl9g7JiJlvrP+y1Yz2HR4ZPvFFX+PP7rr2Mx+FV7rn
 lluA2NBE2I+I6KrErmwrpWbonWOH1tgTvaGeNemXy1FWbLLdhoCInbkmqyPnDHnD
 GTzaWuMe6nbtp3Wr+rnHwdqpjLxMjik9eGvI5JsLMFHfT0n9BjCJuqLBjU+QARU/
 D3rKLoKZeIVasM0NFhd/vPzK1P2kVPjszJ48orksnIvkjxzoG/YKzlN6wYMW+/z3
 eEWzO9u9BDUjA7ayShmzL6LjRsOts0IRtVU3fYJIsvtKUbwo6JXTiqDpNQWLYUIj
 qspuu0TehMy4mDq9ynNpRjL/PIQ7KUMb7OMtyheLVBpqbJ2GK+wohxU/wKZws4WM
 8IfYcvsnOfR0muMFCBZyPefO9q4wKrVqp4VP48R2lZ7S9YOndLBCxeDw/5xYAsH7
 ht/GCtxfOA==
 =7Ur6
 -----END PGP SIGNATURE-----

Merge tag 'for-linus-20181123' of git://git.kernel.dk/linux-block

Pull block fix from Jens Axboe:
 "Just a single fix for this week, fixing an issue with nvme-fc"

* tag 'for-linus-20181123' of git://git.kernel.dk/linux-block:
  nvme-fc: resolve io failures during connect
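The fix being pulled adds a dedicated error-recovery work item (err_work) that connect-time io failures may schedule at most once, guarded by an atomic flag (err_work_active) flipped with atomic_xchg(). As a rough illustration of that schedule-once guard, here is a minimal sketch in plain C11; the function and variable names are made up for the example, and C11 atomics/threads merely stand in for the kernel's atomic_xchg() and schedule_work():

/* Minimal sketch of the schedule-at-most-once guard the patch builds around
 * ctrl->err_work_active.  Illustrative only: plain C11 atomics and threads
 * stand in for the kernel's atomic_xchg()/schedule_work(). */
#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

static atomic_int err_work_active;      /* 0 = no recovery pending/running */

/* Every failing io lands here; only the caller that flips 0 -> 1 "schedules"
 * recovery.  In the driver, the work item clears the flag again when it is
 * done so a later error can start a new recovery cycle. */
static int io_error_path(void *arg)
{
	(void)arg;
	if (atomic_exchange(&err_work_active, 1) == 0)
		puts("first failure wins: error recovery scheduled");
	else
		puts("recovery already pending: nothing to do");
	return 0;
}

int main(void)
{
	thrd_t t[4];

	for (int i = 0; i < 4; i++)
		thrd_create(&t[i], io_error_path, NULL);
	for (int i = 0; i < 4; i++)
		thrd_join(t[i], NULL);
	return 0;
}

Because the exchange reads the old value and sets the flag in one step, racing failures cannot both observe the flag as clear, so only the first failure in a burst schedules recovery; the work item clears the flag when it finishes so a later error can trigger recovery again.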
commit 3381918fec
Linus Torvalds  2018-11-23 11:20:14 -08:00

1 changed file with 64 additions and 11 deletions

@@ -152,6 +152,7 @@ struct nvme_fc_ctrl {
bool ioq_live;
bool assoc_active;
atomic_t err_work_active;
u64 association_id;
struct list_head ctrl_list; /* rport->ctrl_list */
@@ -160,6 +161,7 @@ struct nvme_fc_ctrl {
struct blk_mq_tag_set tag_set;
struct delayed_work connect_work;
struct work_struct err_work;
struct kref ref;
u32 flags;
@@ -1531,6 +1533,10 @@ nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
int i;
/* ensure we've initialized the ops once */
if (!(aen_op->flags & FCOP_FLAGS_AEN))
return;
for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
__nvme_fc_abort_op(ctrl, aen_op);
}
@@ -2049,7 +2055,25 @@ nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
static void
nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
{
/* only proceed if in LIVE state - e.g. on first error */
int active;
/*
* if an error (io timeout, etc) while (re)connecting,
* it's an error on creating the new association.
* Start the error recovery thread if it hasn't already
* been started. It is expected there could be multiple
* ios hitting this path before things are cleaned up.
*/
if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
active = atomic_xchg(&ctrl->err_work_active, 1);
if (!active && !schedule_work(&ctrl->err_work)) {
atomic_set(&ctrl->err_work_active, 0);
WARN_ON(1);
}
return;
}
/* Otherwise, only proceed if in LIVE state - e.g. on first error */
if (ctrl->ctrl.state != NVME_CTRL_LIVE)
return;
@@ -2814,6 +2838,7 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
cancel_work_sync(&ctrl->err_work);
cancel_delayed_work_sync(&ctrl->connect_work);
/*
* kill the association on the link side. this will block
@@ -2865,6 +2890,21 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
}
}
static void
__nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl)
{
nvme_stop_keep_alive(&ctrl->ctrl);
/* will block while waiting for io to terminate */
nvme_fc_delete_association(ctrl);
if (ctrl->ctrl.state != NVME_CTRL_CONNECTING &&
!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
dev_err(ctrl->ctrl.device,
"NVME-FC{%d}: error_recovery: Couldn't change state "
"to CONNECTING\n", ctrl->cnum);
}
static void
nvme_fc_reset_ctrl_work(struct work_struct *work)
{
@@ -2872,18 +2912,10 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
int ret;
__nvme_fc_terminate_io(ctrl);
nvme_stop_ctrl(&ctrl->ctrl);
/* will block while waiting for io to terminate */
nvme_fc_delete_association(ctrl);
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
dev_err(ctrl->ctrl.device,
"NVME-FC{%d}: error_recovery: Couldn't change state "
"to CONNECTING\n", ctrl->cnum);
return;
}
if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
ret = nvme_fc_create_association(ctrl);
else
@@ -2897,6 +2929,24 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
ctrl->cnum);
}
static void
nvme_fc_connect_err_work(struct work_struct *work)
{
struct nvme_fc_ctrl *ctrl =
container_of(work, struct nvme_fc_ctrl, err_work);
__nvme_fc_terminate_io(ctrl);
atomic_set(&ctrl->err_work_active, 0);
/*
* Rescheduling the connection after recovering
* from the io error is left to the reconnect work
* item, which is what should have stalled waiting on
* the io that had the error that scheduled this work.
*/
}
static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
.name = "fc",
.module = THIS_MODULE,
@@ -3007,6 +3057,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
ctrl->cnum = idx;
ctrl->ioq_live = false;
ctrl->assoc_active = false;
atomic_set(&ctrl->err_work_active, 0);
init_waitqueue_head(&ctrl->ioabort_wait);
get_device(ctrl->dev);
@@ -3014,6 +3065,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
INIT_WORK(&ctrl->err_work, nvme_fc_connect_err_work);
spin_lock_init(&ctrl->lock);
/* io queue count */
@@ -3103,6 +3155,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
fail_ctrl:
nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
cancel_work_sync(&ctrl->ctrl.reset_work);
cancel_work_sync(&ctrl->err_work);
cancel_delayed_work_sync(&ctrl->connect_work);
ctrl->ctrl.opts = NULL;