diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
index 1f3540826e68..c08db9c2375d 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/arch/ia64/sn/kernel/xpc_channel.c
@@ -632,7 +632,7 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
 		ch->number, ch->partid);
 
 	spin_unlock_irqrestore(&ch->lock, *irq_flags);
-	xpc_create_kthreads(ch, 1);
+	xpc_create_kthreads(ch, 1, 0);
 	spin_lock_irqsave(&ch->lock, *irq_flags);
 }
 
@@ -754,12 +754,12 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
 
 	/* make sure all activity has settled down first */
 
-	if (atomic_read(&ch->references) > 0 ||
-			((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
-			!(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE))) {
+	if (atomic_read(&ch->kthreads_assigned) > 0 ||
+			atomic_read(&ch->references) > 0) {
 		return;
 	}
-	DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
+	DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
+		!(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));
 
 	if (part->act_state == XPC_P_DEACTIVATING) {
 		/* can't proceed until the other side disengages from us */
@@ -1651,6 +1651,11 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
 	/* wake all idle kthreads so they can exit */
 	if (atomic_read(&ch->kthreads_idle) > 0) {
 		wake_up_all(&ch->idle_wq);
+
+	} else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
+			!(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
+		/* start a kthread that will do the xpcDisconnecting callout */
+		xpc_create_kthreads(ch, 1, 1);
 	}
 
 	/* wake those waiting to allocate an entry from the local msg queue */
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index fa96dfc0e1aa..7a387d237363 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -681,7 +681,7 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed)
 
 	dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
 		needed, ch->partid, ch->number);
 
-	xpc_create_kthreads(ch, needed);
+	xpc_create_kthreads(ch, needed, 0);
 }
 
@@ -775,26 +775,28 @@ xpc_daemonize_kthread(void *args)
 		xpc_kthread_waitmsgs(part, ch);
 	}
 
-	if (atomic_dec_return(&ch->kthreads_assigned) == 0) {
-		spin_lock_irqsave(&ch->lock, irq_flags);
-		if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
-				!(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
-			ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
-			spin_unlock_irqrestore(&ch->lock, irq_flags);
+	/* let registerer know that connection is disconnecting */
 
-			xpc_disconnect_callout(ch, xpcDisconnecting);
-
-			spin_lock_irqsave(&ch->lock, irq_flags);
-			ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
-		}
+	spin_lock_irqsave(&ch->lock, irq_flags);
+	if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
+			!(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
+		ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
 		spin_unlock_irqrestore(&ch->lock, irq_flags);
+
+		xpc_disconnect_callout(ch, xpcDisconnecting);
+
+		spin_lock_irqsave(&ch->lock, irq_flags);
+		ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
+	}
+	spin_unlock_irqrestore(&ch->lock, irq_flags);
+
+	if (atomic_dec_return(&ch->kthreads_assigned) == 0) {
 		if (atomic_dec_return(&part->nchannels_engaged) == 0) {
 			xpc_mark_partition_disengaged(part);
 			xpc_IPI_send_disengage(part);
 		}
 	}
 
-
 	xpc_msgqueue_deref(ch);
 
 	dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
@@ -818,7 +820,8 @@ xpc_daemonize_kthread(void *args)
  * partition.
  */
void
-xpc_create_kthreads(struct xpc_channel *ch, int needed)
+xpc_create_kthreads(struct xpc_channel *ch, int needed,
+			int ignore_disconnecting)
 {
 	unsigned long irq_flags;
 	pid_t pid;
@@ -833,16 +836,38 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed)
 		 * kthread. That kthread is responsible for doing the
 		 * counterpart to the following before it exits.
 		 */
+		if (ignore_disconnecting) {
+			if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
+				/* kthreads assigned had gone to zero */
+				BUG_ON(!(ch->flags &
+					XPC_C_DISCONNECTINGCALLOUT_MADE));
+				break;
+			}
+
+		} else if (ch->flags & XPC_C_DISCONNECTING) {
+			break;
+
+		} else if (atomic_inc_return(&ch->kthreads_assigned) == 1) {
+			if (atomic_inc_return(&part->nchannels_engaged) == 1)
+				xpc_mark_partition_engaged(part);
+		}
 		(void) xpc_part_ref(part);
 		xpc_msgqueue_ref(ch);
-		if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
-		    atomic_inc_return(&part->nchannels_engaged) == 1) {
-			xpc_mark_partition_engaged(part);
-		}
 
 		pid = kernel_thread(xpc_daemonize_kthread, (void *) args, 0);
 		if (pid < 0) {
 			/* the fork failed */
+
+			/*
+			 * NOTE: if (ignore_disconnecting &&
+			 * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true,
+			 * then we'll deadlock if all other kthreads assigned
+			 * to this channel are blocked in the channel's
+			 * registerer, because the only thing that will unblock
+			 * them is the xpcDisconnecting callout that this
+			 * failed kernel_thread would have made.
+			 */
+
 			if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
 			    atomic_dec_return(&part->nchannels_engaged) == 0) {
 				xpc_mark_partition_disengaged(part);
@@ -857,9 +882,6 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed)
 			 * Flag this as an error only if we have an
 			 * insufficient #of kthreads for the channel
 			 * to function.
-			 *
-			 * No xpc_msgqueue_ref() is needed here since
-			 * the channel mgr is doing this.
 			 */
 			spin_lock_irqsave(&ch->lock, irq_flags);
 			XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources,
diff --git a/include/asm-ia64/sn/xpc.h b/include/asm-ia64/sn/xpc.h
index 1d45e1518fb3..e52b8508083b 100644
--- a/include/asm-ia64/sn/xpc.h
+++ b/include/asm-ia64/sn/xpc.h
@@ -673,7 +673,7 @@ extern irqreturn_t xpc_notify_IRQ_handler(int, void *);
 extern void xpc_dropped_IPI_check(struct xpc_partition *);
 extern void xpc_activate_partition(struct xpc_partition *);
 extern void xpc_activate_kthreads(struct xpc_channel *, int);
-extern void xpc_create_kthreads(struct xpc_channel *, int);
+extern void xpc_create_kthreads(struct xpc_channel *, int, int);
 extern void xpc_disconnect_wait(int);
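
A note for reviewers on the atomic_inc_not_zero() guard added above: the
deadlock being fixed arises because the old code made the xpcDisconnecting
callout only from the last exiting kthread, so xpc_disconnect_channel() had
no way to make the callout when every kthread was blocked in the channel's
registerer. The patch instead lets xpc_disconnect_channel() spawn a dedicated
callout kthread (ignore_disconnecting = 1), but that is only safe while
kthreads_assigned is still nonzero; once it has reached zero, reviving it
would throw off the nchannels_engaged accounting tied to the first increment.
The sketch below is a minimal, self-contained C11 userspace analogue (the
inc_not_zero() helper and the counter name are illustrative stand-ins, not
the kernel API) of the guarantee the ignore_disconnecting path relies on:
a reference can be piggy-backed only while at least one is still held, and
a count that has dropped to zero is never resurrected.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Userspace analogue of the kernel's atomic_inc_not_zero():
 * increment the count only if it is currently nonzero, and
 * report whether the increment (the new "reference") happened.
 */
static bool inc_not_zero(atomic_int *cnt)
{
	int old = atomic_load(cnt);

	while (old != 0) {
		/* on failure, old is reloaded with the current value */
		if (atomic_compare_exchange_weak(cnt, &old, old + 1))
			return true;	/* reference taken */
	}
	return false;	/* already zero: must not resurrect the count */
}

int main(void)
{
	atomic_int kthreads_assigned = 1;	/* one kthread still running */

	/* channel still active: the callout kthread may piggy-back */
	printf("grab while nonzero: %d\n", inc_not_zero(&kthreads_assigned));

	/* both references dropped; the count reaches zero */
	atomic_fetch_sub(&kthreads_assigned, 1);
	atomic_fetch_sub(&kthreads_assigned, 1);

	/* too late: creating the callout kthread now would be unsafe */
	printf("grab at zero:      %d\n", inc_not_zero(&kthreads_assigned));
	return 0;
}

The same reasoning explains the BUG_ON() in the ignore_disconnecting branch:
if the increment fails, every kthread has already exited, which is only
legitimate once the xpcDisconnecting callout has been made (i.e., with
XPC_C_DISCONNECTINGCALLOUT_MADE set).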