sgi-xp: cleanup naming of partition defines

Cleanup naming of partition defines.

Signed-off-by: Dean Nelson <dcn@sgi.com>
Cc: Jack Steiner <steiner@sgi.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Dean Nelson 2008-07-29 22:34:18 -07:00 committed by Linus Torvalds
parent 61deb86e98
commit 83469b5525
5 changed files with 51 additions and 51 deletions

View File

@@ -576,21 +576,21 @@ struct xpc_partition {
/* struct xpc_partition act_state values (for XPC HB) */
#define XPC_P_INACTIVE 0x00 /* partition is not active */
#define XPC_P_ACTIVATION_REQ 0x01 /* created thread to activate */
#define XPC_P_ACTIVATING 0x02 /* activation thread started */
#define XPC_P_ACTIVE 0x03 /* xpc_partition_up() was called */
#define XPC_P_DEACTIVATING 0x04 /* partition deactivation initiated */
#define XPC_P_AS_INACTIVE 0x00 /* partition is not active */
#define XPC_P_AS_ACTIVATION_REQ 0x01 /* created thread to activate */
#define XPC_P_AS_ACTIVATING 0x02 /* activation thread started */
#define XPC_P_AS_ACTIVE 0x03 /* xpc_partition_up() was called */
#define XPC_P_AS_DEACTIVATING 0x04 /* partition deactivation initiated */
#define XPC_DEACTIVATE_PARTITION(_p, _reason) \
xpc_deactivate_partition(__LINE__, (_p), (_reason))
/* struct xpc_partition setup_state values */
#define XPC_P_UNSET 0x00 /* infrastructure was never setup */
#define XPC_P_SETUP 0x01 /* infrastructure is setup */
#define XPC_P_WTEARDOWN 0x02 /* waiting to teardown infrastructure */
#define XPC_P_TORNDOWN 0x03 /* infrastructure is torndown */
#define XPC_P_SS_UNSET 0x00 /* infrastructure was never setup */
#define XPC_P_SS_SETUP 0x01 /* infrastructure is setup */
#define XPC_P_SS_WTEARDOWN 0x02 /* waiting to teardown infrastructure */
#define XPC_P_SS_TORNDOWN 0x03 /* infrastructure is torndown */
/*
* struct xpc_partition_sn2's dropped notify IRQ timer is set to wait the
@@ -787,7 +787,7 @@ xpc_part_deref(struct xpc_partition *part)
s32 refs = atomic_dec_return(&part->references);
DBUG_ON(refs < 0);
if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN)
if (refs == 0 && part->setup_state == XPC_P_SS_WTEARDOWN)
wake_up(&part->teardown_wq);
}
@@ -797,7 +797,7 @@ xpc_part_ref(struct xpc_partition *part)
int setup;
atomic_inc(&part->references);
setup = (part->setup_state == XPC_P_SETUP);
setup = (part->setup_state == XPC_P_SS_SETUP);
if (!setup)
xpc_part_deref(part);

View File

@@ -99,7 +99,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
!(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));
if (part->act_state == XPC_P_DEACTIVATING) {
if (part->act_state == XPC_P_AS_DEACTIVATING) {
/* can't proceed until the other side disengages from us */
if (xpc_partition_engaged(ch->partid))
return;
@@ -155,7 +155,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
/* we won't lose the CPU since we're holding ch->lock */
complete(&ch->wdisconnect_wait);
} else if (ch->delayed_chctl_flags) {
if (part->act_state != XPC_P_DEACTIVATING) {
if (part->act_state != XPC_P_AS_DEACTIVATING) {
/* time to take action on any delayed chctl flags */
spin_lock(&part->chctl_lock);
part->chctl.flags[ch->number] |=
@@ -276,7 +276,7 @@ again:
"%d, channel=%d\n", ch->partid, ch->number);
if (ch->flags & XPC_C_DISCONNECTED) {
DBUG_ON(part->act_state != XPC_P_DEACTIVATING);
DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING);
spin_unlock_irqrestore(&ch->lock, irq_flags);
return;
}
@@ -312,7 +312,7 @@ again:
"channel=%d\n", args->msg_size, args->local_nentries,
ch->partid, ch->number);
if (part->act_state == XPC_P_DEACTIVATING ||
if (part->act_state == XPC_P_AS_DEACTIVATING ||
(ch->flags & XPC_C_ROPENREQUEST)) {
spin_unlock_irqrestore(&ch->lock, irq_flags);
return;
@@ -546,7 +546,7 @@ xpc_process_sent_chctl_flags(struct xpc_partition *part)
continue;
}
if (part->act_state == XPC_P_DEACTIVATING)
if (part->act_state == XPC_P_AS_DEACTIVATING)
continue;
if (!(ch_flags & XPC_C_CONNECTED)) {

View File

@@ -290,8 +290,8 @@ xpc_check_remote_hb(void)
part = &xpc_partitions[partid];
if (part->act_state == XPC_P_INACTIVE ||
part->act_state == XPC_P_DEACTIVATING) {
if (part->act_state == XPC_P_AS_INACTIVE ||
part->act_state == XPC_P_AS_DEACTIVATING) {
continue;
}
@@ -406,7 +406,7 @@ xpc_initiate_discovery(void *ignore)
static void
xpc_channel_mgr(struct xpc_partition *part)
{
while (part->act_state != XPC_P_DEACTIVATING ||
while (part->act_state != XPC_P_AS_DEACTIVATING ||
atomic_read(&part->nchannels_active) > 0 ||
!xpc_partition_disengaged(part)) {
@@ -429,7 +429,7 @@ xpc_channel_mgr(struct xpc_partition *part)
(void)wait_event_interruptible(part->channel_mgr_wq,
(atomic_read(&part->channel_mgr_requests) > 0 ||
part->chctl.all_flags != 0 ||
(part->act_state == XPC_P_DEACTIVATING &&
(part->act_state == XPC_P_AS_DEACTIVATING &&
atomic_read(&part->nchannels_active) == 0 &&
xpc_partition_disengaged(part))));
atomic_set(&part->channel_mgr_requests, 1);
@@ -458,16 +458,16 @@ xpc_activating(void *__partid)
spin_lock_irqsave(&part->act_lock, irq_flags);
if (part->act_state == XPC_P_DEACTIVATING) {
part->act_state = XPC_P_INACTIVE;
if (part->act_state == XPC_P_AS_DEACTIVATING) {
part->act_state = XPC_P_AS_INACTIVE;
spin_unlock_irqrestore(&part->act_lock, irq_flags);
part->remote_rp_pa = 0;
return 0;
}
/* indicate the thread is activating */
DBUG_ON(part->act_state != XPC_P_ACTIVATION_REQ);
part->act_state = XPC_P_ACTIVATING;
DBUG_ON(part->act_state != XPC_P_AS_ACTIVATION_REQ);
part->act_state = XPC_P_AS_ACTIVATING;
XPC_SET_REASON(part, 0, 0);
spin_unlock_irqrestore(&part->act_lock, irq_flags);
@@ -509,9 +509,9 @@ xpc_activate_partition(struct xpc_partition *part)
spin_lock_irqsave(&part->act_lock, irq_flags);
DBUG_ON(part->act_state != XPC_P_INACTIVE);
DBUG_ON(part->act_state != XPC_P_AS_INACTIVE);
part->act_state = XPC_P_ACTIVATION_REQ;
part->act_state = XPC_P_AS_ACTIVATION_REQ;
XPC_SET_REASON(part, xpCloneKThread, __LINE__);
spin_unlock_irqrestore(&part->act_lock, irq_flags);
@@ -520,7 +520,7 @@ xpc_activate_partition(struct xpc_partition *part)
partid);
if (IS_ERR(kthread)) {
spin_lock_irqsave(&part->act_lock, irq_flags);
part->act_state = XPC_P_INACTIVE;
part->act_state = XPC_P_AS_INACTIVE;
XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__);
spin_unlock_irqrestore(&part->act_lock, irq_flags);
}
@@ -786,7 +786,7 @@ xpc_disconnect_wait(int ch_number)
wakeup_channel_mgr = 0;
if (ch->delayed_chctl_flags) {
if (part->act_state != XPC_P_DEACTIVATING) {
if (part->act_state != XPC_P_AS_DEACTIVATING) {
spin_lock(&part->chctl_lock);
part->chctl.flags[ch->number] |=
ch->delayed_chctl_flags;
@@ -846,7 +846,7 @@ xpc_do_exit(enum xp_retval reason)
part = &xpc_partitions[partid];
if (xpc_partition_disengaged(part) &&
part->act_state == XPC_P_INACTIVE) {
part->act_state == XPC_P_AS_INACTIVE) {
continue;
}
@@ -962,7 +962,7 @@ xpc_die_deactivate(void)
part = &xpc_partitions[partid];
if (xpc_partition_engaged(partid) ||
part->act_state != XPC_P_INACTIVE) {
part->act_state != XPC_P_AS_INACTIVE) {
xpc_request_partition_deactivation(part);
xpc_indicate_partition_disengaged(part);
}
@@ -1113,7 +1113,7 @@ xpc_init(void)
part->activate_IRQ_rcvd = 0;
spin_lock_init(&part->act_lock);
part->act_state = XPC_P_INACTIVE;
part->act_state = XPC_P_AS_INACTIVE;
XPC_SET_REASON(part, 0, 0);
init_timer(&part->disengage_timer);
@@ -1121,7 +1121,7 @@ xpc_init(void)
xpc_timeout_partition_disengage;
part->disengage_timer.data = (unsigned long)part;
part->setup_state = XPC_P_UNSET;
part->setup_state = XPC_P_SS_UNSET;
init_waitqueue_head(&part->teardown_wq);
atomic_set(&part->references, 0);
}

View File

@@ -273,9 +273,9 @@ xpc_partition_disengaged(struct xpc_partition *part)
if (!in_interrupt())
del_singleshot_timer_sync(&part->disengage_timer);
DBUG_ON(part->act_state != XPC_P_DEACTIVATING &&
part->act_state != XPC_P_INACTIVE);
if (part->act_state != XPC_P_INACTIVE)
DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING &&
part->act_state != XPC_P_AS_INACTIVE);
if (part->act_state != XPC_P_AS_INACTIVE)
xpc_wakeup_channel_mgr(part);
xpc_cancel_partition_deactivation_request(part);
@@ -295,8 +295,8 @@ xpc_mark_partition_active(struct xpc_partition *part)
dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part));
spin_lock_irqsave(&part->act_lock, irq_flags);
if (part->act_state == XPC_P_ACTIVATING) {
part->act_state = XPC_P_ACTIVE;
if (part->act_state == XPC_P_AS_ACTIVATING) {
part->act_state = XPC_P_AS_ACTIVE;
ret = xpSuccess;
} else {
DBUG_ON(part->reason == xpSuccess);
@@ -318,7 +318,7 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part,
spin_lock_irqsave(&part->act_lock, irq_flags);
if (part->act_state == XPC_P_INACTIVE) {
if (part->act_state == XPC_P_AS_INACTIVE) {
XPC_SET_REASON(part, reason, line);
spin_unlock_irqrestore(&part->act_lock, irq_flags);
if (reason == xpReactivating) {
@@ -327,7 +327,7 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part,
}
return;
}
if (part->act_state == XPC_P_DEACTIVATING) {
if (part->act_state == XPC_P_AS_DEACTIVATING) {
if ((part->reason == xpUnloading && reason != xpUnloading) ||
reason == xpReactivating) {
XPC_SET_REASON(part, reason, line);
@@ -336,7 +336,7 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part,
return;
}
part->act_state = XPC_P_DEACTIVATING;
part->act_state = XPC_P_AS_DEACTIVATING;
XPC_SET_REASON(part, reason, line);
spin_unlock_irqrestore(&part->act_lock, irq_flags);
@@ -367,7 +367,7 @@ xpc_mark_partition_inactive(struct xpc_partition *part)
XPC_PARTID(part));
spin_lock_irqsave(&part->act_lock, irq_flags);
part->act_state = XPC_P_INACTIVE;
part->act_state = XPC_P_AS_INACTIVE;
spin_unlock_irqrestore(&part->act_lock, irq_flags);
part->remote_rp_pa = 0;
}

View File

@@ -327,7 +327,7 @@ xpc_send_notify_IRQ_sn2(struct xpc_channel *ch, u8 chctl_flag,
union xpc_channel_ctl_flags chctl = { 0 };
enum xp_retval ret;
if (likely(part->act_state != XPC_P_DEACTIVATING)) {
if (likely(part->act_state != XPC_P_AS_DEACTIVATING)) {
chctl.flags[ch->number] = chctl_flag;
ret = xpc_send_IRQ_sn2(part_sn2->remote_chctl_amo_va,
chctl.all_flags,
@@ -975,7 +975,7 @@ xpc_identify_activate_IRQ_req_sn2(int nasid)
remote_vars->heartbeat, remote_vars->heartbeating_to_mask[0]);
if (xpc_partition_disengaged(part) &&
part->act_state == XPC_P_INACTIVE) {
part->act_state == XPC_P_AS_INACTIVE) {
xpc_update_partition_info_sn2(part, remote_rp_version,
&remote_rp_ts_jiffies,
@@ -1257,10 +1257,10 @@ xpc_setup_infrastructure_sn2(struct xpc_partition *part)
}
/*
* With the setting of the partition setup_state to XPC_P_SETUP, we're
* declaring that this partition is ready to go.
* With the setting of the partition setup_state to XPC_P_SS_SETUP,
* we're declaring that this partition is ready to go.
*/
part->setup_state = XPC_P_SETUP;
part->setup_state = XPC_P_SS_SETUP;
/*
* Setup the per partition specific variables required by the
@@ -1323,8 +1323,8 @@ xpc_teardown_infrastructure_sn2(struct xpc_partition *part)
DBUG_ON(atomic_read(&part->nchannels_engaged) != 0);
DBUG_ON(atomic_read(&part->nchannels_active) != 0);
DBUG_ON(part->setup_state != XPC_P_SETUP);
part->setup_state = XPC_P_WTEARDOWN;
DBUG_ON(part->setup_state != XPC_P_SS_SETUP);
part->setup_state = XPC_P_SS_WTEARDOWN;
xpc_vars_part_sn2[partid].magic = 0;
@@ -1338,7 +1338,7 @@ xpc_teardown_infrastructure_sn2(struct xpc_partition *part)
/* now we can begin tearing down the infrastructure */
part->setup_state = XPC_P_TORNDOWN;
part->setup_state = XPC_P_SS_TORNDOWN;
/* in case we've still got outstanding timers registered... */
del_timer_sync(&part_sn2->dropped_notify_IRQ_timer);
@@ -1375,7 +1375,7 @@ xpc_pull_remote_cachelines_sn2(struct xpc_partition *part, void *dst,
DBUG_ON((unsigned long)dst != L1_CACHE_ALIGN((unsigned long)dst));
DBUG_ON(cnt != L1_CACHE_ALIGN(cnt));
if (part->act_state == XPC_P_DEACTIVATING)
if (part->act_state == XPC_P_AS_DEACTIVATING)
return part->reason;
ret = xp_remote_memcpy(xp_pa(dst), src_pa, cnt);
@@ -1534,7 +1534,7 @@ xpc_make_first_contact_sn2(struct xpc_partition *part)
/* wait a 1/4 of a second or so */
(void)msleep_interruptible(250);
if (part->act_state == XPC_P_DEACTIVATING)
if (part->act_state == XPC_P_AS_DEACTIVATING)
return part->reason;
}