mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-10-29 23:53:32 +00:00
Merge branch 'x86-uv-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-uv-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, UV: Correct UV2 BAU destination timeout
  x86, UV: Correct failed topology memory leak
  x86, UV: Remove cpumask_t from the stack
  x86, UV: Rename hubmask to pnmask
  x86, UV: Correct reset_with_ipi()
  x86, UV: Allow for non-consecutive sockets
  x86, UV: Inline header file functions
  x86, UV: Fix smp_processor_id() use in a preemptable region
  x86, UV: Enable 64-bit ACPI MCFG support for SGI UV2 platform
  x86, UV: Clean up uv_mmrs.h
commit 3e0b8df79d

4 changed files with 1677 additions and 1315 deletions
@@ -67,7 +67,7 @@
  * we're using 655us, similar to UV1: 65 units of 10us
  */
 #define UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD	(9UL)
-#define UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD	(65*10UL)
+#define UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD	(15UL)
 
 #define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD	(is_uv1_hub() ?			\
 		UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD :			\
@@ -106,12 +106,20 @@
 #define DS_SOURCE_TIMEOUT		3
 /*
  * bits put together from HRP_LB_BAU_SB_ACTIVATION_STATUS_0/1/2
- * values 1 and 5 will not occur
+ * values 1 and 3 will not occur
+ *        Decoded meaning              ERROR  BUSY    AUX ERR
+ * -------------------------------     ----   -----   -------
+ * IDLE                                 0       0        0
+ * BUSY (active)                        0       1        0
+ * SW Ack Timeout (destination)         1       0        0
+ * SW Ack INTD rejected (strong NACK)   1       0        1
+ * Source Side Time Out Detected        1       1        0
+ * Destination Side PUT Failed          1       1        1
  */
 #define UV2H_DESC_IDLE			0
-#define UV2H_DESC_DEST_TIMEOUT		2
-#define UV2H_DESC_DEST_STRONG_NACK	3
-#define UV2H_DESC_BUSY			4
+#define UV2H_DESC_BUSY			2
+#define UV2H_DESC_DEST_TIMEOUT		4
+#define UV2H_DESC_DEST_STRONG_NACK	5
 #define UV2H_DESC_SOURCE_TIMEOUT	6
 #define UV2H_DESC_DEST_PUT_ERR		7
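A note on this hunk: the table packs ERROR into bit 2, BUSY into bit 1 and AUX ERR into bit 0, which is exactly how the UV2H_DESC_* values above are formed. A minimal illustrative helper showing that packing (the function name is invented here, it is not part of the patch):

    /* illustrative only: combine the three decoded columns from the
     * table above into the 3-bit value the UV2H_DESC_* constants name
     * (ERROR is bit 2, BUSY is bit 1, AUX ERR is bit 0) */
    static inline int uv2_desc_status(int error, int busy, int aux_err)
    {
            return (error << 2) | (busy << 1) | aux_err;
    }
    /* e.g. uv2_desc_status(1, 0, 1) == 5 == UV2H_DESC_DEST_STRONG_NACK */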
@@ -183,7 +191,7 @@
  * 'base_dest_nasid' field of the header corresponds to the
  * destination nodeID associated with that specified bit.
  */
-struct bau_targ_hubmask {
+struct pnmask {
 	unsigned long bits[BITS_TO_LONGS(UV_DISTRIBUTION_SIZE)];
 };
 
@@ -314,7 +322,7 @@ struct bau_msg_header {
  * Should be 64 bytes
  */
 struct bau_desc {
-	struct bau_targ_hubmask distribution;
+	struct pnmask distribution;
 	/*
 	 * message template, consisting of header and payload:
 	 */
@@ -488,6 +496,7 @@ struct bau_control {
 	struct bau_control *uvhub_master;
 	struct bau_control *socket_master;
 	struct ptc_stats *statp;
+	cpumask_t *cpumask;
 	unsigned long timeout_interval;
 	unsigned long set_bau_on_time;
 	atomic_t active_descriptor_count;
@@ -526,90 +535,90 @@ struct bau_control {
 	struct hub_and_pnode *thp;
 };
 
-static unsigned long read_mmr_uv2_status(void)
+static inline unsigned long read_mmr_uv2_status(void)
 {
 	return read_lmmr(UV2H_LB_BAU_SB_ACTIVATION_STATUS_2);
 }
 
-static void write_mmr_data_broadcast(int pnode, unsigned long mmr_image)
+static inline void write_mmr_data_broadcast(int pnode, unsigned long mmr_image)
 {
 	write_gmmr(pnode, UVH_BAU_DATA_BROADCAST, mmr_image);
 }
 
-static void write_mmr_descriptor_base(int pnode, unsigned long mmr_image)
+static inline void write_mmr_descriptor_base(int pnode, unsigned long mmr_image)
 {
 	write_gmmr(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE, mmr_image);
 }
 
-static void write_mmr_activation(unsigned long index)
+static inline void write_mmr_activation(unsigned long index)
 {
 	write_lmmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
 }
 
-static void write_gmmr_activation(int pnode, unsigned long mmr_image)
+static inline void write_gmmr_activation(int pnode, unsigned long mmr_image)
 {
 	write_gmmr(pnode, UVH_LB_BAU_SB_ACTIVATION_CONTROL, mmr_image);
 }
 
-static void write_mmr_payload_first(int pnode, unsigned long mmr_image)
+static inline void write_mmr_payload_first(int pnode, unsigned long mmr_image)
 {
 	write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST, mmr_image);
 }
 
-static void write_mmr_payload_tail(int pnode, unsigned long mmr_image)
+static inline void write_mmr_payload_tail(int pnode, unsigned long mmr_image)
 {
 	write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL, mmr_image);
 }
 
-static void write_mmr_payload_last(int pnode, unsigned long mmr_image)
+static inline void write_mmr_payload_last(int pnode, unsigned long mmr_image)
 {
 	write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST, mmr_image);
 }
 
-static void write_mmr_misc_control(int pnode, unsigned long mmr_image)
+static inline void write_mmr_misc_control(int pnode, unsigned long mmr_image)
 {
 	write_gmmr(pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
 }
 
-static unsigned long read_mmr_misc_control(int pnode)
+static inline unsigned long read_mmr_misc_control(int pnode)
 {
 	return read_gmmr(pnode, UVH_LB_BAU_MISC_CONTROL);
 }
 
-static void write_mmr_sw_ack(unsigned long mr)
+static inline void write_mmr_sw_ack(unsigned long mr)
 {
 	uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
 }
 
-static unsigned long read_mmr_sw_ack(void)
+static inline unsigned long read_mmr_sw_ack(void)
 {
 	return read_lmmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
 }
 
-static unsigned long read_gmmr_sw_ack(int pnode)
+static inline unsigned long read_gmmr_sw_ack(int pnode)
 {
 	return read_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
 }
 
-static void write_mmr_data_config(int pnode, unsigned long mr)
+static inline void write_mmr_data_config(int pnode, unsigned long mr)
 {
 	uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, mr);
 }
 
-static inline int bau_uvhub_isset(int uvhub, struct bau_targ_hubmask *dstp)
+static inline int bau_uvhub_isset(int uvhub, struct pnmask *dstp)
 {
 	return constant_test_bit(uvhub, &dstp->bits[0]);
 }
-static inline void bau_uvhub_set(int pnode, struct bau_targ_hubmask *dstp)
+static inline void bau_uvhub_set(int pnode, struct pnmask *dstp)
 {
 	__set_bit(pnode, &dstp->bits[0]);
 }
-static inline void bau_uvhubs_clear(struct bau_targ_hubmask *dstp,
+static inline void bau_uvhubs_clear(struct pnmask *dstp,
 				    int nbits)
 {
 	bitmap_zero(&dstp->bits[0], nbits);
 }
-static inline int bau_uvhub_weight(struct bau_targ_hubmask *dstp)
+static inline int bau_uvhub_weight(struct pnmask *dstp)
 {
 	return bitmap_weight((unsigned long *)&dstp->bits[0],
 				UV_DISTRIBUTION_SIZE);
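A note on this large hunk: it mostly just adds the inline keyword ("x86, UV: Inline header file functions"). These small MMR accessors live in the BAU header (uv_bau.h), which is included from more than one compilation unit; a plain static function defined in a header gets a separate copy in every file that includes it and provokes "defined but not used" warnings in files that never call it, while static inline avoids both. A minimal sketch of the general pattern, not the UV header itself:

    /* some_driver.h -- hypothetical header shared by several .c files */
    #ifndef SOME_DRIVER_H
    #define SOME_DRIVER_H

    /* static inline: safe to define in a header; callers get the code
     * inlined, and translation units that never use it emit nothing */
    static inline unsigned long reg_read(volatile unsigned long *reg)
    {
            return *reg;
    }

    #endif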
File diff suppressed because it is too large
@@ -519,7 +519,8 @@ static int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg,
 	if (cfg->address < 0xFFFFFFFF)
 		return 0;
 
-	if (!strcmp(mcfg->header.oem_id, "SGI"))
+	if (!strcmp(mcfg->header.oem_id, "SGI") ||
+	    !strcmp(mcfg->header.oem_id, "SGI2"))
 		return 0;
 
 	if (mcfg->header.revision >= 1) {
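A note on this hunk: it simply accepts a second OEM id, "SGI2", for MCFG base addresses above 4 GB. If the whitelist kept growing, a table-driven check would read better than chained strcmp() calls; a hedged sketch of that alternative (not the kernel's code, the names below are invented):

    #include <linux/kernel.h>	/* ARRAY_SIZE */
    #include <linux/string.h>	/* strcmp */

    static const char * const mcfg_64bit_oem_ids[] = { "SGI", "SGI2" };

    static int oem_allows_64bit_mcfg(const char *oem_id)
    {
            int i;

            for (i = 0; i < ARRAY_SIZE(mcfg_64bit_oem_ids); i++)
                    if (!strcmp(oem_id, mcfg_64bit_oem_ids[i]))
                            return 1;	/* OEM known to handle a >4GB MCFG region */
            return 0;
    }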
@@ -296,14 +296,18 @@ static void bau_process_message(struct msg_desc *mdp,
 }
 
 /*
- * Determine the first cpu on a uvhub.
+ * Determine the first cpu on a pnode.
  */
-static int uvhub_to_first_cpu(int uvhub)
+static int pnode_to_first_cpu(int pnode, struct bau_control *smaster)
 {
 	int cpu;
-	for_each_present_cpu(cpu)
-		if (uvhub == uv_cpu_to_blade_id(cpu))
+	struct hub_and_pnode *hpp;
+
+	for_each_present_cpu(cpu) {
+		hpp = &smaster->thp[cpu];
+		if (pnode == hpp->pnode)
 			return cpu;
+	}
 	return -1;
 }
 
@@ -366,28 +370,32 @@ static void do_reset(void *ptr)
  * Use IPI to get all target uvhubs to release resources held by
  * a given sending cpu number.
  */
-static void reset_with_ipi(struct bau_targ_hubmask *distribution, int sender)
+static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
 {
-	int uvhub;
+	int pnode;
+	int apnode;
 	int maskbits;
-	cpumask_t mask;
+	int sender = bcp->cpu;
+	cpumask_t *mask = bcp->uvhub_master->cpumask;
+	struct bau_control *smaster = bcp->socket_master;
 	struct reset_args reset_args;
 
 	reset_args.sender = sender;
-	cpus_clear(mask);
+	cpus_clear(*mask);
 	/* find a single cpu for each uvhub in this distribution mask */
-	maskbits = sizeof(struct bau_targ_hubmask) * BITSPERBYTE;
-	for (uvhub = 0; uvhub < maskbits; uvhub++) {
+	maskbits = sizeof(struct pnmask) * BITSPERBYTE;
+	/* each bit is a pnode relative to the partition base pnode */
+	for (pnode = 0; pnode < maskbits; pnode++) {
 		int cpu;
-		if (!bau_uvhub_isset(uvhub, distribution))
+		if (!bau_uvhub_isset(pnode, distribution))
 			continue;
-		/* find a cpu for this uvhub */
-		cpu = uvhub_to_first_cpu(uvhub);
-		cpu_set(cpu, mask);
+		apnode = pnode + bcp->partition_base_pnode;
+		cpu = pnode_to_first_cpu(apnode, smaster);
+		cpu_set(cpu, *mask);
 	}
 
 	/* IPI all cpus; preemption is already disabled */
-	smp_call_function_many(&mask, do_reset, (void *)&reset_args, 1);
+	smp_call_function_many(mask, do_reset, (void *)&reset_args, 1);
 	return;
 }
 
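A note on this hunk: besides switching from uvhub numbers to partition-relative pnodes, it stops declaring cpumask_t mask on the kernel stack ("x86, UV: Remove cpumask_t from the stack"). With NR_CPUS=4096 a cpumask_t is 512 bytes, which is a lot of stack for this path, so the code reuses a mask preallocated per uvhub master (see the make_per_hub_cpumask() hunk further down). For reference, a minimal sketch of the kernel's other common way to avoid an on-stack cpumask, cpumask_var_t; this is not what the patch uses, just the usual alternative:

    #include <linux/cpumask.h>
    #include <linux/gfp.h>

    static int example_send_ipis(void)	/* hypothetical caller */
    {
            cpumask_var_t mask;	/* pointer-sized when CONFIG_CPUMASK_OFFSTACK=y */

            if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                    return -ENOMEM;
            cpumask_clear(mask);
            /* ... set bits, then pass 'mask' to smp_call_function_many() ... */
            free_cpumask_var(mask);
            return 0;
    }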
@@ -604,7 +612,7 @@ static void destination_plugged(struct bau_desc *bau_desc,
 		quiesce_local_uvhub(hmaster);
 
 		spin_lock(&hmaster->queue_lock);
-		reset_with_ipi(&bau_desc->distribution, bcp->cpu);
+		reset_with_ipi(&bau_desc->distribution, bcp);
 		spin_unlock(&hmaster->queue_lock);
 
 		end_uvhub_quiesce(hmaster);
@@ -626,7 +634,7 @@ static void destination_timeout(struct bau_desc *bau_desc,
 		quiesce_local_uvhub(hmaster);
 
 		spin_lock(&hmaster->queue_lock);
-		reset_with_ipi(&bau_desc->distribution, bcp->cpu);
+		reset_with_ipi(&bau_desc->distribution, bcp);
 		spin_unlock(&hmaster->queue_lock);
 
 		end_uvhub_quiesce(hmaster);
@@ -1334,9 +1342,10 @@ static ssize_t tunables_write(struct file *file, const char __user *user,
 
 	instr[count] = '\0';
 
-	bcp = &per_cpu(bau_control, smp_processor_id());
-
+	cpu = get_cpu();
+	bcp = &per_cpu(bau_control, cpu);
 	ret = parse_tunables_write(bcp, instr, count);
+	put_cpu();
 	if (ret)
 		return ret;
 
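A note on this hunk ("x86, UV: Fix smp_processor_id() use in a preemptable region"): smp_processor_id() in preemptible code is unreliable because the task can migrate right after the call, and CONFIG_DEBUG_PREEMPT warns about it. get_cpu() disables preemption and returns the current cpu; put_cpu() re-enables it once the per-cpu data is no longer in use. A minimal sketch of the pattern, using a hypothetical per-cpu counter:

    #include <linux/percpu.h>
    #include <linux/smp.h>

    static DEFINE_PER_CPU(unsigned long, example_counter);	/* hypothetical */

    static void bump_this_cpu_counter(void)
    {
            int cpu = get_cpu();	/* disables preemption */

            per_cpu(example_counter, cpu)++;
            put_cpu();		/* re-enables preemption */
    }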
@@ -1686,6 +1695,16 @@ static void make_per_cpu_thp(struct bau_control *smaster)
 	}
 }
 
+/*
+ * Each uvhub is to get a local cpumask.
+ */
+static void make_per_hub_cpumask(struct bau_control *hmaster)
+{
+	int sz = sizeof(cpumask_t);
+
+	hmaster->cpumask = kzalloc_node(sz, GFP_KERNEL, hmaster->osnode);
+}
+
 /*
  * Initialize all the per_cpu information for the cpu's on a given socket,
  * given what has been gathered into the socket_desc struct.
@@ -1751,11 +1770,12 @@ static int __init summarize_uvhub_sockets(int nuvhubs,
 				sdp = &bdp->socket[socket];
 				if (scan_sock(sdp, bdp, &smaster, &hmaster))
 					return 1;
+				make_per_cpu_thp(smaster);
 			}
 			socket++;
 			socket_mask = (socket_mask >> 1);
-			make_per_cpu_thp(smaster);
 		}
+		make_per_hub_cpumask(hmaster);
 	}
 	return 0;
 }
@@ -1777,15 +1797,20 @@ static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
 	uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
 
 	if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask))
-		return 1;
+		goto fail;
 
 	if (summarize_uvhub_sockets(nuvhubs, uvhub_descs, uvhub_mask))
-		return 1;
+		goto fail;
 
 	kfree(uvhub_descs);
 	kfree(uvhub_mask);
 	init_per_cpu_tunables();
 	return 0;
+
+fail:
+	kfree(uvhub_descs);
+	kfree(uvhub_mask);
+	return 1;
 }
 
 /*
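A note on this hunk: the goto fail exit guarantees that uvhub_descs and uvhub_mask are freed on every early-return path, which is the leak fixed by "x86, UV: Correct failed topology memory leak". A generic sketch of the same single-exit cleanup idiom (the names below are invented):

    #include <linux/slab.h>

    static int example_setup(void)
    {
            void *descs, *mask;

            descs = kzalloc(64, GFP_KERNEL);
            mask = kzalloc(8, GFP_KERNEL);
            if (!descs || !mask)
                    goto fail;

            /* ... build per-cpu state from descs and mask ... */
            kfree(descs);
            kfree(mask);
            return 0;
    fail:
            kfree(descs);	/* kfree(NULL) is a safe no-op */
            kfree(mask);
            return 1;
    }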