powerpc: Merge in 64-bit powermac support.

This brings in a lot of changes from arch/ppc64/kernel/pmac_*.c to
arch/powerpc/platforms/powermac/*.c and makes various minor tweaks
elsewhere.  On the powermac we now initialize ppc_md by copying
the whole pmac_md structure into it, which required some changes in
the order in which its individual fields are initialized.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Paul Mackerras 2005-10-22 16:02:39 +10:00
parent b6ba92819d
commit 35499c0195
17 changed files with 1163 additions and 837 deletions
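
For illustration, the "copy the whole structure" pattern the message refers to looks roughly like the standalone sketch below. This is a simplified approximation, not the kernel code: struct machdep_calls has many more members, and the member names, helper functions and main() here are invented for the example. The real copy is the `ppc_md = pmac_md;` assignment in pmac_init() further down in the diff.

#include <stdio.h>

/* Minimal stand-in for the kernel's machdep dispatch table. */
struct machdep_calls {
	void (*setup_arch)(void);
	void (*power_save)(void);
	void (*progress)(char *s, unsigned short hex);
};

static void demo_setup_arch(void) { printf("setup_arch\n"); }
static void demo_progress(char *s, unsigned short hex) { printf("%s (%#x)\n", s, (unsigned int)hex); }
static void demo_power_save(void) { }

/* Platform template, standing in for pmac_md in the diff below. */
static struct machdep_calls pmac_md = {
	.setup_arch = demo_setup_arch,
	.progress   = demo_progress,
};

/* Global dispatch table, standing in for ppc_md. */
static struct machdep_calls ppc_md;

static void platform_init(void)
{
	/* Structure assignment copies every member at once; anything
	 * written into ppc_md before this point is lost. */
	ppc_md = pmac_md;
}

int main(void)
{
	platform_init();
	/* Per-field tweaks must come after the wholesale copy. */
	ppc_md.power_save = demo_power_save;

	ppc_md.setup_arch();
	if (ppc_md.progress)
		ppc_md.progress("machine_init(): done", 0x200);
	return 0;
}

This is why the ordering changed: for example, in machine_init() the ppc_md.power_save assignment now has to come after platform_init(), since the wholesale structure copy would otherwise overwrite it.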


@ -290,6 +290,7 @@ config PPC_PMAC
config PPC_PMAC64
bool
depends on PPC_PMAC && POWER4
select U3_DART
default y
config PPC_PREP


@ -1501,20 +1501,17 @@ copy_to_here:
.section ".text";
.align 2 ;
.globl pmac_secondary_start_1
pmac_secondary_start_1:
li r24, 1
b .pmac_secondary_start
.globl pmac_secondary_start_2
pmac_secondary_start_2:
li r24, 2
b .pmac_secondary_start
.globl pmac_secondary_start_3
pmac_secondary_start_3:
li r24, 3
b .pmac_secondary_start
.globl __secondary_start_pmac_0
__secondary_start_pmac_0:
/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
li r24,0
b 1f
li r24,1
b 1f
li r24,2
b 1f
li r24,3
1:
_GLOBAL(pmac_secondary_start)
/* turn on 64-bit mode */


@ -772,7 +772,7 @@ static unsigned long __init prom_next_cell(int s, cell_t **cellp)
}
r = *p++;
#ifdef CONFIG_PPC64
if (s) {
if (s > 1) {
r <<= 32;
r |= *(p++);
}
@ -2059,7 +2059,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
reloc_got2(-offset);
#endif
__start(hdr, 0, 0);
__start(hdr, KERNELBASE + offset, 0);
return 0;
}


@ -464,14 +464,11 @@ void __init machine_init(unsigned long dt_ptr, unsigned long phys)
strlcpy(cmd_line, CONFIG_CMDLINE, sizeof(cmd_line));
#endif /* CONFIG_CMDLINE */
platform_init();
#ifdef CONFIG_6xx
ppc_md.power_save = ppc6xx_idle;
#endif
#ifdef CONFIG_POWER4
ppc_md.power_save = power4_idle;
#endif
platform_init();
if (ppc_md.progress)
ppc_md.progress("id mach(): done", 0x200);


@ -1,4 +1,4 @@
ifeq ($(CONFIG_PPC32),y)
ifeq ($(CONFIG_PPC_MERGE),y)
obj-$(CONFIG_PPC_PMAC) += powermac/
endif
obj-$(CONFIG_4xx) += 4xx/


@ -1,8 +1,8 @@
obj-$(CONFIG_PPC_PMAC) += pic.o setup.o time.o feature.o pci.o \
obj-y += pic.o setup.o time.o feature.o pci.o \
sleep.o low_i2c.o cache.o
obj-$(CONFIG_PMAC_BACKLIGHT) += backlight.o
obj-$(CONFIG_CPU_FREQ_PMAC) += cpufreq.o
ifeq ($(CONFIG_PPC_PMAC),y)
obj-$(CONFIG_NVRAM) += nvram.o
# ppc64 pmac doesn't define CONFIG_NVRAM but needs nvram stuff
obj-$(CONFIG_PPC64) += nvram.o
obj-$(CONFIG_SMP) += smp.o
endif


@ -2960,7 +2960,6 @@ static void dump_HT_speeds(char *name, u32 cfg, u32 frq)
void __init pmac_check_ht_link(void)
{
#if 0 /* Disabled for now */
u32 ufreq, freq, ucfg, cfg;
struct device_node *pcix_node;
u8 px_bus, px_devfn;
@ -2991,10 +2990,8 @@ void __init pmac_check_ht_link(void)
early_read_config_dword(px_hose, px_bus, px_devfn, 0xc8, &cfg);
early_read_config_dword(px_hose, px_bus, px_devfn, 0xd0, &freq);
dump_HT_speeds("PCI-X HT Downlink", cfg, freq);
#endif
}
#endif /* CONFIG_POWER4 */
#endif /* 0 */
/*
* Early video resume hook


@ -47,7 +47,8 @@
/* On Core99, nvram is either a sharp, a micron or an AMD flash */
#define SM_FLASH_STATUS_DONE 0x80
#define SM_FLASH_STATUS_ERR 0x38
#define SM_FLASH_CMD_ERASE_CONFIRM 0xd0
#define SM_FLASH_CMD_ERASE_SETUP 0x20
#define SM_FLASH_CMD_RESET 0xff
@ -75,11 +76,11 @@ struct core99_header {
* Read and write the non-volatile RAM on PowerMacs and CHRP machines.
*/
static int nvram_naddrs;
static volatile unsigned char *nvram_addr;
static volatile unsigned char *nvram_data;
static int nvram_mult, is_core_99;
static int is_core_99;
static int core99_bank = 0;
static int nvram_partitions[3];
// XXX Turn that into a sem
static DEFINE_SPINLOCK(nv_lock);
extern int pmac_newworld;
@ -105,6 +106,52 @@ static void core99_nvram_write_byte(int addr, unsigned char val)
nvram_image[addr] = val;
}
static ssize_t core99_nvram_read(char *buf, size_t count, loff_t *index)
{
int i;
if (nvram_image == NULL)
return -ENODEV;
if (*index > NVRAM_SIZE)
return 0;
i = *index;
if (i + count > NVRAM_SIZE)
count = NVRAM_SIZE - i;
memcpy(buf, &nvram_image[i], count);
*index = i + count;
return count;
}
static ssize_t core99_nvram_write(char *buf, size_t count, loff_t *index)
{
int i;
if (nvram_image == NULL)
return -ENODEV;
if (*index > NVRAM_SIZE)
return 0;
i = *index;
if (i + count > NVRAM_SIZE)
count = NVRAM_SIZE - i;
memcpy(&nvram_image[i], buf, count);
*index = i + count;
return count;
}
static ssize_t core99_nvram_size(void)
{
if (nvram_image == NULL)
return -ENODEV;
return NVRAM_SIZE;
}
#ifdef CONFIG_PPC32
static volatile unsigned char *nvram_addr;
static int nvram_mult;
static unsigned char direct_nvram_read_byte(int addr)
{
@ -181,7 +228,7 @@ static void pmu_nvram_write_byte(int addr, unsigned char val)
}
#endif /* CONFIG_ADB_PMU */
#endif /* CONFIG_PPC32 */
static u8 chrp_checksum(struct chrp_header* hdr)
{
@ -249,7 +296,7 @@ static int sm_erase_bank(int bank)
timeout = 0;
do {
if (++timeout > 1000000) {
printk(KERN_ERR "nvram: Sharp/Miron flash erase timeout !\n");
printk(KERN_ERR "nvram: Sharp/Micron flash erase timeout !\n");
break;
}
out_8(base, SM_FLASH_CMD_READ_STATUS);
@ -411,7 +458,7 @@ static void __init lookup_partitions(void)
buffer[16] = 0;
do {
for (i=0;i<16;i++)
buffer[i] = nvram_read_byte(offset+i);
buffer[i] = ppc_md.nvram_read_val(offset+i);
if (!strcmp(hdr->name, "common"))
nvram_partitions[pmac_nvram_OF] = offset + 0x10;
if (!strcmp(hdr->name, "APL,MacOS75")) {
@ -467,65 +514,76 @@ static void core99_nvram_sync(void)
#endif
}
void __init pmac_nvram_init(void)
static int __init core99_nvram_setup(struct device_node *dp)
{
int i;
u32 gen_bank0, gen_bank1;
if (nvram_naddrs < 1) {
printk(KERN_ERR "nvram: no address\n");
return -EINVAL;
}
nvram_image = alloc_bootmem(NVRAM_SIZE);
if (nvram_image == NULL) {
printk(KERN_ERR "nvram: can't allocate ram image\n");
return -ENOMEM;
}
nvram_data = ioremap(dp->addrs[0].address, NVRAM_SIZE*2);
nvram_naddrs = 1; /* Make sure we get the correct case */
DBG("nvram: Checking bank 0...\n");
gen_bank0 = core99_check((u8 *)nvram_data);
gen_bank1 = core99_check((u8 *)nvram_data + NVRAM_SIZE);
core99_bank = (gen_bank0 < gen_bank1) ? 1 : 0;
DBG("nvram: gen0=%d, gen1=%d\n", gen_bank0, gen_bank1);
DBG("nvram: Active bank is: %d\n", core99_bank);
for (i=0; i<NVRAM_SIZE; i++)
nvram_image[i] = nvram_data[i + core99_bank*NVRAM_SIZE];
ppc_md.nvram_read_val = core99_nvram_read_byte;
ppc_md.nvram_write_val = core99_nvram_write_byte;
ppc_md.nvram_read = core99_nvram_read;
ppc_md.nvram_write = core99_nvram_write;
ppc_md.nvram_size = core99_nvram_size;
ppc_md.nvram_sync = core99_nvram_sync;
/*
* Maybe we could be smarter here though making an exclusive list
* of known flash chips is a bit nasty as older OF didn't provide us
* with a useful "compatible" entry. A solution would be to really
* identify the chip using flash id commands and base ourselves on
* a list of known chips IDs
*/
if (device_is_compatible(dp, "amd-0137")) {
core99_erase_bank = amd_erase_bank;
core99_write_bank = amd_write_bank;
} else {
core99_erase_bank = sm_erase_bank;
core99_write_bank = sm_write_bank;
}
return 0;
}
int __init pmac_nvram_init(void)
{
struct device_node *dp;
int err = 0;
nvram_naddrs = 0;
dp = find_devices("nvram");
if (dp == NULL) {
printk(KERN_ERR "Can't find NVRAM device\n");
return;
return -ENODEV;
}
nvram_naddrs = dp->n_addrs;
is_core_99 = device_is_compatible(dp, "nvram,flash");
if (is_core_99) {
int i;
u32 gen_bank0, gen_bank1;
if (nvram_naddrs < 1) {
printk(KERN_ERR "nvram: no address\n");
return;
}
nvram_image = alloc_bootmem(NVRAM_SIZE);
if (nvram_image == NULL) {
printk(KERN_ERR "nvram: can't allocate ram image\n");
return;
}
nvram_data = ioremap(dp->addrs[0].address, NVRAM_SIZE*2);
nvram_naddrs = 1; /* Make sure we get the correct case */
DBG("nvram: Checking bank 0...\n");
gen_bank0 = core99_check((u8 *)nvram_data);
gen_bank1 = core99_check((u8 *)nvram_data + NVRAM_SIZE);
core99_bank = (gen_bank0 < gen_bank1) ? 1 : 0;
DBG("nvram: gen0=%d, gen1=%d\n", gen_bank0, gen_bank1);
DBG("nvram: Active bank is: %d\n", core99_bank);
for (i=0; i<NVRAM_SIZE; i++)
nvram_image[i] = nvram_data[i + core99_bank*NVRAM_SIZE];
ppc_md.nvram_read_val = core99_nvram_read_byte;
ppc_md.nvram_write_val = core99_nvram_write_byte;
ppc_md.nvram_sync = core99_nvram_sync;
/*
* Maybe we could be smarter here though making an exclusive list
* of known flash chips is a bit nasty as older OF didn't provide us
* with a useful "compatible" entry. A solution would be to really
* identify the chip using flash id commands and base ourselves on
* a list of known chips IDs
*/
if (device_is_compatible(dp, "amd-0137")) {
core99_erase_bank = amd_erase_bank;
core99_write_bank = amd_write_bank;
} else {
core99_erase_bank = sm_erase_bank;
core99_write_bank = sm_write_bank;
}
} else if (_machine == _MACH_chrp && nvram_naddrs == 1) {
if (is_core_99)
err = core99_nvram_setup(dp);
#ifdef CONFIG_PPC32
else if (_machine == _MACH_chrp && nvram_naddrs == 1) {
nvram_data = ioremap(dp->addrs[0].address + isa_mem_base,
dp->addrs[0].size);
nvram_mult = 1;
@ -547,11 +605,14 @@ void __init pmac_nvram_init(void)
ppc_md.nvram_read_val = pmu_nvram_read_byte;
ppc_md.nvram_write_val = pmu_nvram_write_byte;
#endif /* CONFIG_ADB_PMU */
} else {
printk(KERN_ERR "Don't know how to access NVRAM with %d addresses\n",
nvram_naddrs);
}
#endif
else {
printk(KERN_ERR "Incompatible type of NVRAM\n");
return -ENXIO;
}
lookup_partitions();
return err;
}
int pmac_get_partition(int partition)
@ -561,9 +622,9 @@ int pmac_get_partition(int partition)
u8 pmac_xpram_read(int xpaddr)
{
int offset = nvram_partitions[pmac_nvram_XPRAM];
int offset = pmac_get_partition(pmac_nvram_XPRAM);
if (offset < 0)
if (offset < 0 || xpaddr < 0 || xpaddr > 0x100)
return 0xff;
return ppc_md.nvram_read_val(xpaddr + offset);
@ -571,9 +632,9 @@ u8 pmac_xpram_read(int xpaddr)
void pmac_xpram_write(int xpaddr, u8 data)
{
int offset = nvram_partitions[pmac_nvram_XPRAM];
int offset = pmac_get_partition(pmac_nvram_XPRAM);
if (offset < 0)
if (offset < 0 || xpaddr < 0 || xpaddr > 0x100)
return;
ppc_md.nvram_write_val(xpaddr + offset, data);


@ -37,14 +37,14 @@
#endif
static int add_bridge(struct device_node *dev);
extern void pmac_check_ht_link(void);
/* XXX Could be per-controller, but I don't think we risk anything by
* assuming we won't have both UniNorth and Bandit */
static int has_uninorth;
#ifdef CONFIG_POWER4
#ifdef CONFIG_PPC64
static struct pci_controller *u3_agp;
#endif /* CONFIG_POWER4 */
static struct pci_controller *u3_ht;
#endif /* CONFIG_PPC64 */
extern u8 pci_cache_line_size;
extern int pcibios_assign_bus_offset;
@ -229,6 +229,7 @@ static struct pci_ops macrisc_pci_ops =
macrisc_write_config
};
#ifdef CONFIG_PPC32
/*
* Verify that a specific (bus, dev_fn) exists on chaos
*/
@ -282,7 +283,19 @@ static struct pci_ops chaos_pci_ops =
chaos_write_config
};
#ifdef CONFIG_POWER4
static void __init setup_chaos(struct pci_controller *hose,
struct reg_property *addr)
{
/* assume a `chaos' bridge */
hose->ops = &chaos_pci_ops;
hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000);
hose->cfg_data = ioremap(addr->address + 0xc00000, 0x1000);
}
#else
#define setup_chaos(hose, addr)
#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC64
/*
* These versions of U3 HyperTransport config space access ops do not
* implement self-view of the HT host yet
@ -445,8 +458,9 @@ static struct pci_ops u3_ht_pci_ops =
u3_ht_read_config,
u3_ht_write_config
};
#endif /* CONFIG_POWER4 */
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC32
/*
* For a bandit bridge, turn on cache coherency if necessary.
* N.B. we could clean this up using the hose ops directly.
@ -487,7 +501,6 @@ static void __init init_bandit(struct pci_controller *bp)
printk(KERN_INFO "Cache coherency enabled for bandit/PSX\n");
}
/*
* Tweak the PCI-PCI bridge chip on the blue & white G3s.
*/
@ -539,7 +552,7 @@ static void __init fixup_nec_usb2(void)
struct pci_controller *hose;
u32 data, *prop;
u8 bus, devfn;
prop = (u32 *)get_property(nec, "vendor-id", NULL);
if (prop == NULL)
continue;
@ -605,8 +618,27 @@ static inline void grackle_set_loop_snoop(struct pci_controller *bp, int enable)
(void)in_le32(bp->cfg_data);
}
static int __init
setup_uninorth(struct pci_controller* hose, struct reg_property* addr)
void __init setup_grackle(struct pci_controller *hose)
{
setup_indirect_pci(hose, 0xfec00000, 0xfee00000);
if (machine_is_compatible("AAPL,PowerBook1998"))
grackle_set_loop_snoop(hose, 1);
#if 0 /* Disabled for now, HW problems ??? */
grackle_set_stg(hose, 1);
#endif
}
static void __init setup_bandit(struct pci_controller *hose,
struct reg_property *addr)
{
hose->ops = &macrisc_pci_ops;
hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000);
hose->cfg_data = ioremap(addr->address + 0xc00000, 0x1000);
init_bandit(hose);
}
static int __init setup_uninorth(struct pci_controller *hose,
struct reg_property *addr)
{
pci_assign_all_buses = 1;
has_uninorth = 1;
@ -616,31 +648,14 @@ setup_uninorth(struct pci_controller* hose, struct reg_property* addr)
/* We "know" that the bridge at f2000000 has the PCI slots. */
return addr->address == 0xf2000000;
}
#endif
static void __init
setup_bandit(struct pci_controller* hose, struct reg_property* addr)
{
hose->ops = &macrisc_pci_ops;
hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000);
hose->cfg_data = ioremap(addr->address + 0xc00000, 0x1000);
init_bandit(hose);
}
static void __init
setup_chaos(struct pci_controller* hose, struct reg_property* addr)
{
/* assume a `chaos' bridge */
hose->ops = &chaos_pci_ops;
hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000);
hose->cfg_data = ioremap(addr->address + 0xc00000, 0x1000);
}
#ifdef CONFIG_POWER4
#ifdef CONFIG_PPC64
static void __init setup_u3_agp(struct pci_controller* hose)
{
/* On G5, we move AGP up to high bus number so we don't need
* to reassign bus numbers for HT. If we ever have P2P bridges
* on AGP, we'll have to move pci_assign_all_buses to the
* on AGP, we'll have to move pci_assign_all_busses to the
* pci_controller structure so we enable it for AGP and not for
* HT childs.
* We hard code the address because of the different size of
@ -679,8 +694,7 @@ static void __init setup_u3_ht(struct pci_controller* hose)
* then read its configuration register (if any).
*/
hose->io_base_phys = 0xf4000000;
hose->io_base_virt = ioremap(hose->io_base_phys, 0x00400000);
isa_io_base = pci_io_base = (unsigned long) hose->io_base_virt;
hose->pci_io_size = 0x00400000;
hose->io_resource.name = np->full_name;
hose->io_resource.start = 0;
hose->io_resource.end = 0x003fffff;
@ -693,6 +707,8 @@ static void __init setup_u3_ht(struct pci_controller* hose)
hose->mem_resources[0].end = 0xefffffff;
hose->mem_resources[0].flags = IORESOURCE_MEM;
u3_ht = hose;
if (u3_agp == NULL) {
DBG("U3 has no AGP, using full resource range\n");
return;
@ -730,7 +746,7 @@ static void __init setup_u3_ht(struct pci_controller* hose)
printk(KERN_WARNING "Running out of resources for /ht host !\n");
hose->mem_resources[cur].end = res->start - 1;
continue;
}
}
cur++;
DBG("U3/HT: hole, %d end at %08lx, %d start at %08lx\n",
cur-1, res->start - 1, cur, res->end + 1);
@ -742,126 +758,17 @@ static void __init setup_u3_ht(struct pci_controller* hose)
}
}
#endif /* CONFIG_POWER4 */
void __init
setup_grackle(struct pci_controller *hose)
/* XXX this needs to be converged between ppc32 and ppc64... */
static struct pci_controller * __init pcibios_alloc_controller(void)
{
setup_indirect_pci(hose, 0xfec00000, 0xfee00000);
if (machine_is_compatible("AAPL,PowerBook1998"))
grackle_set_loop_snoop(hose, 1);
#if 0 /* Disabled for now, HW problems ??? */
grackle_set_stg(hose, 1);
#endif
struct pci_controller *hose;
hose = alloc_bootmem(sizeof(struct pci_controller));
if (hose)
pci_setup_pci_controller(hose);
return hose;
}
static void __init pmac_process_bridge_OF_ranges(struct pci_controller *hose,
struct device_node *dev, int primary)
{
static unsigned int static_lc_ranges[2024];
unsigned int *dt_ranges, *lc_ranges, *ranges, *prev;
unsigned int size;
int rlen = 0, orig_rlen;
int memno = 0;
struct resource *res;
int np, na = prom_n_addr_cells(dev);
np = na + 5;
/* First we try to merge ranges to fix a problem with some pmacs
* that can have more than 3 ranges, fortunately using contiguous
* addresses -- BenH
*/
dt_ranges = (unsigned int *) get_property(dev, "ranges", &rlen);
if (!dt_ranges)
return;
/* lc_ranges = alloc_bootmem(rlen);*/
lc_ranges = static_lc_ranges;
if (!lc_ranges)
return; /* what can we do here ? */
memcpy(lc_ranges, dt_ranges, rlen);
orig_rlen = rlen;
/* Let's work on a copy of the "ranges" property instead of damaging
* the device-tree image in memory
*/
ranges = lc_ranges;
prev = NULL;
while ((rlen -= np * sizeof(unsigned int)) >= 0) {
if (prev) {
if (prev[0] == ranges[0] && prev[1] == ranges[1] &&
(prev[2] + prev[na+4]) == ranges[2] &&
(prev[na+2] + prev[na+4]) == ranges[na+2]) {
prev[na+4] += ranges[na+4];
ranges[0] = 0;
ranges += np;
continue;
}
}
prev = ranges;
ranges += np;
}
/*
* The ranges property is laid out as an array of elements,
* each of which comprises:
* cells 0 - 2: a PCI address
* cells 3 or 3+4: a CPU physical address
* (size depending on dev->n_addr_cells)
* cells 4+5 or 5+6: the size of the range
*/
ranges = lc_ranges;
rlen = orig_rlen;
while (ranges && (rlen -= np * sizeof(unsigned int)) >= 0) {
res = NULL;
size = ranges[na+4];
switch (ranges[0] >> 24) {
case 1: /* I/O space */
if (ranges[2] != 0)
break;
hose->io_base_phys = ranges[na+2];
/* limit I/O space to 16MB */
if (size > 0x01000000)
size = 0x01000000;
hose->io_base_virt = ioremap(ranges[na+2], size);
if (primary)
isa_io_base = (unsigned long) hose->io_base_virt;
res = &hose->io_resource;
res->flags = IORESOURCE_IO;
res->start = ranges[2];
break;
case 2: /* memory space */
memno = 0;
if (ranges[1] == 0 && ranges[2] == 0
&& ranges[na+4] <= (16 << 20)) {
/* 1st 16MB, i.e. ISA memory area */
#if 0
if (primary)
isa_mem_base = ranges[na+2];
#endif
memno = 1;
}
while (memno < 3 && hose->mem_resources[memno].flags)
++memno;
if (memno == 0)
hose->pci_mem_offset = ranges[na+2] - ranges[2];
if (memno < 3) {
res = &hose->mem_resources[memno];
res->flags = IORESOURCE_MEM;
res->start = ranges[na+2];
}
break;
}
if (res != NULL) {
res->name = dev->full_name;
res->end = res->start + size - 1;
res->parent = NULL;
res->sibling = NULL;
res->child = NULL;
}
ranges += np;
}
}
/*
* We assume that if we have a G3 powermac, we have one bridge called
@ -872,70 +779,78 @@ static int __init add_bridge(struct device_node *dev)
{
int len;
struct pci_controller *hose;
#ifdef CONFIG_PPC32
struct reg_property *addr;
char* disp_name;
#endif
char *disp_name;
int *bus_range;
int primary = 1;
DBG("Adding PCI host bridge %s\n", dev->full_name);
addr = (struct reg_property *) get_property(dev, "reg", &len);
if (addr == NULL || len < sizeof(*addr)) {
printk(KERN_WARNING "Can't use %s: no address\n",
dev->full_name);
return -ENODEV;
}
bus_range = (int *) get_property(dev, "bus-range", &len);
if (bus_range == NULL || len < 2 * sizeof(int)) {
printk(KERN_WARNING "Can't get bus-range for %s, assume bus 0\n",
dev->full_name);
}
#ifdef CONFIG_PPC32
/* XXX fix this */
addr = (struct reg_property *) get_property(dev, "reg", &len);
if (addr == NULL || len < sizeof(*addr)) {
printk(KERN_WARNING "Can't use %s: no address\n",
dev->full_name);
return -ENODEV;
}
#endif
bus_range = (int *) get_property(dev, "bus-range", &len);
if (bus_range == NULL || len < 2 * sizeof(int)) {
printk(KERN_WARNING "Can't get bus-range for %s, assume bus 0\n",
dev->full_name);
}
hose = pcibios_alloc_controller();
if (!hose)
return -ENOMEM;
hose->arch_data = dev;
hose->first_busno = bus_range ? bus_range[0] : 0;
hose->last_busno = bus_range ? bus_range[1] : 0xff;
hose = pcibios_alloc_controller();
if (!hose)
return -ENOMEM;
hose->arch_data = dev;
hose->first_busno = bus_range ? bus_range[0] : 0;
hose->last_busno = bus_range ? bus_range[1] : 0xff;
disp_name = NULL;
#ifdef CONFIG_POWER4
if (device_is_compatible(dev, "u3-agp")) {
setup_u3_agp(hose, addr);
disp_name = "U3-AGP";
primary = 0;
} else if (device_is_compatible(dev, "u3-ht")) {
setup_u3_ht(hose, addr);
disp_name = "U3-HT";
primary = 1;
} else
#endif /* CONFIG_POWER4 */
if (device_is_compatible(dev, "u3-agp")) {
setup_u3_agp(hose);
disp_name = "U3-AGP";
primary = 0;
} else if (device_is_compatible(dev, "u3-ht")) {
setup_u3_ht(hose);
disp_name = "U3-HT";
primary = 1;
}
printk(KERN_INFO "Found %s PCI host bridge. Firmware bus number: %d->%d\n",
disp_name, hose->first_busno, hose->last_busno);
#else
if (device_is_compatible(dev, "uni-north")) {
primary = setup_uninorth(hose, addr);
disp_name = "UniNorth";
primary = setup_uninorth(hose, addr);
disp_name = "UniNorth";
} else if (strcmp(dev->name, "pci") == 0) {
/* XXX assume this is a mpc106 (grackle) */
setup_grackle(hose);
disp_name = "Grackle (MPC106)";
} else if (strcmp(dev->name, "bandit") == 0) {
setup_bandit(hose, addr);
disp_name = "Bandit";
} else if (strcmp(dev->name, "chaos") == 0) {
setup_chaos(hose, addr);
disp_name = "Chaos";
primary = 0;
}
printk(KERN_INFO "Found %s PCI host bridge at 0x%08lx. Firmware bus number: %d->%d\n",
disp_name, addr->address, hose->first_busno, hose->last_busno);
DBG(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
hose, hose->cfg_addr, hose->cfg_data);
/* XXX assume this is a mpc106 (grackle) */
setup_grackle(hose);
disp_name = "Grackle (MPC106)";
} else if (strcmp(dev->name, "bandit") == 0) {
setup_bandit(hose, addr);
disp_name = "Bandit";
} else if (strcmp(dev->name, "chaos") == 0) {
setup_chaos(hose, addr);
disp_name = "Chaos";
primary = 0;
}
printk(KERN_INFO "Found %s PCI host bridge at 0x%08lx. Firmware bus number: %d->%d\n",
disp_name, addr->address, hose->first_busno, hose->last_busno);
#endif
DBG(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
hose, hose->cfg_addr, hose->cfg_data);
/* Interpret the "ranges" property */
/* This also maps the I/O region and sets isa_io/mem_base */
pci_process_bridge_OF_ranges(hose, dev, primary);
/* Interpret the "ranges" property */
/* This also maps the I/O region and sets isa_io/mem_base */
pci_process_bridge_OF_ranges(hose, dev, primary);
/* Fixup "bus-range" OF property */
fixup_bus_range(dev);
/* Fixup "bus-range" OF property */
fixup_bus_range(dev);
return 0;
}
@ -968,14 +883,28 @@ pmac_pcibios_fixup(void)
pcibios_fixup_OF_interrupts();
}
void __init pmac_find_bridges(void)
#ifdef CONFIG_PPC64
static void __init pmac_fixup_phb_resources(void)
{
struct pci_controller *hose, *tmp;
list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
printk(KERN_INFO "PCI Host %d, io start: %lx; io end: %lx\n",
hose->global_number,
hose->io_resource.start, hose->io_resource.end);
}
}
#endif
void __init pmac_pci_init(void)
{
struct device_node *np, *root;
struct device_node *ht = NULL;
root = of_find_node_by_path("/");
if (root == NULL) {
printk(KERN_CRIT "pmac_find_bridges: can't find root of device tree\n");
printk(KERN_CRIT "pmac_pci_init: can't find root "
"of device tree\n");
return;
}
for (np = NULL; (np = of_get_next_child(root, np)) != NULL;) {
@ -994,22 +923,66 @@ void __init pmac_find_bridges(void)
}
of_node_put(root);
#ifdef CONFIG_PPC64
/* Probe HT last as it relies on the agp resources to be already
* setup
*/
if (ht && add_bridge(ht) != 0)
of_node_put(ht);
/*
* We need to call pci_setup_phb_io for the HT bridge first
* so it gets the I/O port numbers starting at 0, and we
* need to call it for the AGP bridge after that so it gets
* small positive I/O port numbers.
*/
if (u3_ht)
pci_setup_phb_io(u3_ht, 1);
if (u3_agp)
pci_setup_phb_io(u3_agp, 0);
/*
* On ppc64, fixup the IO resources on our host bridges as
* the common code does it only for children of the host bridges
*/
pmac_fixup_phb_resources();
/* Setup the linkage between OF nodes and PHBs */
pci_devs_phb_init();
/* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We
* assume there is no P2P bridge on the AGP bus, which should be a
* safe assumptions hopefully.
*/
if (u3_agp) {
struct device_node *np = u3_agp->arch_data;
PCI_DN(np)->busno = 0xf0;
for (np = np->child; np; np = np->sibling)
PCI_DN(np)->busno = 0xf0;
}
/* map in PCI I/O space */
phbs_remap_io();
/* pmac_check_ht_link(); */
/* Tell pci.c to not use the common resource allocation mechanism */
pci_probe_only = 1;
/* Allow all IO */
io_page_mask = -1;
#else /* CONFIG_PPC64 */
init_p2pbridge();
fixup_nec_usb2();
/* We are still having some issues with the Xserve G4, enabling
* some offset between bus number and domains for now when we
* assign all busses should help for now
*/
if (pci_assign_all_buses)
pcibios_assign_bus_offset = 0x10;
#endif
}
int
@ -1037,7 +1010,7 @@ pmac_pci_enable_device_hook(struct pci_dev *dev, int initial)
uninorth_child = node->parent &&
device_is_compatible(node->parent, "uni-north");
/* Firewire & GMAC were disabled after PCI probe, the driver is
* claiming them, we must re-enable them now.
*/
@ -1057,7 +1030,7 @@ pmac_pci_enable_device_hook(struct pci_dev *dev, int initial)
if (updatecfg) {
u16 cmd;
/*
* Make sure PCI is correctly configured
*
@ -1069,10 +1042,12 @@ pmac_pci_enable_device_hook(struct pci_dev *dev, int initial)
* register the device.
*/
pci_read_config_word(dev, PCI_COMMAND, &cmd);
cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE;
pci_write_config_word(dev, PCI_COMMAND, cmd);
pci_write_config_byte(dev, PCI_LATENCY_TIMER, 16);
pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER
| PCI_COMMAND_INVALIDATE;
pci_write_config_word(dev, PCI_COMMAND, cmd);
pci_write_config_byte(dev, PCI_LATENCY_TIMER, 16);
pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
L1_CACHE_BYTES >> 2);
}
return 0;
@ -1081,8 +1056,7 @@ pmac_pci_enable_device_hook(struct pci_dev *dev, int initial)
/* We power down some devices after they have been probed. They'll
* be powered back on later on
*/
void __init
pmac_pcibios_after_init(void)
void __init pmac_pcibios_after_init(void)
{
struct device_node* nd;
@ -1125,84 +1099,6 @@ pmac_pcibios_after_init(void)
}
}
#ifdef CONFIG_PPC64
static void __init pmac_fixup_phb_resources(void)
{
struct pci_controller *hose, *tmp;
list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
unsigned long offset = (unsigned long)hose->io_base_virt - pci_io_base;
hose->io_resource.start += offset;
hose->io_resource.end += offset;
printk(KERN_INFO "PCI Host %d, io start: %lx; io end: %lx\n",
hose->global_number,
hose->io_resource.start, hose->io_resource.end);
}
}
void __init pmac_pci_init(void)
{
struct device_node *np, *root;
struct device_node *ht = NULL;
/* Probe root PCI hosts, that is on U3 the AGP host and the
* HyperTransport host. That one is actually "kept" around
* and actually added last as it's resource management relies
* on the AGP resources to have been setup first
*/
root = of_find_node_by_path("/");
if (root == NULL) {
printk(KERN_CRIT "pmac_find_bridges: can't find root of device tree\n");
return;
}
for (np = NULL; (np = of_get_next_child(root, np)) != NULL;) {
if (np->name == NULL)
continue;
if (strcmp(np->name, "pci") == 0) {
if (add_bridge(np) == 0)
of_node_get(np);
}
if (strcmp(np->name, "ht") == 0) {
of_node_get(np);
ht = np;
}
}
of_node_put(root);
/* Now setup the HyperTransport host if we found any
*/
if (ht && add_bridge(ht) != 0)
of_node_put(ht);
/* Fixup the IO resources on our host bridges as the common code
* does it only for childs of the host bridges
*/
pmac_fixup_phb_resources();
/* Setup the linkage between OF nodes and PHBs */
pci_devs_phb_init();
/* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We
* assume there is no P2P bridge on the AGP bus, which should be a
* safe assumptions hopefully.
*/
if (u3_agp) {
struct device_node *np = u3_agp->arch_data;
PCI_DN(np)->busno = 0xf0;
for (np = np->child; np; np = np->sibling)
PCI_DN(np)->busno = 0xf0;
}
pmac_check_ht_link();
/* Tell pci.c to not use the common resource allocation mecanism */
pci_probe_only = 1;
/* Allow all IO */
io_page_mask = -1;
}
#endif
#ifdef CONFIG_PPC32
void pmac_pci_fixup_cardbus(struct pci_dev* dev)
{
@ -1217,7 +1113,7 @@ void pmac_pci_fixup_cardbus(struct pci_dev* dev)
if (dev->device == PCI_DEVICE_ID_TI_1130 ||
dev->device == PCI_DEVICE_ID_TI_1131) {
u8 val;
/* Enable PCI interrupt */
/* Enable PCI interrupt */
if (pci_read_config_byte(dev, 0x91, &val) == 0)
pci_write_config_byte(dev, 0x91, val | 0x30);
/* Disable ISA interrupt mode */


@ -430,7 +430,6 @@ void __init pmac_pic_init(void)
printk(KERN_INFO "PowerMac using OpenPIC irq controller at 0x%08x\n",
(unsigned int)irqctrler->addrs[0].address);
ppc_md.get_irq = mpic_get_irq;
pmac_call_feature(PMAC_FTR_ENABLE_MPIC, irqctrler, 0, 0);
prom_get_irq_senses(senses, 0, 128);
@ -483,6 +482,7 @@ void __init pmac_pic_init(void)
* a Grand Central nor an OHare, then it's an Heathrow
* (or Paddington).
*/
ppc_md.get_irq = pmac_get_irq;
if (find_devices("gc"))
level_mask[0] = GC_LEVEL_MASK;
else if (find_devices("ohare")) {


@ -19,7 +19,7 @@ extern int pmac_set_rtc_time(struct rtc_time *);
extern void pmac_read_rtc_time(void);
extern void pmac_calibrate_decr(void);
extern void pmac_pcibios_fixup(void);
extern void pmac_find_bridges(void);
extern void pmac_pci_init(void);
extern unsigned long pmac_ide_get_base(int index);
extern void pmac_ide_init_hwif_ports(hw_regs_t *hw,
unsigned long data_port, unsigned long ctrl_port, int *irq);
@ -41,7 +41,7 @@ extern unsigned long pmac_ide_get_base(int index);
extern void pmac_ide_init_hwif_ports(hw_regs_t *hw,
unsigned long data_port, unsigned long ctrl_port, int *irq);
extern void pmac_nvram_init(void);
extern int pmac_nvram_init(void);
extern struct hw_interrupt_type pmac_pic;


@ -1,11 +1,11 @@
/*
* arch/ppc/platforms/setup.c
* Powermac setup and early boot code plus other random bits.
*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Adapted for Power Macintosh by Paul Mackerras
* Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
* Copyright (C) 1996 Paul Mackerras (paulus@samba.org)
*
* Derived from "arch/alpha/kernel/setup.c"
* Copyright (C) 1995 Linus Torvalds
@ -65,13 +65,16 @@
#include <asm/mediabay.h>
#include <asm/machdep.h>
#include <asm/dma.h>
#include <asm/bootx.h>
#include <asm/cputable.h>
#include <asm/btext.h>
#include <asm/pmac_feature.h>
#include <asm/time.h>
#include <asm/of_device.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/smu.h>
#include <asm/pmc.h>
#include <asm/mpic.h>
#include "pmac.h"
@ -88,16 +91,24 @@ int pmac_newworld = 1;
static int current_root_goodness = -1;
extern int pmac_newworld;
extern struct machdep_calls pmac_md;
#define DEFAULT_ROOT_DEVICE Root_SDA1 /* sda1 - slightly silly choice */
extern void zs_kgdb_hook(int tty_num);
static void ohare_init(void);
#ifdef CONFIG_BOOTX_TEXT
static void pmac_progress(char *s, unsigned short hex);
#ifdef CONFIG_PPC64
#include <asm/udbg.h>
int sccdbg;
#endif
extern void zs_kgdb_hook(int tty_num);
sys_ctrler_t sys_ctrler = SYS_CTRLER_UNKNOWN;
EXPORT_SYMBOL(sys_ctrler);
#ifdef CONFIG_PMAC_SMU
unsigned long smu_cmdbuf_abs;
EXPORT_SYMBOL(smu_cmdbuf_abs);
#endif
#ifdef CONFIG_SMP
extern struct smp_ops_t psurge_smp_ops;
@ -191,44 +202,69 @@ static void pmac_show_percpuinfo(struct seq_file *m, int i)
return;
}
#endif /* CONFIG_CPU_FREQ_PMAC */
#ifdef CONFIG_PPC32
of_show_percpuinfo(m, i);
#endif
}
#ifndef CONFIG_ADB_CUDA
int find_via_cuda(void)
{
if (!find_devices("via-cuda"))
return 0;
printk("WARNING ! Your machine is CUDA-based but your kernel\n");
printk(" wasn't compiled with CONFIG_ADB_CUDA option !\n");
return 0;
}
#endif
#ifndef CONFIG_ADB_PMU
int find_via_pmu(void)
{
if (!find_devices("via-pmu"))
return 0;
printk("WARNING ! Your machine is PMU-based but your kernel\n");
printk(" wasn't compiled with CONFIG_ADB_PMU option !\n");
return;
}
#endif
#ifndef CONFIG_PMAC_SMU
int smu_init(void)
{
/* should check and warn if SMU is present */
return 0;
}
#endif
#ifdef CONFIG_PPC32
static volatile u32 *sysctrl_regs;
void __init
pmac_setup_arch(void)
static void __init ohare_init(void)
{
struct device_node *cpu;
int *fp;
unsigned long pvr;
pvr = PVR_VER(mfspr(SPRN_PVR));
/* Set loops_per_jiffy to a half-way reasonable value,
for use until calibrate_delay gets called. */
cpu = find_type_devices("cpu");
if (cpu != 0) {
fp = (int *) get_property(cpu, "clock-frequency", NULL);
if (fp != 0) {
if (pvr == 4 || pvr >= 8)
/* 604, G3, G4 etc. */
loops_per_jiffy = *fp / HZ;
else
/* 601, 603, etc. */
loops_per_jiffy = *fp / (2*HZ);
} else
loops_per_jiffy = 50000000 / HZ;
}
/* this area has the CPU identification register
and some registers used by smp boards */
sysctrl_regs = (volatile u32 *) ioremap(0xf8000000, 0x1000);
ohare_init();
/* Lookup PCI hosts */
pmac_find_bridges();
/*
* Turn on the L2 cache.
* We assume that we have a PSX memory controller iff
* we have an ohare I/O controller.
*/
if (find_devices("ohare") != NULL) {
if (((sysctrl_regs[2] >> 24) & 0xf) >= 3) {
if (sysctrl_regs[4] & 0x10)
sysctrl_regs[4] |= 0x04000020;
else
sysctrl_regs[4] |= 0x04000000;
if(has_l2cache)
printk(KERN_INFO "Level 2 cache enabled\n");
}
}
}
static void __init l2cr_init(void)
{
/* Checks "l2cr-value" property in the registry */
if (cpu_has_feature(CPU_FTR_L2CR)) {
struct device_node *np = find_devices("cpus");
@ -247,68 +283,90 @@ pmac_setup_arch(void)
}
if (ppc_override_l2cr)
printk(KERN_INFO "L2CR overriden (0x%x), backside cache is %s\n",
ppc_override_l2cr_value, (ppc_override_l2cr_value & 0x80000000)
printk(KERN_INFO "L2CR overridden (0x%x), "
"backside cache is %s\n",
ppc_override_l2cr_value,
(ppc_override_l2cr_value & 0x80000000)
? "enabled" : "disabled");
}
#endif
void __init pmac_setup_arch(void)
{
struct device_node *cpu;
int *fp;
unsigned long pvr;
pvr = PVR_VER(mfspr(SPRN_PVR));
/* Set loops_per_jiffy to a half-way reasonable value,
for use until calibrate_delay gets called. */
loops_per_jiffy = 50000000 / HZ;
cpu = of_find_node_by_type(NULL, "cpu");
if (cpu != NULL) {
fp = (int *) get_property(cpu, "clock-frequency", NULL);
if (fp != NULL) {
if (pvr >= 0x30 && pvr < 0x80)
/* PPC970 etc. */
loops_per_jiffy = *fp / (3 * HZ);
else if (pvr == 4 || pvr >= 8)
/* 604, G3, G4 etc. */
loops_per_jiffy = *fp / HZ;
else
/* 601, 603, etc. */
loops_per_jiffy = *fp / (2 * HZ);
}
of_node_put(cpu);
}
/* Lookup PCI hosts */
pmac_pci_init();
#ifdef CONFIG_PPC32
ohare_init();
l2cr_init();
#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC64
/* Probe motherboard chipset */
/* this is done earlier in setup_arch for 32-bit */
pmac_feature_init();
/* We can NAP */
powersave_nap = 1;
printk(KERN_INFO "Using native/NAP idle loop\n");
#endif
#ifdef CONFIG_KGDB
zs_kgdb_hook(0);
#endif
#ifdef CONFIG_ADB_CUDA
find_via_cuda();
#else
if (find_devices("via-cuda")) {
printk("WARNING ! Your machine is Cuda based but your kernel\n");
printk(" wasn't compiled with CONFIG_ADB_CUDA option !\n");
}
#endif
#ifdef CONFIG_ADB_PMU
find_via_pmu();
#else
if (find_devices("via-pmu")) {
printk("WARNING ! Your machine is PMU based but your kernel\n");
printk(" wasn't compiled with CONFIG_ADB_PMU option !\n");
}
#endif
smu_init();
#ifdef CONFIG_NVRAM
pmac_nvram_init();
#endif
#ifdef CONFIG_PPC32
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_start)
ROOT_DEV = Root_RAM0;
else
#endif
ROOT_DEV = DEFAULT_ROOT_DEVICE;
#endif
#ifdef CONFIG_SMP
/* Check for Core99 */
if (find_devices("uni-n") || find_devices("u3"))
smp_ops = &core99_smp_ops;
#ifdef CONFIG_PPC32
else
smp_ops = &psurge_smp_ops;
#endif
#endif /* CONFIG_SMP */
pci_create_OF_bus_map();
}
static void __init ohare_init(void)
{
/*
* Turn on the L2 cache.
* We assume that we have a PSX memory controller iff
* we have an ohare I/O controller.
*/
if (find_devices("ohare") != NULL) {
if (((sysctrl_regs[2] >> 24) & 0xf) >= 3) {
if (sysctrl_regs[4] & 0x10)
sysctrl_regs[4] |= 0x04000020;
else
sysctrl_regs[4] |= 0x04000000;
if(has_l2cache)
printk(KERN_INFO "Level 2 cache enabled\n");
}
}
}
char *bootpath;
@ -319,8 +377,7 @@ int boot_part;
extern dev_t boot_dev;
#ifdef CONFIG_SCSI
void __init
note_scsi_host(struct device_node *node, void *host)
void __init note_scsi_host(struct device_node *node, void *host)
{
int l;
char *p;
@ -351,8 +408,7 @@ EXPORT_SYMBOL(note_scsi_host);
#endif
#if defined(CONFIG_BLK_DEV_IDE) && defined(CONFIG_BLK_DEV_IDE_PMAC)
static dev_t __init
find_ide_boot(void)
static dev_t __init find_ide_boot(void)
{
char *p;
int n;
@ -369,15 +425,13 @@ find_ide_boot(void)
}
#endif /* CONFIG_BLK_DEV_IDE && CONFIG_BLK_DEV_IDE_PMAC */
static void __init
find_boot_device(void)
static void __init find_boot_device(void)
{
#if defined(CONFIG_BLK_DEV_IDE) && defined(CONFIG_BLK_DEV_IDE_PMAC)
boot_dev = find_ide_boot();
#endif
}
static int initializing = 1;
/* TODO: Merge the suspend-to-ram with the common code !!!
* currently, this is a stub implementation for suspend-to-disk
* only
@ -428,6 +482,8 @@ static struct pm_ops pmac_pm_ops = {
#endif /* CONFIG_SOFTWARE_SUSPEND */
static int initializing = 1;
static int pmac_late_init(void)
{
initializing = 0;
@ -440,8 +496,7 @@ static int pmac_late_init(void)
late_initcall(pmac_late_init);
/* can't be __init - can be called whenever a disk is first accessed */
void
note_bootable_part(dev_t dev, int part, int goodness)
void note_bootable_part(dev_t dev, int part, int goodness)
{
static int found_boot = 0;
char *p;
@ -466,52 +521,68 @@ note_bootable_part(dev_t dev, int part, int goodness)
}
}
static void
pmac_restart(char *cmd)
#ifdef CONFIG_ADB_CUDA
static void cuda_restart(void)
{
#ifdef CONFIG_ADB_CUDA
struct adb_request req;
#endif /* CONFIG_ADB_CUDA */
cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_RESET_SYSTEM);
for (;;)
cuda_poll();
}
static void cuda_shutdown(void)
{
struct adb_request req;
cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_POWERDOWN);
for (;;)
cuda_poll();
}
#else
#define cuda_restart()
#define cuda_shutdown()
#endif
#ifndef CONFIG_ADB_PMU
#define pmu_restart()
#define pmu_shutdown()
#endif
#ifndef CONFIG_PMAC_SMU
#define smu_restart()
#define smu_shutdown()
#endif
static void pmac_restart(char *cmd)
{
switch (sys_ctrler) {
#ifdef CONFIG_ADB_CUDA
case SYS_CTRLER_CUDA:
cuda_request(&req, NULL, 2, CUDA_PACKET,
CUDA_RESET_SYSTEM);
for (;;)
cuda_poll();
cuda_restart();
break;
#endif /* CONFIG_ADB_CUDA */
#ifdef CONFIG_ADB_PMU
case SYS_CTRLER_PMU:
pmu_restart();
break;
#endif /* CONFIG_ADB_PMU */
case SYS_CTRLER_SMU:
smu_restart();
break;
default: ;
}
}
static void
pmac_power_off(void)
static void pmac_power_off(void)
{
#ifdef CONFIG_ADB_CUDA
struct adb_request req;
#endif /* CONFIG_ADB_CUDA */
switch (sys_ctrler) {
#ifdef CONFIG_ADB_CUDA
case SYS_CTRLER_CUDA:
cuda_request(&req, NULL, 2, CUDA_PACKET,
CUDA_POWERDOWN);
for (;;)
cuda_poll();
cuda_shutdown();
break;
#endif /* CONFIG_ADB_CUDA */
#ifdef CONFIG_ADB_PMU
case SYS_CTRLER_PMU:
pmu_shutdown();
break;
#endif /* CONFIG_ADB_PMU */
case SYS_CTRLER_SMU:
smu_shutdown();
break;
default: ;
}
}
@ -522,37 +593,17 @@ pmac_halt(void)
pmac_power_off();
}
#ifdef CONFIG_PPC32
void __init pmac_init(void)
{
/* isa_io_base gets set in pmac_find_bridges */
/* isa_io_base gets set in pmac_pci_init */
isa_mem_base = PMAC_ISA_MEM_BASE;
pci_dram_offset = PMAC_PCI_DRAM_OFFSET;
ISA_DMA_THRESHOLD = ~0L;
DMA_MODE_READ = 1;
DMA_MODE_WRITE = 2;
ppc_md.setup_arch = pmac_setup_arch;
ppc_md.show_cpuinfo = pmac_show_cpuinfo;
ppc_md.show_percpuinfo = pmac_show_percpuinfo;
ppc_md.init_IRQ = pmac_pic_init;
ppc_md.get_irq = pmac_get_irq; /* Changed later on ... */
ppc_md.pcibios_fixup = pmac_pcibios_fixup;
ppc_md.pcibios_enable_device_hook = pmac_pci_enable_device_hook;
ppc_md.pcibios_after_init = pmac_pcibios_after_init;
ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;
ppc_md.restart = pmac_restart;
ppc_md.power_off = pmac_power_off;
ppc_md.halt = pmac_halt;
ppc_md.time_init = pmac_time_init;
ppc_md.set_rtc_time = pmac_set_rtc_time;
ppc_md.get_rtc_time = pmac_get_rtc_time;
ppc_md.get_boot_time = pmac_get_boot_time;
ppc_md.calibrate_decr = pmac_calibrate_decr;
ppc_md.feature_call = pmac_do_feature_call;
ppc_md = pmac_md;
#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
#ifdef CONFIG_BLK_DEV_IDE_PMAC
@ -561,27 +612,62 @@ void __init pmac_init(void)
#endif /* CONFIG_BLK_DEV_IDE_PMAC */
#endif /* defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) */
#ifdef CONFIG_BOOTX_TEXT
ppc_md.progress = pmac_progress;
#endif /* CONFIG_BOOTX_TEXT */
if (ppc_md.progress) ppc_md.progress("pmac_init(): exit", 0);
}
#endif
#ifdef CONFIG_BOOTX_TEXT
static void __init
pmac_progress(char *s, unsigned short hex)
/*
* Early initialization.
*/
static void __init pmac_init_early(void)
{
#ifdef CONFIG_PPC64
/* Initialize hash table, from now on, we can take hash faults
* and call ioremap
*/
hpte_init_native();
/* Init SCC */
if (strstr(cmd_line, "sccdbg")) {
sccdbg = 1;
udbg_init_scc(NULL);
}
/* Setup interrupt mapping options */
ppc64_interrupt_controller = IC_OPEN_PIC;
iommu_init_early_u3();
#endif
}
static void __init pmac_progress(char *s, unsigned short hex)
{
#ifdef CONFIG_PPC64
if (sccdbg) {
udbg_puts(s);
udbg_puts("\n");
return;
}
#endif
#ifdef CONFIG_BOOTX_TEXT
if (boot_text_mapped) {
btext_drawstring(s);
btext_drawchar('\n');
}
}
#endif /* CONFIG_BOOTX_TEXT */
}
static int __init
pmac_declare_of_platform_devices(void)
/*
* pmac has no legacy IO, anything calling this function has to
* fail or bad things will happen
*/
static int pmac_check_legacy_ioport(unsigned int baseport)
{
return -ENODEV;
}
static int __init pmac_declare_of_platform_devices(void)
{
struct device_node *np;
@ -594,6 +680,13 @@ pmac_declare_of_platform_devices(void)
break;
}
}
np = find_devices("valkyrie");
if (np)
of_platform_device_create(np, "valkyrie", NULL);
np = find_devices("platinum");
if (np)
of_platform_device_create(np, "platinum", NULL);
np = find_devices("u3");
if (np) {
for (np = np->child; np != NULL; np = np->sibling)
@ -603,15 +696,92 @@ pmac_declare_of_platform_devices(void)
break;
}
}
np = find_devices("valkyrie");
if (np)
of_platform_device_create(np, "valkyrie", NULL);
np = find_devices("platinum");
if (np)
of_platform_device_create(np, "platinum", NULL);
np = of_find_node_by_type(NULL, "smu");
if (np) {
of_platform_device_create(np, "smu", NULL);
of_node_put(np);
}
return 0;
}
device_initcall(pmac_declare_of_platform_devices);
/*
* Called very early, MMU is off, device-tree isn't unflattened
*/
static int __init pmac_probe(int platform)
{
#ifdef CONFIG_PPC64
if (platform != PLATFORM_POWERMAC)
return 0;
/*
* On U3, the DART (iommu) must be allocated now since it
* has an impact on htab_initialize (due to the large page it
* occupies having to be broken up so the DART itself is not
* part of the cacheable linar mapping
*/
alloc_u3_dart_table();
#endif
#ifdef CONFIG_PMAC_SMU
/*
* SMU based G5s need some memory below 2Gb, at least the current
* driver needs that. We have to allocate it now. We allocate 4k
* (1 small page) for now.
*/
smu_cmdbuf_abs = lmb_alloc_base(4096, 4096, 0x80000000UL);
#endif /* CONFIG_PMAC_SMU */
return 1;
}
#ifdef CONFIG_PPC64
static int pmac_probe_mode(struct pci_bus *bus)
{
struct device_node *node = bus->sysdata;
/* We need to use normal PCI probing for the AGP bus,
since the device for the AGP bridge isn't in the tree. */
if (bus->self == NULL && device_is_compatible(node, "u3-agp"))
return PCI_PROBE_NORMAL;
return PCI_PROBE_DEVTREE;
}
#endif
struct machdep_calls __initdata pmac_md = {
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC64)
.cpu_die = generic_mach_cpu_die,
#endif
.probe = pmac_probe,
.setup_arch = pmac_setup_arch,
.init_early = pmac_init_early,
.show_cpuinfo = pmac_show_cpuinfo,
.show_percpuinfo = pmac_show_percpuinfo,
.init_IRQ = pmac_pic_init,
.get_irq = mpic_get_irq, /* changed later */
.pcibios_fixup = pmac_pcibios_fixup,
.restart = pmac_restart,
.power_off = pmac_power_off,
.halt = pmac_halt,
.time_init = pmac_time_init,
.get_boot_time = pmac_get_boot_time,
.set_rtc_time = pmac_set_rtc_time,
.get_rtc_time = pmac_get_rtc_time,
.calibrate_decr = pmac_calibrate_decr,
.feature_call = pmac_do_feature_call,
.check_legacy_ioport = pmac_check_legacy_ioport,
.progress = pmac_progress,
#ifdef CONFIG_PPC64
.pci_probe_mode = pmac_probe_mode,
.idle_loop = native_idle,
.enable_pmcs = power4_enable_pmcs,
#endif
#ifdef CONFIG_PPC32
.pcibios_enable_device_hook = pmac_pci_enable_device_hook,
.pcibios_after_init = pmac_pcibios_after_init,
.phys_mem_access_prot = pci_phys_mem_access_prot,
#endif
};


@ -44,20 +44,33 @@
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/residual.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/time.h>
#include <asm/mpic.h>
#include <asm/cacheflush.h>
#include <asm/keylargo.h>
#include <asm/pmac_low_i2c.h>
#undef DEBUG
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif
extern void __secondary_start_pmac_0(void);
#ifdef CONFIG_PPC32
/* Sync flag for HW tb sync */
static volatile int sec_tb_reset = 0;
/*
* Powersurge (old powermac SMP) support.
*/
extern void __secondary_start_pmac_0(void);
/* Addresses for powersurge registers */
#define HAMMERHEAD_BASE 0xf8000000
#define HHEAD_CONFIG 0x90
@ -106,47 +119,6 @@ static volatile u32 __iomem *psurge_start;
/* what sort of powersurge board we have */
static int psurge_type = PSURGE_NONE;
/* L2 and L3 cache settings to pass from CPU0 to CPU1 */
volatile static long int core99_l2_cache;
volatile static long int core99_l3_cache;
/* Timebase freeze GPIO */
static unsigned int core99_tb_gpio;
/* Sync flag for HW tb sync */
static volatile int sec_tb_reset = 0;
static unsigned int pri_tb_hi, pri_tb_lo;
static unsigned int pri_tb_stamp;
static void __devinit core99_init_caches(int cpu)
{
if (!cpu_has_feature(CPU_FTR_L2CR))
return;
if (cpu == 0) {
core99_l2_cache = _get_L2CR();
printk("CPU0: L2CR is %lx\n", core99_l2_cache);
} else {
printk("CPU%d: L2CR was %lx\n", cpu, _get_L2CR());
_set_L2CR(0);
_set_L2CR(core99_l2_cache);
printk("CPU%d: L2CR set to %lx\n", cpu, core99_l2_cache);
}
if (!cpu_has_feature(CPU_FTR_L3CR))
return;
if (cpu == 0){
core99_l3_cache = _get_L3CR();
printk("CPU0: L3CR is %lx\n", core99_l3_cache);
} else {
printk("CPU%d: L3CR was %lx\n", cpu, _get_L3CR());
_set_L3CR(0);
_set_L3CR(core99_l3_cache);
printk("CPU%d: L3CR set to %lx\n", cpu, core99_l3_cache);
}
}
/*
* Set and clear IPIs for powersurge.
*/
@ -436,65 +408,346 @@ void __init smp_psurge_give_timebase(void)
/* Dummy implementation */
}
static int __init smp_core99_probe(void)
/* PowerSurge-style Macs */
struct smp_ops_t psurge_smp_ops = {
.message_pass = smp_psurge_message_pass,
.probe = smp_psurge_probe,
.kick_cpu = smp_psurge_kick_cpu,
.setup_cpu = smp_psurge_setup_cpu,
.give_timebase = smp_psurge_give_timebase,
.take_timebase = smp_psurge_take_timebase,
};
#endif /* CONFIG_PPC32 - actually powersurge support */
#ifdef CONFIG_PPC64
/*
* G5s enable/disable the timebase via an i2c-connected clock chip.
*/
static struct device_node *pmac_tb_clock_chip_host;
static u8 pmac_tb_pulsar_addr;
static void (*pmac_tb_freeze)(int freeze);
static DEFINE_SPINLOCK(timebase_lock);
static unsigned long timebase;
static void smp_core99_cypress_tb_freeze(int freeze)
{
#ifdef CONFIG_6xx
extern int powersave_nap;
#endif
struct device_node *cpus, *firstcpu;
int i, ncpus = 0, boot_cpu = -1;
u32 *tbprop = NULL;
u8 data;
int rc;
if (ppc_md.progress) ppc_md.progress("smp_core99_probe", 0x345);
cpus = firstcpu = find_type_devices("cpu");
while(cpus != NULL) {
u32 *regprop = (u32 *)get_property(cpus, "reg", NULL);
char *stateprop = (char *)get_property(cpus, "state", NULL);
if (regprop != NULL && stateprop != NULL &&
!strncmp(stateprop, "running", 7))
boot_cpu = *regprop;
++ncpus;
cpus = cpus->next;
/* Strangely, the device-tree says address is 0xd2, but darwin
* accesses 0xd0 ...
*/
pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_combined);
rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
0xd0 | pmac_low_i2c_read,
0x81, &data, 1);
if (rc != 0)
goto bail;
data = (data & 0xf3) | (freeze ? 0x00 : 0x0c);
pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_stdsub);
rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
0xd0 | pmac_low_i2c_write,
0x81, &data, 1);
bail:
if (rc != 0) {
printk("Cypress Timebase %s rc: %d\n",
freeze ? "freeze" : "unfreeze", rc);
panic("Timebase freeze failed !\n");
}
if (boot_cpu == -1)
printk(KERN_WARNING "Couldn't detect boot CPU !\n");
if (boot_cpu != 0)
printk(KERN_WARNING "Boot CPU is %d, unsupported setup !\n", boot_cpu);
}
if (machine_is_compatible("MacRISC4")) {
extern struct smp_ops_t core99_smp_ops;
core99_smp_ops.take_timebase = smp_generic_take_timebase;
core99_smp_ops.give_timebase = smp_generic_give_timebase;
static void smp_core99_pulsar_tb_freeze(int freeze)
{
u8 data;
int rc;
pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_combined);
rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
pmac_tb_pulsar_addr | pmac_low_i2c_read,
0x2e, &data, 1);
if (rc != 0)
goto bail;
data = (data & 0x88) | (freeze ? 0x11 : 0x22);
pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_stdsub);
rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
pmac_tb_pulsar_addr | pmac_low_i2c_write,
0x2e, &data, 1);
bail:
if (rc != 0) {
printk(KERN_ERR "Pulsar Timebase %s rc: %d\n",
freeze ? "freeze" : "unfreeze", rc);
panic("Timebase freeze failed !\n");
}
}
static void smp_core99_give_timebase(void)
{
/* Open i2c bus for synchronous access */
if (pmac_low_i2c_open(pmac_tb_clock_chip_host, 0))
panic("Can't open i2c for TB sync !\n");
spin_lock(&timebase_lock);
(*pmac_tb_freeze)(1);
mb();
timebase = get_tb();
spin_unlock(&timebase_lock);
while (timebase)
barrier();
spin_lock(&timebase_lock);
(*pmac_tb_freeze)(0);
spin_unlock(&timebase_lock);
/* Close i2c bus */
pmac_low_i2c_close(pmac_tb_clock_chip_host);
}
static void __devinit smp_core99_take_timebase(void)
{
while (!timebase)
barrier();
spin_lock(&timebase_lock);
set_tb(timebase >> 32, timebase & 0xffffffff);
timebase = 0;
spin_unlock(&timebase_lock);
}
static void __init smp_core99_setup(int ncpus)
{
struct device_node *cc = NULL;
struct device_node *p;
u32 *reg;
int ok;
/* HW sync only on these platforms */
if (!machine_is_compatible("PowerMac7,2") &&
!machine_is_compatible("PowerMac7,3") &&
!machine_is_compatible("RackMac3,1"))
return;
/* Look for the clock chip */
while ((cc = of_find_node_by_name(cc, "i2c-hwclock")) != NULL) {
p = of_get_parent(cc);
ok = p && device_is_compatible(p, "uni-n-i2c");
of_node_put(p);
if (!ok)
continue;
reg = (u32 *)get_property(cc, "reg", NULL);
if (reg == NULL)
continue;
switch (*reg) {
case 0xd2:
if (device_is_compatible(cc, "pulsar-legacy-slewing")) {
pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
pmac_tb_pulsar_addr = 0xd2;
printk(KERN_INFO "Timebase clock is Pulsar chip\n");
} else if (device_is_compatible(cc, "cy28508")) {
pmac_tb_freeze = smp_core99_cypress_tb_freeze;
printk(KERN_INFO "Timebase clock is Cypress chip\n");
}
break;
case 0xd4:
pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
pmac_tb_pulsar_addr = 0xd4;
printk(KERN_INFO "Timebase clock is Pulsar chip\n");
break;
}
if (pmac_tb_freeze != NULL) {
pmac_tb_clock_chip_host = of_get_parent(cc);
of_node_put(cc);
break;
}
}
if (pmac_tb_freeze == NULL) {
smp_ops->give_timebase = smp_generic_give_timebase;
smp_ops->take_timebase = smp_generic_take_timebase;
}
}
/* nothing to do here, caches are already set up by service processor */
static inline void __devinit core99_init_caches(int cpu)
{
}
#else /* CONFIG_PPC64 */
/*
* SMP G4 powermacs use a GPIO to enable/disable the timebase.
*/
static unsigned int core99_tb_gpio; /* Timebase freeze GPIO */
static unsigned int pri_tb_hi, pri_tb_lo;
static unsigned int pri_tb_stamp;
/* not __init, called in sleep/wakeup code */
void smp_core99_give_timebase(void)
{
unsigned long flags;
unsigned int t;
/* wait for the secondary to be in take_timebase */
for (t = 100000; t > 0 && !sec_tb_reset; --t)
udelay(10);
if (!sec_tb_reset) {
printk(KERN_WARNING "Timeout waiting sync on second CPU\n");
return;
}
/* freeze the timebase and read it */
/* disable interrupts so the timebase is disabled for the
shortest possible time */
local_irq_save(flags);
pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 4);
pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
mb();
pri_tb_hi = get_tbu();
pri_tb_lo = get_tbl();
pri_tb_stamp = last_jiffy_stamp(smp_processor_id());
mb();
/* tell the secondary we're ready */
sec_tb_reset = 2;
mb();
/* wait for the secondary to have taken it */
for (t = 100000; t > 0 && sec_tb_reset; --t)
udelay(10);
if (sec_tb_reset)
printk(KERN_WARNING "Timeout waiting sync(2) on second CPU\n");
else
smp_tb_synchronized = 1;
/* Now, restart the timebase by leaving the GPIO to an open collector */
pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 0);
pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
local_irq_restore(flags);
}
/* not __init, called in sleep/wakeup code */
void smp_core99_take_timebase(void)
{
unsigned long flags;
/* tell the primary we're here */
sec_tb_reset = 1;
mb();
/* wait for the primary to set pri_tb_hi/lo */
while (sec_tb_reset < 2)
mb();
/* set our stuff the same as the primary */
local_irq_save(flags);
set_dec(1);
set_tb(pri_tb_hi, pri_tb_lo);
last_jiffy_stamp(smp_processor_id()) = pri_tb_stamp;
mb();
/* tell the primary we're done */
sec_tb_reset = 0;
mb();
local_irq_restore(flags);
}
/* L2 and L3 cache settings to pass from CPU0 to CPU1 on G4 cpus */
volatile static long int core99_l2_cache;
volatile static long int core99_l3_cache;
static void __devinit core99_init_caches(int cpu)
{
if (!cpu_has_feature(CPU_FTR_L2CR))
return;
if (cpu == 0) {
core99_l2_cache = _get_L2CR();
printk("CPU0: L2CR is %lx\n", core99_l2_cache);
} else {
if (firstcpu != NULL)
tbprop = (u32 *)get_property(firstcpu, "timebase-enable", NULL);
printk("CPU%d: L2CR was %lx\n", cpu, _get_L2CR());
_set_L2CR(0);
_set_L2CR(core99_l2_cache);
printk("CPU%d: L2CR set to %lx\n", cpu, core99_l2_cache);
}
if (!cpu_has_feature(CPU_FTR_L3CR))
return;
if (cpu == 0){
core99_l3_cache = _get_L3CR();
printk("CPU0: L3CR is %lx\n", core99_l3_cache);
} else {
printk("CPU%d: L3CR was %lx\n", cpu, _get_L3CR());
_set_L3CR(0);
_set_L3CR(core99_l3_cache);
printk("CPU%d: L3CR set to %lx\n", cpu, core99_l3_cache);
}
}
static void __init smp_core99_setup(int ncpus)
{
struct device_node *cpu;
u32 *tbprop = NULL;
int i;
core99_tb_gpio = KL_GPIO_TB_ENABLE; /* default value */
cpu = of_find_node_by_type(NULL, "cpu");
if (cpu != NULL) {
tbprop = (u32 *)get_property(cpu, "timebase-enable", NULL);
if (tbprop)
core99_tb_gpio = *tbprop;
else
core99_tb_gpio = KL_GPIO_TB_ENABLE;
of_node_put(cpu);
}
if (ncpus > 1) {
mpic_request_ipis();
for (i = 1; i < ncpus; ++i)
smp_hw_index[i] = i;
#ifdef CONFIG_6xx
powersave_nap = 0;
/* XXX should get this from reg properties */
for (i = 1; i < ncpus; ++i)
smp_hw_index[i] = i;
powersave_nap = 0;
}
#endif
core99_init_caches(0);
}
static int __init smp_core99_probe(void)
{
struct device_node *cpus;
int ncpus = 0;
if (ppc_md.progress) ppc_md.progress("smp_core99_probe", 0x345);
/* Count CPUs in the device-tree */
for (cpus = NULL; (cpus = of_find_node_by_type(cpus, "cpu")) != NULL;)
++ncpus;
printk(KERN_INFO "PowerMac SMP probe found %d cpus\n", ncpus);
/* Nothing more to do if less than 2 of them */
if (ncpus <= 1)
return 1;
smp_core99_setup(ncpus);
mpic_request_ipis();
core99_init_caches(0);
return ncpus;
}
static void __devinit smp_core99_kick_cpu(int nr)
{
unsigned long save_vector, new_vector;
unsigned int save_vector;
unsigned long new_vector;
unsigned long flags;
volatile unsigned int *vector
= ((volatile unsigned int *)(KERNELBASE+0x100));
volatile unsigned long *vector
= ((volatile unsigned long *)(KERNELBASE+0x100));
if (nr < 0 || nr > 3)
return;
if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu", 0x346);
@ -556,113 +809,10 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr)
}
}
/* not __init, called in sleep/wakeup code */
void smp_core99_take_timebase(void)
{
unsigned long flags;
/* tell the primary we're here */
sec_tb_reset = 1;
mb();
/* wait for the primary to set pri_tb_hi/lo */
while (sec_tb_reset < 2)
mb();
/* set our stuff the same as the primary */
local_irq_save(flags);
set_dec(1);
set_tb(pri_tb_hi, pri_tb_lo);
last_jiffy_stamp(smp_processor_id()) = pri_tb_stamp;
mb();
/* tell the primary we're done */
sec_tb_reset = 0;
mb();
local_irq_restore(flags);
}
/* not __init, called in sleep/wakeup code */
void smp_core99_give_timebase(void)
{
unsigned long flags;
unsigned int t;
/* wait for the secondary to be in take_timebase */
for (t = 100000; t > 0 && !sec_tb_reset; --t)
udelay(10);
if (!sec_tb_reset) {
printk(KERN_WARNING "Timeout waiting sync on second CPU\n");
return;
}
/* freeze the timebase and read it */
/* disable interrupts so the timebase is disabled for the
shortest possible time */
local_irq_save(flags);
pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 4);
pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
mb();
pri_tb_hi = get_tbu();
pri_tb_lo = get_tbl();
pri_tb_stamp = last_jiffy_stamp(smp_processor_id());
mb();
/* tell the secondary we're ready */
sec_tb_reset = 2;
mb();
/* wait for the secondary to have taken it */
for (t = 100000; t > 0 && sec_tb_reset; --t)
udelay(10);
if (sec_tb_reset)
printk(KERN_WARNING "Timeout waiting sync(2) on second CPU\n");
else
smp_tb_synchronized = 1;
/* Now, restart the timebase by leaving the GPIO to an open collector */
pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 0);
pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
local_irq_restore(flags);
}
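/*
 * Summary of the timebase hand-off above: sec_tb_reset acts as a small
 * state machine shared by the two CPUs.  The secondary sets it to 1 and
 * spins; the primary freezes the timebase through the core99_tb_gpio GPIO,
 * samples pri_tb_hi/pri_tb_lo/pri_tb_stamp and publishes them by setting
 * sec_tb_reset to 2; the secondary copies those values into its own
 * decrementer and timebase, drops sec_tb_reset back to 0, and the primary
 * releases the GPIO to restart the timebase.
 */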
void smp_core99_message_pass(int target, int msg)
{
cpumask_t mask = CPU_MASK_ALL;
/* make sure we're sending something that translates to an IPI */
if (msg > 0x3) {
printk("SMP %d: smp_message_pass: unknown msg %d\n",
smp_processor_id(), msg);
return;
}
switch (target) {
case MSG_ALL:
mpic_send_ipi(msg, cpus_addr(mask)[0]);
break;
case MSG_ALL_BUT_SELF:
cpu_clear(smp_processor_id(), mask);
mpic_send_ipi(msg, cpus_addr(mask)[0]);
break;
default:
mpic_send_ipi(msg, 1 << target);
break;
}
}
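/*
 * Note on the mask handling above: mpic_send_ipi() takes the IPI number
 * and a bitmask of destination CPUs, so MSG_ALL uses the full mask,
 * MSG_ALL_BUT_SELF clears the sender's bit first, and a single target
 * becomes the one bit (1 << target).
 */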
/* PowerSurge-style Macs */
struct smp_ops_t psurge_smp_ops = {
.message_pass = smp_psurge_message_pass,
.probe = smp_psurge_probe,
.kick_cpu = smp_psurge_kick_cpu,
.setup_cpu = smp_psurge_setup_cpu,
.give_timebase = smp_psurge_give_timebase,
.take_timebase = smp_psurge_take_timebase,
};
/* Core99 Macs (dual G4s and G5s) */
struct smp_ops_t core99_smp_ops = {
.message_pass	= smp_mpic_message_pass,
.probe = smp_core99_probe,
.kick_cpu = smp_core99_kick_cpu,
.setup_cpu = smp_core99_setup_cpu,
@ -670,7 +820,7 @@ struct smp_ops_t core99_smp_ops = {
.take_timebase = smp_core99_take_timebase,
};
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32)
int __cpu_disable(void)
{
@ -685,7 +835,7 @@ int __cpu_disable(void)
return 0;
}
extern void low_cpu_die(void) __attribute__((noreturn)); /* in sleep.S */
static int cpu_dead[NR_CPUS];
void cpu_die(void)

View File

@ -33,6 +33,7 @@
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/nvram.h>
#include <asm/smu.h>
#undef DEBUG
@ -68,8 +69,8 @@
long __init pmac_time_init(void)
{
s32 delta = 0;
#ifdef CONFIG_NVRAM
int dst;
delta = ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x9)) << 16;
@ -80,110 +81,181 @@ long __init pmac_time_init(void)
dst = ((pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x8) & 0x80) != 0);
printk("GMT Delta read from XPRAM: %d minutes, DST: %s\n", delta/60,
dst ? "on" : "off");
#endif
return delta;
}
static void to_rtc_time(unsigned long now, struct rtc_time *tm)
{
to_tm(now, tm);
tm->tm_year -= 1900;
tm->tm_mon -= 1;
}
static unsigned long from_rtc_time(struct rtc_time *tm)
{
return mktime(tm->tm_year+1900, tm->tm_mon+1, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
}
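/*
 * to_rtc_time()/from_rtc_time() convert between seconds since 1970 and
 * struct rtc_time, whose tm_year counts from 1900 and whose tm_mon is
 * zero-based; hence the +/-1900 and +/-1 adjustments around to_tm() and
 * mktime().
 */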
#ifdef CONFIG_ADB_CUDA
static unsigned long cuda_get_time(void)
{
struct adb_request req;
unsigned long now;
if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
return 0;
while (!req.complete)
cuda_poll();
if (req.reply_len != 7)
printk(KERN_ERR "cuda_get_time: got %d byte reply\n",
req.reply_len);
now = (req.reply[3] << 24) + (req.reply[4] << 16)
+ (req.reply[5] << 8) + req.reply[6];
if (now < RTC_OFFSET)
return 0;
return now - RTC_OFFSET;
}
#define cuda_get_rtc_time(tm) to_rtc_time(cuda_get_time(), (tm))
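/*
 * The CUDA and PMU RTCs count seconds from the Mac epoch (1904);
 * RTC_OFFSET rebases that onto the 1970 Unix epoch, which is why readings
 * below RTC_OFFSET are rejected above.
 */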
static int cuda_set_rtc_time(struct rtc_time *tm)
{
unsigned int nowtime;
struct adb_request req;
nowtime = from_rtc_time(tm) + RTC_OFFSET;
if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
nowtime >> 24, nowtime >> 16, nowtime >> 8,
nowtime) < 0)
return -ENXIO;
while (!req.complete)
cuda_poll();
if ((req.reply_len != 3) && (req.reply_len != 7))
printk(KERN_ERR "cuda_set_rtc_time: got %d byte reply\n",
req.reply_len);
return 0;
}
#else
#define cuda_get_time() 0
#define cuda_get_rtc_time(tm)
#define cuda_set_rtc_time(tm) 0
#endif
#ifdef CONFIG_ADB_PMU
static unsigned long pmu_get_time(void)
{
struct adb_request req;
unsigned long now;
if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
return 0;
pmu_wait_complete(&req);
if (req.reply_len != 4)
printk(KERN_ERR "pmu_get_time: got %d byte reply from PMU\n",
req.reply_len);
now = (req.reply[0] << 24) + (req.reply[1] << 16)
+ (req.reply[2] << 8) + req.reply[3];
if (now < RTC_OFFSET)
return 0;
return now - RTC_OFFSET;
}
#define pmu_get_rtc_time(tm) to_rtc_time(pmu_get_time(), (tm))
static int pmu_set_rtc_time(struct rtc_time *tm)
{
unsigned int nowtime;
struct adb_request req;
nowtime = from_rtc_time(tm) + RTC_OFFSET;
if (pmu_request(&req, NULL, 5, PMU_SET_RTC, nowtime >> 24,
nowtime >> 16, nowtime >> 8, nowtime) < 0)
return -ENXIO;
pmu_wait_complete(&req);
if (req.reply_len != 0)
printk(KERN_ERR "pmu_set_rtc_time: %d byte reply from PMU\n",
req.reply_len);
return 0;
}
#else
#define pmu_get_time() 0
#define pmu_get_rtc_time(tm)
#define pmu_set_rtc_time(tm) 0
#endif
#ifdef CONFIG_PMAC_SMU
static unsigned long smu_get_time(void)
{
struct rtc_time tm;
if (smu_get_rtc_time(&tm, 1))
return 0;
return from_rtc_time(&tm);
}
#else
#define smu_get_time() 0
#define smu_get_rtc_time(tm, spin)
#define smu_set_rtc_time(tm, spin) 0
#endif
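/*
 * When a controller type is not configured, the helpers above collapse to
 * the stub macros (returning 0 or doing nothing), so the switch statements
 * below compile unconditionally, without per-controller #ifdefs.
 */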
unsigned long pmac_get_boot_time(void)
{
/* Get the time from the RTC, used only at boot time */
switch (sys_ctrler) {
case SYS_CTRLER_CUDA:
return cuda_get_time();
case SYS_CTRLER_PMU:
return pmu_get_time();
case SYS_CTRLER_SMU:
return smu_get_time();
default:
return 0;
}
}
void pmac_get_rtc_time(struct rtc_time *tm)
{
/* Get the time from the RTC, used only at boot time */
switch (sys_ctrler) {
case SYS_CTRLER_CUDA:
cuda_get_rtc_time(tm);
break;
case SYS_CTRLER_PMU:
pmu_get_rtc_time(tm);
break;
case SYS_CTRLER_SMU:
smu_get_rtc_time(tm, 1);
break;
default:
;
}
}
int pmac_set_rtc_time(struct rtc_time *tm)
{
switch (sys_ctrler) {
case SYS_CTRLER_CUDA:
return cuda_set_rtc_time(tm);
case SYS_CTRLER_PMU:
return pmu_set_rtc_time(tm);
case SYS_CTRLER_SMU:
return smu_set_rtc_time(tm, 1);
default:
return -ENODEV;
}
}
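/*
 * Illustration only, not part of this patch: these routines are the
 * natural targets for the generic RTC hooks in struct machdep_calls,
 * along the lines of
 *
 *	.get_boot_time	= pmac_get_boot_time,
 *	.get_rtc_time	= pmac_get_rtc_time,
 *	.set_rtc_time	= pmac_set_rtc_time,
 *
 * with the actual wiring living in the powermac setup code, which is
 * outside this hunk.
 */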
#ifdef CONFIG_PPC32
/*
* Calibrate the decrementer register using VIA timer 1.
* This is used both on powermacs and CHRP machines.
*/
int __init via_calibrate_decr(void)
{
struct device_node *vias;
volatile unsigned char __iomem *via;
@ -217,15 +289,12 @@ via_calibrate_decr(void)
dend = get_dec();
ppc_tb_freq = (dstart - dend) * 100 / 6;
tb_ticks_per_jiffy = (dstart - dend) / ((6 * HZ)/100);
printk(KERN_INFO "via_calibrate_decr: ticks per jiffy = %lu (%u ticks)\n",
tb_ticks_per_jiffy, dstart - dend);
iounmap(via);
return 1;
}
#endif
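/*
 * The arithmetic above implies the decrementer is sampled across a VIA
 * timer 1 interval of 6/100 of a second: (dstart - dend) * 100 / 6 gives
 * decrementer ticks per second, and (dstart - dend) / ((6 * HZ)/100)
 * gives ticks per jiffy.
 */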
#ifdef CONFIG_PM
/*
@ -262,19 +331,17 @@ static struct pmu_sleep_notifier time_sleep_notifier = {
/*
* Query the OF and get the decr frequency.
* This was taken from the pmac time_init() when merging the prep/pmac
* time functions.
*/
void __init pmac_calibrate_decr(void)
{
#ifdef CONFIG_PM
/* XXX why here? */
pmu_register_sleep_notifier(&time_sleep_notifier);
#endif /* CONFIG_PM */
generic_calibrate_decr();
#ifdef CONFIG_PPC32
/* We assume MacRISC2 machines have correct device-tree
* calibration. That's better since the VIA itself seems
* to be slightly off. --BenH
@ -293,18 +360,5 @@ pmac_calibrate_decr(void)
if (machine_is_compatible("PowerMac3,5"))
if (via_calibrate_decr())
return;
#endif
}
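/*
 * Calibration order above: generic_calibrate_decr() takes the timebase
 * frequency from the device tree; on 32-bit machines whose device-tree
 * value is not trusted (see the MacRISC2 comment) and on PowerMac3,5,
 * via_calibrate_decr() then re-measures it against the VIA and returns
 * early if that succeeds.
 */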

View File

@ -119,6 +119,8 @@ struct machdep_calls {
/* Interface for platform error logging */
void (*log_error)(char *buf, unsigned int err_type, int fatal);
unsigned char (*nvram_read_val)(int addr);
void (*nvram_write_val)(int addr, unsigned char val);
ssize_t (*nvram_write)(char *buf, size_t count, loff_t *index);
ssize_t (*nvram_read)(char *buf, size_t count, loff_t *index);
ssize_t (*nvram_size)(void);
@ -165,15 +167,11 @@ struct machdep_calls {
unsigned long heartbeat_reset;
unsigned long heartbeat_count;
unsigned long (*find_end_of_memory)(void);
void (*setup_io_mappings)(void);
void (*early_serial_map)(void);
void (*kgdb_map_scc)(void);
unsigned char (*nvram_read_val)(int addr);
void (*nvram_write_val)(int addr, unsigned char val);
/*
* optional PCI "hooks"
*/

View File

@ -148,6 +148,8 @@ struct thread_struct;
extern struct task_struct * _switch(struct thread_struct *prev,
struct thread_struct *next);
extern int powersave_nap; /* set if nap mode can be used in idle loop */
/*
* Atomic exchange
*

View File

@ -28,4 +28,7 @@ extern unsigned long udbg_ifdebug(unsigned long flags);
extern void __init ppcdbg_initialize(void);
extern void udbg_init_uart(void __iomem *comport, unsigned int speed);
struct device_node;
extern void udbg_init_scc(struct device_node *np);
#endif