ia64: remove support for the SGI SN2 platform

The SGI SN2 (early Altix) is a very non-standard IA64 platform that was
at the very high end of even IA64 hardware, and was discontinued a long
time ago.  Remove it because there are no upstream users left, and it
has magic hooks all over the kernel.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lkml.kernel.org/r/20190813072514.23299-16-hch@lst.de
Signed-off-by: Tony Luck <tony.luck@intel.com>
Christoph Hellwig 2019-08-13 09:25:01 +02:00 committed by Tony Luck
parent f7bc6e42bf
commit cf07cb1ff4
92 changed files with 14 additions and 20657 deletions

arch/ia64/Kconfig

@@ -66,7 +66,6 @@ config 64BIT
config ZONE_DMA32
def_bool y
depends on !IA64_SGI_SN2
config QUICKLIST
bool
@@ -140,7 +139,6 @@ config IA64_GENERIC
DIG+Intel+IOMMU For DIG systems with Intel IOMMU
HP-zx1/sx1000 For HP systems
HP-zx1/sx1000+swiotlb For HP systems with (broken) DMA-constrained devices.
SGI-SN2 For SGI Altix systems
SGI-UV For SGI UV systems
Ski-simulator For the HP simulator <http://www.hpl.hp.com/research/linux/ski/>
@@ -171,17 +169,6 @@ config IA64_HP_ZX1_SWIOTLB
I/O TLB, which allows supporting the broken devices at the expense of
wasting some kernel memory (about 2MB by default).
config IA64_SGI_SN2
bool "SGI-SN2"
select NUMA
select ACPI_NUMA
help
Selecting this option will optimize the kernel for use on sn2 based
systems, but the resulting kernel binary will not run on other
types of ia64 systems. If you have an SGI Altix system, it's safe
to select this option. If in doubt, select ia64 generic support
instead.
config IA64_SGI_UV
bool "SGI-UV"
select NUMA
@@ -381,13 +368,12 @@ config ARCH_SPARSEMEM_ENABLE
select SPARSEMEM_VMEMMAP_ENABLE
config ARCH_DISCONTIGMEM_DEFAULT
- def_bool y if (IA64_SGI_SN2 || IA64_GENERIC || IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB)
+ def_bool y if (IA64_GENERIC || IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB)
depends on ARCH_DISCONTIGMEM_ENABLE
config NUMA
bool "NUMA support"
depends on !IA64_HP_SIM && !FLATMEM
default y if IA64_SGI_SN2
select ACPI_NUMA if ACPI
help
Say Y to compile the kernel to support NUMA (Non-Uniform Memory
@@ -472,9 +458,6 @@ config IA64_MC_ERR_INJECT
If you're unsure, do not select this option.
config SGI_SN
def_bool y if (IA64_SGI_SN2 || IA64_GENERIC)
config IA64_ESI
bool "ESI (Extensible SAL Interface) support"
help

arch/ia64/Kconfig.debug

@@ -14,7 +14,7 @@ config IA64_GRANULE_16MB
config IA64_GRANULE_64MB
bool "64MB"
- depends on !(IA64_GENERIC || IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_SGI_SN2)
+ depends on !(IA64_GENERIC || IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB)
endchoice

arch/ia64/Makefile

@@ -49,14 +49,13 @@ core-$(CONFIG_IA64_DIG_VTD) += arch/ia64/dig/
core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/
core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/
core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/
core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/
core-$(CONFIG_IA64_SGI_UV) += arch/ia64/uv/
drivers-$(CONFIG_PCI) += arch/ia64/pci/
drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/
drivers-$(CONFIG_IA64_HP_ZX1) += arch/ia64/hp/common/ arch/ia64/hp/zx1/
drivers-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/hp/common/ arch/ia64/hp/zx1/
- drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/ arch/ia64/sn/ arch/ia64/uv/
+ drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/ arch/ia64/uv/
drivers-$(CONFIG_OPROFILE) += arch/ia64/oprofile/
boot := arch/ia64/hp/sim/boot

arch/ia64/include/asm/acpi.h

@@ -43,8 +43,6 @@ static inline const char *acpi_get_sysname (void)
return "hpzx1";
# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
return "hpzx1_swiotlb";
# elif defined (CONFIG_IA64_SGI_SN2)
return "sn2";
# elif defined (CONFIG_IA64_SGI_UV)
return "uv";
# elif defined (CONFIG_IA64_DIG)

arch/ia64/include/asm/irq.h

@@ -28,9 +28,6 @@ irq_canonicalize (int irq)
}
extern void set_irq_affinity_info (unsigned int irq, int dest, int redir);
bool is_affinity_mask_valid(const struct cpumask *cpumask);
#define is_affinity_mask_valid is_affinity_mask_valid
int create_irq(void);
void destroy_irq(unsigned int irq);

arch/ia64/include/asm/machvec.h

@@ -101,8 +101,6 @@ extern void machvec_timer_interrupt (int, void *);
# include <asm/machvec_hpzx1.h>
# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
# include <asm/machvec_hpzx1_swiotlb.h>
# elif defined (CONFIG_IA64_SGI_SN2)
# include <asm/machvec_sn2.h>
# elif defined (CONFIG_IA64_SGI_UV)
# include <asm/machvec_uv.h>
# elif defined (CONFIG_IA64_GENERIC)

arch/ia64/include/asm/machvec_sn2.h

@@ -1,114 +0,0 @@
/*
* Copyright (c) 2002-2003,2006 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* Further, this software is distributed without any warranty that it is
* free of the rightful claim of any third person regarding infringement
* or the like. Any license provided herein, whether implied or
* otherwise, applies only to this software file. Patent licenses, if
* any, provided herein do not apply to combinations of this program with
* other software, or any other product whatsoever.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* For further information regarding this notice, see:
*
* http://oss.sgi.com/projects/GenInfo/NoticeExplan
*/
#ifndef _ASM_IA64_MACHVEC_SN2_H
#define _ASM_IA64_MACHVEC_SN2_H
extern ia64_mv_setup_t sn_setup;
extern ia64_mv_cpu_init_t sn_cpu_init;
extern ia64_mv_irq_init_t sn_irq_init;
extern ia64_mv_send_ipi_t sn2_send_IPI;
extern ia64_mv_timer_interrupt_t sn_timer_interrupt;
extern ia64_mv_global_tlb_purge_t sn2_global_tlb_purge;
extern ia64_mv_irq_to_vector sn_irq_to_vector;
extern ia64_mv_local_vector_to_irq sn_local_vector_to_irq;
extern ia64_mv_pci_get_legacy_mem_t sn_pci_get_legacy_mem;
extern ia64_mv_pci_legacy_read_t sn_pci_legacy_read;
extern ia64_mv_pci_legacy_write_t sn_pci_legacy_write;
extern ia64_mv_inb_t __sn_inb;
extern ia64_mv_inw_t __sn_inw;
extern ia64_mv_inl_t __sn_inl;
extern ia64_mv_outb_t __sn_outb;
extern ia64_mv_outw_t __sn_outw;
extern ia64_mv_outl_t __sn_outl;
extern ia64_mv_mmiowb_t __sn_mmiowb;
extern ia64_mv_readb_t __sn_readb;
extern ia64_mv_readw_t __sn_readw;
extern ia64_mv_readl_t __sn_readl;
extern ia64_mv_readq_t __sn_readq;
extern ia64_mv_readb_t __sn_readb_relaxed;
extern ia64_mv_readw_t __sn_readw_relaxed;
extern ia64_mv_readl_t __sn_readl_relaxed;
extern ia64_mv_readq_t __sn_readq_relaxed;
extern ia64_mv_dma_init sn_dma_init;
extern ia64_mv_migrate_t sn_migrate;
extern ia64_mv_kernel_launch_event_t sn_kernel_launch_event;
extern ia64_mv_setup_msi_irq_t sn_setup_msi_irq;
extern ia64_mv_teardown_msi_irq_t sn_teardown_msi_irq;
extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus;
/*
* This stuff has dual use!
*
* For a generic kernel, the macros are used to initialize the
* platform's machvec structure. When compiling a non-generic kernel,
* the macros are used directly.
*/
#define ia64_platform_name "sn2"
#define platform_setup sn_setup
#define platform_cpu_init sn_cpu_init
#define platform_irq_init sn_irq_init
#define platform_send_ipi sn2_send_IPI
#define platform_timer_interrupt sn_timer_interrupt
#define platform_global_tlb_purge sn2_global_tlb_purge
#define platform_pci_fixup sn_pci_fixup
#define platform_inb __sn_inb
#define platform_inw __sn_inw
#define platform_inl __sn_inl
#define platform_outb __sn_outb
#define platform_outw __sn_outw
#define platform_outl __sn_outl
#define platform_mmiowb __sn_mmiowb
#define platform_readb __sn_readb
#define platform_readw __sn_readw
#define platform_readl __sn_readl
#define platform_readq __sn_readq
#define platform_readb_relaxed __sn_readb_relaxed
#define platform_readw_relaxed __sn_readw_relaxed
#define platform_readl_relaxed __sn_readl_relaxed
#define platform_readq_relaxed __sn_readq_relaxed
#define platform_irq_to_vector sn_irq_to_vector
#define platform_local_vector_to_irq sn_local_vector_to_irq
#define platform_pci_get_legacy_mem sn_pci_get_legacy_mem
#define platform_pci_legacy_read sn_pci_legacy_read
#define platform_pci_legacy_write sn_pci_legacy_write
#define platform_dma_init sn_dma_init
#define platform_migrate sn_migrate
#define platform_kernel_launch_event sn_kernel_launch_event
#ifdef CONFIG_PCI_MSI
#define platform_setup_msi_irq sn_setup_msi_irq
#define platform_teardown_msi_irq sn_teardown_msi_irq
#else
#define platform_setup_msi_irq ((ia64_mv_setup_msi_irq_t*)NULL)
#define platform_teardown_msi_irq ((ia64_mv_teardown_msi_irq_t*)NULL)
#endif
#define platform_pci_fixup_bus sn_pci_fixup_bus
#include <asm/sn/io.h>
#endif /* _ASM_IA64_MACHVEC_SN2_H */
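For context, the "dual use" comment above is the heart of the machvec hooks
the commit message complains about: a generic kernel gathers every
platform_* macro into a per-platform machine vector that is selected by name
at boot, while an sn2-only kernel uses the macros directly. A minimal sketch
of the generic-kernel side (struct and field names are simplified
assumptions, not the kernel's actual machvec_init.h):

struct ia64_machine_vector {
	const char *name;
	void (*setup)(char **cmdline);
	void (*send_ipi)(int cpu, int vector, int delivery_mode, int redirect);
	/* ... one function pointer per platform_* hook above ... */
};

/* With machvec_sn2.h included, the macros fill the vector in: */
static const struct ia64_machine_vector sn2_mv = {
	.name		= ia64_platform_name,	/* "sn2" */
	.setup		= platform_setup,	/* sn_setup */
	.send_ipi	= platform_send_ipi,	/* sn2_send_IPI */
};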

arch/ia64/include/asm/mmzone.h

@@ -30,7 +30,7 @@ static inline int pfn_to_nid(unsigned long pfn)
#ifdef CONFIG_IA64_DIG /* DIG systems are small */
# define MAX_PHYSNODE_ID 8
# define NR_NODE_MEMBLKS (MAX_NUMNODES * 8)
- #else /* sn2 is the biggest case, so we use that if !DIG */
+ #else
# define MAX_PHYSNODE_ID 2048
# define NR_NODE_MEMBLKS (MAX_NUMNODES * 4)
#endif

arch/ia64/include/asm/sn/acpi.h

@@ -1,15 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2006 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_ACPI_H
#define _ASM_IA64_SN_ACPI_H
extern int sn_acpi_rev;
#define SN_ACPI_BASE_SUPPORT() (sn_acpi_rev >= 0x20101)
#endif /* _ASM_IA64_SN_ACPI_H */

arch/ia64/include/asm/sn/addrs.h

@@ -1,299 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 1992-1999,2001-2005 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_ADDRS_H
#define _ASM_IA64_SN_ADDRS_H
#include <asm/percpu.h>
#include <asm/sn/types.h>
#include <asm/sn/arch.h>
#include <asm/sn/pda.h>
/*
* Memory/SHUB Address Format:
* +-+---------+--+--------------+
* |0| NASID |AS| NodeOffset |
* +-+---------+--+--------------+
*
* NASID: (low NASID bit is 0) Memory and SHUB MMRs
* AS: 2-bit Address Space Identifier. Used only if low NASID bit is 0
*   00: Local Resources and MMR space
*     Top bit of NodeOffset
*       0: Local resources space
*         node id:
*           0: IA64/NT compatibility space
*           2: Local MMR Space
*           4: Local memory, regardless of local node id
*       1: Global MMR space
*   01: GET space.
*   10: AMO space.
*   11: Cacheable memory space.
*
* NodeOffset: byte offset
*
*
* TIO address format:
* +-+----------+--+--------------+
* |0| NASID |AS| Nodeoffset |
* +-+----------+--+--------------+
*
* NASID: (low NASID bit is 1) TIO
* AS: 2-bit Chiplet Identifier
* 00: TIO LB (Indicates TIO MMR access.)
* 01: TIO ICE (indicates coretalk space access.)
*
* NodeOffset: top bit must be set.
*
*
* Note that in both of the above address formats, the low
* NASID bit indicates if the reference is to the SHUB or TIO MMRs.
*/
/*
* Define basic shift & mask constants for manipulating NASIDs and AS values.
*/
#define NASID_BITMASK (sn_hub_info->nasid_bitmask)
#define NASID_SHIFT (sn_hub_info->nasid_shift)
#define AS_SHIFT (sn_hub_info->as_shift)
#define AS_BITMASK 0x3UL
#define NASID_MASK ((u64)NASID_BITMASK << NASID_SHIFT)
#define AS_MASK ((u64)AS_BITMASK << AS_SHIFT)
/*
* AS values. These are the same on both SHUB1 & SHUB2.
*/
#define AS_GET_VAL 1UL
#define AS_AMO_VAL 2UL
#define AS_CAC_VAL 3UL
#define AS_GET_SPACE (AS_GET_VAL << AS_SHIFT)
#define AS_AMO_SPACE (AS_AMO_VAL << AS_SHIFT)
#define AS_CAC_SPACE (AS_CAC_VAL << AS_SHIFT)
/*
* Virtual Mode Local & Global MMR space.
*/
#define SH1_LOCAL_MMR_OFFSET 0x8000000000UL
#define SH2_LOCAL_MMR_OFFSET 0x0200000000UL
#define LOCAL_MMR_OFFSET (is_shub2() ? SH2_LOCAL_MMR_OFFSET : SH1_LOCAL_MMR_OFFSET)
#define LOCAL_MMR_SPACE (__IA64_UNCACHED_OFFSET | LOCAL_MMR_OFFSET)
#define LOCAL_PHYS_MMR_SPACE (RGN_BASE(RGN_HPAGE) | LOCAL_MMR_OFFSET)
#define SH1_GLOBAL_MMR_OFFSET 0x0800000000UL
#define SH2_GLOBAL_MMR_OFFSET 0x0300000000UL
#define GLOBAL_MMR_OFFSET (is_shub2() ? SH2_GLOBAL_MMR_OFFSET : SH1_GLOBAL_MMR_OFFSET)
#define GLOBAL_MMR_SPACE (__IA64_UNCACHED_OFFSET | GLOBAL_MMR_OFFSET)
/*
* Physical mode addresses
*/
#define GLOBAL_PHYS_MMR_SPACE (RGN_BASE(RGN_HPAGE) | GLOBAL_MMR_OFFSET)
/*
* Clear region & AS bits.
*/
#define TO_PHYS_MASK (~(RGN_BITS | AS_MASK))
/*
* Misc NASID manipulation.
*/
#define NASID_SPACE(n) ((u64)(n) << NASID_SHIFT)
#define REMOTE_ADDR(n,a) (NASID_SPACE(n) | (a))
#define NODE_OFFSET(x) ((x) & (NODE_ADDRSPACE_SIZE - 1))
#define NODE_ADDRSPACE_SIZE (1UL << AS_SHIFT)
#define NASID_GET(x) (int) (((u64) (x) >> NASID_SHIFT) & NASID_BITMASK)
#define LOCAL_MMR_ADDR(a) (LOCAL_MMR_SPACE | (a))
#define GLOBAL_MMR_ADDR(n,a) (GLOBAL_MMR_SPACE | REMOTE_ADDR(n,a))
#define GLOBAL_MMR_PHYS_ADDR(n,a) (GLOBAL_PHYS_MMR_SPACE | REMOTE_ADDR(n,a))
#define GLOBAL_CAC_ADDR(n,a) (CAC_BASE | REMOTE_ADDR(n,a))
#define CHANGE_NASID(n,x) ((void *)(((u64)(x) & ~NASID_MASK) | NASID_SPACE(n)))
#define IS_TIO_NASID(n) ((n) & 1)
/* non-II mmr's start at top of big window space (4G) */
#define BWIN_TOP 0x0000000100000000UL
/*
* general address defines
*/
#define CAC_BASE (PAGE_OFFSET | AS_CAC_SPACE)
#define AMO_BASE (__IA64_UNCACHED_OFFSET | AS_AMO_SPACE)
#define AMO_PHYS_BASE (RGN_BASE(RGN_HPAGE) | AS_AMO_SPACE)
#define GET_BASE (PAGE_OFFSET | AS_GET_SPACE)
/*
* Convert Memory addresses between various addressing modes.
*/
#define TO_PHYS(x) (TO_PHYS_MASK & (x))
#define TO_CAC(x) (CAC_BASE | TO_PHYS(x))
#ifdef CONFIG_SGI_SN
#define TO_AMO(x) (AMO_BASE | TO_PHYS(x))
#define TO_GET(x) (GET_BASE | TO_PHYS(x))
#else
#define TO_AMO(x) ({ BUG(); x; })
#define TO_GET(x) ({ BUG(); x; })
#endif
/*
* Convert from processor physical address to II/TIO physical address:
* II - squeeze out the AS bits
* TIO- requires a chiplet id in bits 38-39. For DMA to memory,
* the chiplet id is zero. If we implement TIO-TIO dma, we might need
* to insert a chiplet id into this macro. However, it is our belief
* right now that this chiplet id will be ICE, which is also zero.
*/
#define SH1_TIO_PHYS_TO_DMA(x) \
((((u64)(NASID_GET(x))) << 40) | NODE_OFFSET(x))
#define SH2_NETWORK_BANK_OFFSET(x) \
((u64)(x) & ((1UL << (sn_hub_info->nasid_shift - 4)) -1))
#define SH2_NETWORK_BANK_SELECT(x) \
((((u64)(x) & (0x3UL << (sn_hub_info->nasid_shift - 4))) \
>> (sn_hub_info->nasid_shift - 4)) << 36)
#define SH2_NETWORK_ADDRESS(x) \
(SH2_NETWORK_BANK_OFFSET(x) | SH2_NETWORK_BANK_SELECT(x))
#define SH2_TIO_PHYS_TO_DMA(x) \
(((u64)(NASID_GET(x)) << 40) | SH2_NETWORK_ADDRESS(x))
#define PHYS_TO_TIODMA(x) \
(is_shub1() ? SH1_TIO_PHYS_TO_DMA(x) : SH2_TIO_PHYS_TO_DMA(x))
#define PHYS_TO_DMA(x) \
((((u64)(x) & NASID_MASK) >> 2) | NODE_OFFSET(x))
/*
* Macros to test for address type.
*/
#define IS_AMO_ADDRESS(x) (((u64)(x) & (RGN_BITS | AS_MASK)) == AMO_BASE)
#define IS_AMO_PHYS_ADDRESS(x) (((u64)(x) & (RGN_BITS | AS_MASK)) == AMO_PHYS_BASE)
/*
* The following definitions pertain to the IO special address
* space. They define the location of the big and little windows
* of any given node.
*/
#define BWIN_SIZE_BITS 29 /* big window size: 512M */
#define TIO_BWIN_SIZE_BITS 30 /* big window size: 1G */
#define NODE_SWIN_BASE(n, w) ((w == 0) ? NODE_BWIN_BASE((n), SWIN0_BIGWIN) \
: RAW_NODE_SWIN_BASE(n, w))
#define TIO_SWIN_BASE(n, w) (TIO_IO_BASE(n) + \
((u64) (w) << TIO_SWIN_SIZE_BITS))
#define NODE_IO_BASE(n) (GLOBAL_MMR_SPACE | NASID_SPACE(n))
#define TIO_IO_BASE(n) (__IA64_UNCACHED_OFFSET | NASID_SPACE(n))
#define BWIN_SIZE (1UL << BWIN_SIZE_BITS)
#define NODE_BWIN_BASE0(n) (NODE_IO_BASE(n) + BWIN_SIZE)
#define NODE_BWIN_BASE(n, w) (NODE_BWIN_BASE0(n) + ((u64) (w) << BWIN_SIZE_BITS))
#define RAW_NODE_SWIN_BASE(n, w) (NODE_IO_BASE(n) + ((u64) (w) << SWIN_SIZE_BITS))
#define BWIN_WIDGET_MASK 0x7
#define BWIN_WINDOWNUM(x) (((x) >> BWIN_SIZE_BITS) & BWIN_WIDGET_MASK)
#define SH1_IS_BIG_WINDOW_ADDR(x) ((x) & BWIN_TOP)
#define TIO_BWIN_WINDOW_SELECT_MASK 0x7
#define TIO_BWIN_WINDOWNUM(x) (((x) >> TIO_BWIN_SIZE_BITS) & TIO_BWIN_WINDOW_SELECT_MASK)
#define TIO_HWIN_SHIFT_BITS 33
#define TIO_HWIN(x) (NODE_OFFSET(x) >> TIO_HWIN_SHIFT_BITS)
/*
* The following definitions pertain to the IO special address
* space. They define the location of the big and little windows
* of any given node.
*/
#define SWIN_SIZE_BITS 24
#define SWIN_WIDGET_MASK 0xF
#define TIO_SWIN_SIZE_BITS 28
#define TIO_SWIN_SIZE (1UL << TIO_SWIN_SIZE_BITS)
#define TIO_SWIN_WIDGET_MASK 0x3
/*
* Convert smallwindow address to xtalk address.
*
* 'addr' can be physical or virtual address, but will be converted
* to Xtalk address in the range 0 -> SWINZ_SIZEMASK
*/
#define SWIN_WIDGETNUM(x) (((x) >> SWIN_SIZE_BITS) & SWIN_WIDGET_MASK)
#define TIO_SWIN_WIDGETNUM(x) (((x) >> TIO_SWIN_SIZE_BITS) & TIO_SWIN_WIDGET_MASK)
/*
* The following macros produce the correct base virtual address for
* the hub registers. The REMOTE_HUB_* macros produce
* the address for the specified hub's registers. The intent is
* that the appropriate PI, MD, NI, or II register would be substituted
* for x.
*
* WARNING:
* When certain Hub chip workarounds are defined, it's not sufficient
* to dereference the *_HUB_ADDR() macros. You should instead use
* HUB_L() and HUB_S() if you must deal with pointers to hub registers.
* Otherwise, the recommended approach is to use *_HUB_L() and *_HUB_S().
* They're always safe.
*/
/* Shub1 TIO & MMR addressing macros */
#define SH1_TIO_IOSPACE_ADDR(n,x) \
GLOBAL_MMR_ADDR(n,x)
#define SH1_REMOTE_BWIN_MMR(n,x) \
GLOBAL_MMR_ADDR(n,x)
#define SH1_REMOTE_SWIN_MMR(n,x) \
(NODE_SWIN_BASE(n,1) + 0x800000UL + (x))
#define SH1_REMOTE_MMR(n,x) \
(SH1_IS_BIG_WINDOW_ADDR(x) ? SH1_REMOTE_BWIN_MMR(n,x) : \
SH1_REMOTE_SWIN_MMR(n,x))
/* Shub1 TIO & MMR addressing macros */
#define SH2_TIO_IOSPACE_ADDR(n,x) \
((__IA64_UNCACHED_OFFSET | REMOTE_ADDR(n,x) | 1UL << (NASID_SHIFT - 2)))
#define SH2_REMOTE_MMR(n,x) \
GLOBAL_MMR_ADDR(n,x)
/* TIO & MMR addressing macros that work on both shub1 & shub2 */
#define TIO_IOSPACE_ADDR(n,x) \
((u64 *)(is_shub1() ? SH1_TIO_IOSPACE_ADDR(n,x) : \
SH2_TIO_IOSPACE_ADDR(n,x)))
#define SH_REMOTE_MMR(n,x) \
(is_shub1() ? SH1_REMOTE_MMR(n,x) : SH2_REMOTE_MMR(n,x))
#define REMOTE_HUB_ADDR(n,x) \
(IS_TIO_NASID(n) ? ((volatile u64*)TIO_IOSPACE_ADDR(n,x)) : \
((volatile u64*)SH_REMOTE_MMR(n,x)))
#define HUB_L(x) (*((volatile typeof(*x) *)x))
#define HUB_S(x,d) (*((volatile typeof(*x) *)x) = (d))
#define REMOTE_HUB_L(n, a) HUB_L(REMOTE_HUB_ADDR((n), (a)))
#define REMOTE_HUB_S(n, a, d) HUB_S(REMOTE_HUB_ADDR((n), (a)), (d))
/*
* Coretalk address breakdown
*/
#define CTALK_NASID_SHFT 40
#define CTALK_NASID_MASK (0x3FFFULL << CTALK_NASID_SHFT)
#define CTALK_CID_SHFT 38
#define CTALK_CID_MASK (0x3ULL << CTALK_CID_SHFT)
#define CTALK_NODE_OFFSET 0x3FFFFFFFFF
#endif /* _ASM_IA64_SN_ADDRS_H */
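As a worked example of the address format diagrammed at the top of this
file (the NASID shift and width vary by hub generation, so the concrete
values below are illustrative assumptions):

/*
 * Assume a shub1-style NASID_SHIFT of 38.  A global MMR reference to
 * offset 0x1000 on node (nasid) 4 is then composed as:
 *
 *   NASID_SPACE(4)             == 4UL << 38
 *   REMOTE_ADDR(4, 0x1000)     == (4UL << 38) | 0x1000
 *   GLOBAL_MMR_ADDR(4, 0x1000) == GLOBAL_MMR_SPACE | (4UL << 38) | 0x1000
 *
 * i.e. the high bits of GLOBAL_MMR_SPACE select uncached global MMR
 * space, the NASID field routes the reference to node 4, and the low
 * bits are the node-local register offset.
 */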

arch/ia64/include/asm/sn/arch.h

@@ -1,86 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* SGI specific setup.
*
* Copyright (C) 1995-1997,1999,2001-2005 Silicon Graphics, Inc. All rights reserved.
* Copyright (C) 1999 Ralf Baechle (ralf@gnu.org)
*/
#ifndef _ASM_IA64_SN_ARCH_H
#define _ASM_IA64_SN_ARCH_H
#include <linux/numa.h>
#include <asm/types.h>
#include <asm/percpu.h>
#include <asm/sn/types.h>
#include <asm/sn/sn_cpuid.h>
/*
* This is the maximum number of NUMALINK nodes that can be part of a single
* SSI kernel. This number includes C-brick, M-bricks, and TIOs. Nodes in
* remote partitions are NOT included in this number.
* The number of compact nodes cannot exceed size of a coherency domain.
* The purpose of this define is to specify a node count that includes
* all C/M/TIO nodes in an SSI system.
*
* SGI systems can currently support up to 256 C/M nodes plus additional TIO nodes.
*
* Note: ACPI20 has an architectural limit of 256 nodes. When we upgrade
* to ACPI3.0, this limit will be removed. The notion of "compact nodes"
* should be deleted and TIOs should be included in MAX_NUMNODES.
*/
#define MAX_TIO_NODES MAX_NUMNODES
#define MAX_COMPACT_NODES (MAX_NUMNODES + MAX_TIO_NODES)
/*
* Maximum number of nodes in all partitions and in all coherency domains.
* This is the total number of nodes accessible in the numalink fabric. It
* includes all C & M bricks, plus all TIOs.
*
* This value is also the value of the maximum number of NASIDs in the numalink
* fabric.
*/
#define MAX_NUMALINK_NODES 16384
/*
* The following defines attributes of the HUB chip. These attributes are
* frequently referenced. They are kept in the per-cpu data areas of each cpu.
* They are kept together in a struct to minimize cache misses.
*/
struct sn_hub_info_s {
u8 shub2;
u8 nasid_shift;
u8 as_shift;
u8 shub_1_1_found;
u16 nasid_bitmask;
};
DECLARE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
#define sn_hub_info this_cpu_ptr(&__sn_hub_info)
#define is_shub2() (sn_hub_info->shub2)
#define is_shub1() (sn_hub_info->shub2 == 0)
/*
* Use this macro to test if shub 1.1 wars should be enabled
*/
#define enable_shub_wars_1_1() (sn_hub_info->shub_1_1_found)
/*
* Compact node ID to nasid mappings kept in the per-cpu data areas of each
* cpu.
*/
DECLARE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
#define sn_cnodeid_to_nasid this_cpu_ptr(&__sn_cnodeid_to_nasid[0])
extern u8 sn_partition_id;
extern u8 sn_system_size;
extern u8 sn_sharing_domain_size;
extern u8 sn_region_size;
extern void sn_flush_all_caches(long addr, long bytes);
extern bool sn_cpu_disable_allowed(int cpu);
#endif /* _ASM_IA64_SN_ARCH_H */
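The per-cpu data declared here is what makes the is_shub1()/is_shub2()
tests and compact-node lookups cheap: the hub attributes are sampled once
at boot and then only read from the local cpu's data area. A small usage
sketch (illustrative, not code from the tree):

static inline short example_cnodeid_to_nasid(int cnode)
{
	/* Index the per-cpu compact node -> NASID table directly. */
	return sn_cnodeid_to_nasid[cnode];
}

static inline int example_btes_on_this_node(void)
{
	/* The hub generation decides how many block transfer engines
	 * exist; this is what bte.h's BTES_PER_NODE expands to. */
	return is_shub2() ? 4 : 2;
}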

arch/ia64/include/asm/sn/bte.h

@@ -1,236 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2000-2007 Silicon Graphics, Inc. All Rights Reserved.
*/
#ifndef _ASM_IA64_SN_BTE_H
#define _ASM_IA64_SN_BTE_H
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <asm/sn/pda.h>
#include <asm/sn/types.h>
#include <asm/sn/shub_mmr.h>
struct nodepda_s;
#define IBCT_NOTIFY (0x1UL << 4)
#define IBCT_ZFIL_MODE (0x1UL << 0)
/* #define BTE_DEBUG */
/* #define BTE_DEBUG_VERBOSE */
#ifdef BTE_DEBUG
# define BTE_PRINTK(x) printk x /* Terse */
# ifdef BTE_DEBUG_VERBOSE
# define BTE_PRINTKV(x) printk x /* Verbose */
# else
# define BTE_PRINTKV(x)
# endif /* BTE_DEBUG_VERBOSE */
#else
# define BTE_PRINTK(x)
# define BTE_PRINTKV(x)
#endif /* BTE_DEBUG */
/* BTE status register only supports 16 bits for length field */
#define BTE_LEN_BITS (16)
#define BTE_LEN_MASK ((1 << BTE_LEN_BITS) - 1)
#define BTE_MAX_XFER (BTE_LEN_MASK << L1_CACHE_SHIFT)
/* Define hardware */
#define BTES_PER_NODE (is_shub2() ? 4 : 2)
#define MAX_BTES_PER_NODE 4
#define BTE2OFF_CTRL 0
#define BTE2OFF_SRC (SH2_BT_ENG_SRC_ADDR_0 - SH2_BT_ENG_CSR_0)
#define BTE2OFF_DEST (SH2_BT_ENG_DEST_ADDR_0 - SH2_BT_ENG_CSR_0)
#define BTE2OFF_NOTIFY (SH2_BT_ENG_NOTIF_ADDR_0 - SH2_BT_ENG_CSR_0)
#define BTE_BASE_ADDR(interface) \
(is_shub2() ? (interface == 0) ? SH2_BT_ENG_CSR_0 : \
(interface == 1) ? SH2_BT_ENG_CSR_1 : \
(interface == 2) ? SH2_BT_ENG_CSR_2 : \
SH2_BT_ENG_CSR_3 \
: (interface == 0) ? IIO_IBLS0 : IIO_IBLS1)
#define BTE_SOURCE_ADDR(base) \
(is_shub2() ? base + (BTE2OFF_SRC/8) \
: base + (BTEOFF_SRC/8))
#define BTE_DEST_ADDR(base) \
(is_shub2() ? base + (BTE2OFF_DEST/8) \
: base + (BTEOFF_DEST/8))
#define BTE_CTRL_ADDR(base) \
(is_shub2() ? base + (BTE2OFF_CTRL/8) \
: base + (BTEOFF_CTRL/8))
#define BTE_NOTIF_ADDR(base) \
(is_shub2() ? base + (BTE2OFF_NOTIFY/8) \
: base + (BTEOFF_NOTIFY/8))
/* Define hardware modes */
#define BTE_NOTIFY IBCT_NOTIFY
#define BTE_NORMAL BTE_NOTIFY
#define BTE_ZERO_FILL (BTE_NOTIFY | IBCT_ZFIL_MODE)
/* Use a reserved bit to let the caller specify a wait for any BTE */
#define BTE_WACQUIRE 0x4000
/* Use the BTE on the node with the destination memory */
#define BTE_USE_DEST (BTE_WACQUIRE << 1)
/* Use any available BTE interface on any node for the transfer */
#define BTE_USE_ANY (BTE_USE_DEST << 1)
/* macro to force the IBCT0 value valid */
#define BTE_VALID_MODE(x) ((x) & (IBCT_NOTIFY | IBCT_ZFIL_MODE))
#define BTE_ACTIVE (IBLS_BUSY | IBLS_ERROR)
#define BTE_WORD_AVAILABLE (IBLS_BUSY << 1)
#define BTE_WORD_BUSY (~BTE_WORD_AVAILABLE)
/*
* Some macros to simplify reading.
* Start with macros to locate the BTE control registers.
*/
#define BTE_LNSTAT_LOAD(_bte) \
HUB_L(_bte->bte_base_addr)
#define BTE_LNSTAT_STORE(_bte, _x) \
HUB_S(_bte->bte_base_addr, (_x))
#define BTE_SRC_STORE(_bte, _x) \
({ \
u64 __addr = ((_x) & ~AS_MASK); \
if (is_shub2()) \
__addr = SH2_TIO_PHYS_TO_DMA(__addr); \
HUB_S(_bte->bte_source_addr, __addr); \
})
#define BTE_DEST_STORE(_bte, _x) \
({ \
u64 __addr = ((_x) & ~AS_MASK); \
if (is_shub2()) \
__addr = SH2_TIO_PHYS_TO_DMA(__addr); \
HUB_S(_bte->bte_destination_addr, __addr); \
})
#define BTE_CTRL_STORE(_bte, _x) \
HUB_S(_bte->bte_control_addr, (_x))
#define BTE_NOTIF_STORE(_bte, _x) \
({ \
u64 __addr = ia64_tpa((_x) & ~AS_MASK); \
if (is_shub2()) \
__addr = SH2_TIO_PHYS_TO_DMA(__addr); \
HUB_S(_bte->bte_notify_addr, __addr); \
})
#define BTE_START_TRANSFER(_bte, _len, _mode) \
is_shub2() ? BTE_CTRL_STORE(_bte, IBLS_BUSY | (_mode << 24) | _len) \
: BTE_LNSTAT_STORE(_bte, _len); \
BTE_CTRL_STORE(_bte, _mode)
/* Possible results from bte_copy and bte_unaligned_copy */
/* The following error codes map into the BTE hardware codes
* IIO_ICRB_ECODE_* (in shubio.h). The hardware uses
* an error code of 0 (IIO_ICRB_ECODE_DERR), but we want zero
* to mean BTE_SUCCESS, so add one (BTEFAIL_OFFSET) to the error
* codes to give the following error codes.
*/
#define BTEFAIL_OFFSET 1
typedef enum {
BTE_SUCCESS, /* 0 is success */
BTEFAIL_DIR, /* Directory error due to IIO access*/
BTEFAIL_POISON, /* poison error on IO access (write to poison page) */
BTEFAIL_WERR, /* Write error (ie WINV to a Read only line) */
BTEFAIL_ACCESS, /* access error (protection violation) */
BTEFAIL_PWERR, /* Partial Write Error */
BTEFAIL_PRERR, /* Partial Read Error */
BTEFAIL_TOUT, /* CRB Time out */
BTEFAIL_XTERR, /* Incoming xtalk pkt had error bit */
BTEFAIL_NOTAVAIL, /* BTE not available */
} bte_result_t;
#define BTEFAIL_SH2_RESP_SHORT 0x1 /* bit 000001 */
#define BTEFAIL_SH2_RESP_LONG 0x2 /* bit 000010 */
#define BTEFAIL_SH2_RESP_DSP 0x4 /* bit 000100 */
#define BTEFAIL_SH2_RESP_ACCESS 0x8 /* bit 001000 */
#define BTEFAIL_SH2_CRB_TO 0x10 /* bit 010000 */
#define BTEFAIL_SH2_NACK_LIMIT 0x20 /* bit 100000 */
#define BTEFAIL_SH2_ALL 0x3F /* bit 111111 */
#define BTE_ERR_BITS 0x3FUL
#define BTE_ERR_SHIFT 36
#define BTE_ERR_MASK (BTE_ERR_BITS << BTE_ERR_SHIFT)
#define BTE_ERROR_RETRY(value) \
(is_shub2() ? (value != BTEFAIL_SH2_CRB_TO) \
: (value != BTEFAIL_TOUT))
/*
* On shub1 BTE_ERR_MASK will always be false, so no need for is_shub2()
*/
#define BTE_SHUB2_ERROR(_status) \
((_status & BTE_ERR_MASK) \
? (((_status >> BTE_ERR_SHIFT) & BTE_ERR_BITS) | IBLS_ERROR) \
: _status)
#define BTE_GET_ERROR_STATUS(_status) \
(BTE_SHUB2_ERROR(_status) & ~IBLS_ERROR)
#define BTE_VALID_SH2_ERROR(value) \
((value >= BTEFAIL_SH2_RESP_SHORT) && (value <= BTEFAIL_SH2_ALL))
/*
* Structure defining a bte. An instance of this
* structure is created in the nodepda for each
* bte on that node (as defined by BTES_PER_NODE)
* This structure contains everything necessary
* to work with a BTE.
*/
struct bteinfo_s {
volatile u64 notify ____cacheline_aligned;
u64 *bte_base_addr ____cacheline_aligned;
u64 *bte_source_addr;
u64 *bte_destination_addr;
u64 *bte_control_addr;
u64 *bte_notify_addr;
spinlock_t spinlock;
cnodeid_t bte_cnode; /* cnode */
int bte_error_count; /* Number of errors encountered */
int bte_num; /* 0 --> BTE0, 1 --> BTE1 */
int cleanup_active; /* Interface is locked for cleanup */
volatile bte_result_t bh_error; /* error while processing */
volatile u64 *most_rcnt_na;
struct bteinfo_s *btes_to_try[MAX_BTES_PER_NODE];
};
/*
* Function prototypes (functions defined in bte.c, used elsewhere)
*/
extern bte_result_t bte_copy(u64, u64, u64, u64, void *);
extern bte_result_t bte_unaligned_copy(u64, u64, u64, u64);
extern void bte_error_handler(struct nodepda_s *);
#define bte_zero(dest, len, mode, notification) \
bte_copy(0, dest, len, ((mode) | BTE_ZERO_FILL), notification)
/*
* The following is the preferred way of calling bte_unaligned_copy
* If the copy is fully cache line aligned, then bte_copy is
* used instead. Since bte_copy is inlined, this saves a call
* stack. NOTE: bte_copy is called synchronously and does block
* until the transfer is complete. In order to get the asynch
* version of bte_copy, you must perform this check yourself.
*/
#define BTE_UNALIGNED_COPY(src, dest, len, mode) \
(((len & (L1_CACHE_BYTES - 1)) || \
(src & (L1_CACHE_BYTES - 1)) || \
(dest & (L1_CACHE_BYTES - 1))) ? \
bte_unaligned_copy(src, dest, len, mode) : \
bte_copy(src, dest, len, mode, NULL))
#endif /* _ASM_IA64_SN_BTE_H */
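For context, a minimal sketch of how a caller used this API, following the
comments above (the function and its error handling are illustrative;
addresses are physical, and a NULL notification pointer makes bte_copy
block until the transfer completes):

static int example_copy_block(u64 src_phys, u64 dst_phys, u64 nbytes)
{
	bte_result_t ret;

	/* Spin for a free interface (BTE_WACQUIRE) on the destination
	 * node (BTE_USE_DEST) and wait for the copy to finish. */
	ret = bte_copy(src_phys, dst_phys, nbytes,
		       BTE_NORMAL | BTE_WACQUIRE | BTE_USE_DEST, NULL);
	return ret == BTE_SUCCESS ? 0 : -EIO;
}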

arch/ia64/include/asm/sn/clksupport.h

@@ -1,28 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
/*
* This file contains definitions for accessing a platform-supported high-resolution
* clock. The clock is monotonically increasing and can be accessed from any node
* in the system. The clock is synchronized across nodes - all nodes see the
* same value.
*
* RTC_COUNTER_ADDR - contains the address of the counter
*
*/
#ifndef _ASM_IA64_SN_CLKSUPPORT_H
#define _ASM_IA64_SN_CLKSUPPORT_H
extern unsigned long sn_rtc_cycles_per_second;
#define RTC_COUNTER_ADDR ((long *)LOCAL_MMR_ADDR(SH_RTC))
#define rtc_time() (*RTC_COUNTER_ADDR)
#endif /* _ASM_IA64_SN_CLKSUPPORT_H */
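Because the counter is monotonic and synchronized across nodes, timestamps
taken on different nodes can be compared directly; a sketch of converting a
cycle delta to nanoseconds (illustrative):

static inline u64 example_rtc_delta_ns(long t0, long t1)
{
	/* May overflow for very large deltas; fine for short intervals. */
	return (u64)(t1 - t0) * NSEC_PER_SEC / sn_rtc_cycles_per_second;
}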

arch/ia64/include/asm/sn/geo.h

@@ -1,132 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_GEO_H
#define _ASM_IA64_SN_GEO_H
/* The geoid_t implementation below is based loosely on the pcfg_t
implementation in sys/SN/promcfg.h. */
/* Type declarations */
/* Size of a geoid_t structure (must be before decl. of geoid_u) */
#define GEOID_SIZE 8 /* Would 16 be better? The size can
be different on different platforms. */
#define MAX_SLOTS 0xf /* slots per module */
#define MAX_SLABS 0xf /* slabs per slot */
typedef unsigned char geo_type_t;
/* Fields common to all substructures */
typedef struct geo_common_s {
moduleid_t module; /* The module (box) this h/w lives in */
geo_type_t type; /* What type of h/w is named by this geoid_t */
slabid_t slab:4; /* slab (ASIC), 0 .. 15 within slot */
slotid_t slot:4; /* slot (Blade), 0 .. 15 within module */
} geo_common_t;
/* Additional fields for particular types of hardware */
typedef struct geo_node_s {
geo_common_t common; /* No additional fields needed */
} geo_node_t;
typedef struct geo_rtr_s {
geo_common_t common; /* No additional fields needed */
} geo_rtr_t;
typedef struct geo_iocntl_s {
geo_common_t common; /* No additional fields needed */
} geo_iocntl_t;
typedef struct geo_pcicard_s {
geo_iocntl_t common;
char bus; /* Bus/widget number */
char slot; /* PCI slot number */
} geo_pcicard_t;
/* Subcomponents of a node */
typedef struct geo_cpu_s {
geo_node_t node;
char slice; /* Which CPU on the node */
} geo_cpu_t;
typedef struct geo_mem_s {
geo_node_t node;
char membus; /* The memory bus on the node */
char memslot; /* The memory slot on the bus */
} geo_mem_t;
typedef union geoid_u {
geo_common_t common;
geo_node_t node;
geo_iocntl_t iocntl;
geo_pcicard_t pcicard;
geo_rtr_t rtr;
geo_cpu_t cpu;
geo_mem_t mem;
char padsize[GEOID_SIZE];
} geoid_t;
/* Preprocessor macros */
#define GEO_MAX_LEN 48 /* max. formatted length, plus some pad:
module/001c07/slab/5/node/memory/2/slot/4 */
/* Values for geo_type_t */
#define GEO_TYPE_INVALID 0
#define GEO_TYPE_MODULE 1
#define GEO_TYPE_NODE 2
#define GEO_TYPE_RTR 3
#define GEO_TYPE_IOCNTL 4
#define GEO_TYPE_IOCARD 5
#define GEO_TYPE_CPU 6
#define GEO_TYPE_MEM 7
#define GEO_TYPE_MAX (GEO_TYPE_MEM+1)
/* Parameter for hwcfg_format_geoid_compt() */
#define GEO_COMPT_MODULE 1
#define GEO_COMPT_SLAB 2
#define GEO_COMPT_IOBUS 3
#define GEO_COMPT_IOSLOT 4
#define GEO_COMPT_CPU 5
#define GEO_COMPT_MEMBUS 6
#define GEO_COMPT_MEMSLOT 7
#define GEO_INVALID_STR "<invalid>"
#define INVALID_NASID ((nasid_t)-1)
#define INVALID_CNODEID ((cnodeid_t)-1)
#define INVALID_PNODEID ((pnodeid_t)-1)
#define INVALID_SLAB (slabid_t)-1
#define INVALID_SLOT (slotid_t)-1
#define INVALID_MODULE ((moduleid_t)-1)
static inline slabid_t geo_slab(geoid_t g)
{
return (g.common.type == GEO_TYPE_INVALID) ?
INVALID_SLAB : g.common.slab;
}
static inline slotid_t geo_slot(geoid_t g)
{
return (g.common.type == GEO_TYPE_INVALID) ?
INVALID_SLOT : g.common.slot;
}
static inline moduleid_t geo_module(geoid_t g)
{
return (g.common.type == GEO_TYPE_INVALID) ?
INVALID_MODULE : g.common.module;
}
extern geoid_t cnodeid_get_geoid(cnodeid_t cnode);
#endif /* _ASM_IA64_SN_GEO_H */
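A short usage sketch of the accessors above (illustrative): the INVALID_*
sentinels come back for an unset geoid, so a caller can report a node's
physical location without extra validity checks.

static void example_print_location(cnodeid_t cnode)
{
	geoid_t g = cnodeid_get_geoid(cnode);

	printk("cnode %d: module %d slot %d slab %d\n",
	       cnode, geo_module(g), geo_slot(g), geo_slab(g));
}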

arch/ia64/include/asm/sn/intr.h

@@ -9,60 +9,7 @@
#ifndef _ASM_IA64_SN_INTR_H
#define _ASM_IA64_SN_INTR_H
#include <linux/rcupdate.h>
#include <asm/sn/types.h>
#define SGI_UART_VECTOR 0xe9
/* Reserved IRQs : Note, not to exceed IA64_SN2_FIRST_DEVICE_VECTOR */
#define SGI_XPC_ACTIVATE 0x30
#define SGI_II_ERROR 0x31
#define SGI_XBOW_ERROR 0x32
#define SGI_PCIASIC_ERROR 0x33
#define SGI_ACPI_SCI_INT 0x34
#define SGI_TIOCA_ERROR 0x35
#define SGI_TIO_ERROR 0x36
#define SGI_TIOCX_ERROR 0x37
#define SGI_MMTIMER_VECTOR 0x38
#define SGI_XPC_NOTIFY 0xe7
#define IA64_SN2_FIRST_DEVICE_VECTOR 0x3c
#define IA64_SN2_LAST_DEVICE_VECTOR 0xe6
#define SN2_IRQ_RESERVED 0x1
#define SN2_IRQ_CONNECTED 0x2
#define SN2_IRQ_SHARED 0x4
// The SN PROM irq struct
struct sn_irq_info {
struct sn_irq_info *irq_next; /* deprecated DO NOT USE */
short irq_nasid; /* Nasid IRQ is assigned to */
int irq_slice; /* slice IRQ is assigned to */
int irq_cpuid; /* kernel logical cpuid */
int irq_irq; /* the IRQ number */
int irq_int_bit; /* Bridge interrupt pin */
/* <0 means MSI */
u64 irq_xtalkaddr; /* xtalkaddr IRQ is sent to */
int irq_bridge_type;/* pciio asic type (pciio.h) */
void *irq_bridge; /* bridge generating irq */
void *irq_pciioinfo; /* associated pciio_info_t */
int irq_last_intr; /* For Shub lb lost intr WAR */
int irq_cookie; /* unique cookie */
int irq_flags; /* flags */
int irq_share_cnt; /* num devices sharing IRQ */
struct list_head list; /* list of sn_irq_info structs */
struct rcu_head rcu; /* rcu callback list */
};
extern void sn_send_IPI_phys(int, long, int, int);
extern u64 sn_intr_alloc(nasid_t, int,
struct sn_irq_info *,
int, nasid_t, int);
extern void sn_intr_free(nasid_t, int, struct sn_irq_info *);
extern struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *, nasid_t, int);
extern void sn_set_err_irq_affinity(unsigned int);
extern struct list_head **sn_irq_lh;
#define CPU_VECTOR_TO_IRQ(cpuid,vector) (vector)
#endif /* _ASM_IA64_SN_INTR_H */
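Reading the surviving defines together gives the sn2 vector layout; since
CPU_VECTOR_TO_IRQ maps vectors to IRQ numbers 1:1, these are IRQ ranges as
well (a summary derived from the values above):

/*
 * 0x30 - 0x38  reserved platform vectors (XPC activate, error
 *              interrupts, ACPI SCI, mmtimer), kept below
 *              IA64_SN2_FIRST_DEVICE_VECTOR as the comment requires
 * 0x3c - 0xe6  device vectors
 * 0xe7         SGI_XPC_NOTIFY
 * 0xe9         SGI_UART_VECTOR
 */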

arch/ia64/include/asm/sn/io.h

@@ -1,274 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_SN_IO_H
#define _ASM_SN_IO_H
#include <linux/compiler.h>
#include <asm/intrinsics.h>
extern void * sn_io_addr(unsigned long port) __attribute_const__; /* Forward declaration */
extern void __sn_mmiowb(void); /* Forward declaration */
extern int num_cnodes;
#define __sn_mf_a() ia64_mfa()
extern void sn_dma_flush(unsigned long);
#define __sn_inb ___sn_inb
#define __sn_inw ___sn_inw
#define __sn_inl ___sn_inl
#define __sn_outb ___sn_outb
#define __sn_outw ___sn_outw
#define __sn_outl ___sn_outl
#define __sn_readb ___sn_readb
#define __sn_readw ___sn_readw
#define __sn_readl ___sn_readl
#define __sn_readq ___sn_readq
#define __sn_readb_relaxed ___sn_readb_relaxed
#define __sn_readw_relaxed ___sn_readw_relaxed
#define __sn_readl_relaxed ___sn_readl_relaxed
#define __sn_readq_relaxed ___sn_readq_relaxed
/*
* Convenience macros for setting/clearing bits using the above accessors
*/
#define __sn_setq_relaxed(addr, val) \
writeq((__sn_readq_relaxed(addr) | (val)), (addr))
#define __sn_clrq_relaxed(addr, val) \
writeq((__sn_readq_relaxed(addr) & ~(val)), (addr))
/*
* The following routines are SN Platform specific, called when
* a reference is made to the inX/outX set of macros. The SN Platform
* inX set of macros ensures that posted DMA writes on the
* Bridge are flushed.
*
* The routines should be self-explanatory.
*/
static inline unsigned int
___sn_inb (unsigned long port)
{
volatile unsigned char *addr;
unsigned char ret = -1;
if ((addr = sn_io_addr(port))) {
ret = *addr;
__sn_mf_a();
sn_dma_flush((unsigned long)addr);
}
return ret;
}
static inline unsigned int
___sn_inw (unsigned long port)
{
volatile unsigned short *addr;
unsigned short ret = -1;
if ((addr = sn_io_addr(port))) {
ret = *addr;
__sn_mf_a();
sn_dma_flush((unsigned long)addr);
}
return ret;
}
static inline unsigned int
___sn_inl (unsigned long port)
{
volatile unsigned int *addr;
unsigned int ret = -1;
if ((addr = sn_io_addr(port))) {
ret = *addr;
__sn_mf_a();
sn_dma_flush((unsigned long)addr);
}
return ret;
}
static inline void
___sn_outb (unsigned char val, unsigned long port)
{
volatile unsigned char *addr;
if ((addr = sn_io_addr(port))) {
*addr = val;
__sn_mmiowb();
}
}
static inline void
___sn_outw (unsigned short val, unsigned long port)
{
volatile unsigned short *addr;
if ((addr = sn_io_addr(port))) {
*addr = val;
__sn_mmiowb();
}
}
static inline void
___sn_outl (unsigned int val, unsigned long port)
{
volatile unsigned int *addr;
if ((addr = sn_io_addr(port))) {
*addr = val;
__sn_mmiowb();
}
}
/*
* The following routines are SN Platform specific, called when
* a reference is made to the readX/writeX set of macros. The SN Platform
* readX set of macros ensures that posted DMA writes on the
* Bridge are flushed.
*
* The routines should be self-explanatory.
*/
static inline unsigned char
___sn_readb (const volatile void __iomem *addr)
{
unsigned char val;
val = *(volatile unsigned char __force *)addr;
__sn_mf_a();
sn_dma_flush((unsigned long)addr);
return val;
}
static inline unsigned short
___sn_readw (const volatile void __iomem *addr)
{
unsigned short val;
val = *(volatile unsigned short __force *)addr;
__sn_mf_a();
sn_dma_flush((unsigned long)addr);
return val;
}
static inline unsigned int
___sn_readl (const volatile void __iomem *addr)
{
unsigned int val;
val = *(volatile unsigned int __force *)addr;
__sn_mf_a();
sn_dma_flush((unsigned long)addr);
return val;
}
static inline unsigned long
___sn_readq (const volatile void __iomem *addr)
{
unsigned long val;
val = *(volatile unsigned long __force *)addr;
__sn_mf_a();
sn_dma_flush((unsigned long)addr);
return val;
}
/*
* For generic and SN2 kernels, we have a set of fast access
* PIO macros. These macros are provided on the SN Platform
* because the normal inX and readX macros perform the
* additional task of flushing posted DMA requests on the Bridge.
*
* These routines should be self-explanatory.
*/
static inline unsigned int
sn_inb_fast (unsigned long port)
{
volatile unsigned char *addr = (unsigned char *)port;
unsigned char ret;
ret = *addr;
__sn_mf_a();
return ret;
}
static inline unsigned int
sn_inw_fast (unsigned long port)
{
volatile unsigned short *addr = (unsigned short *)port;
unsigned short ret;
ret = *addr;
__sn_mf_a();
return ret;
}
static inline unsigned int
sn_inl_fast (unsigned long port)
{
volatile unsigned int *addr = (unsigned int *)port;
unsigned int ret;
ret = *addr;
__sn_mf_a();
return ret;
}
static inline unsigned char
___sn_readb_relaxed (const volatile void __iomem *addr)
{
return *(volatile unsigned char __force *)addr;
}
static inline unsigned short
___sn_readw_relaxed (const volatile void __iomem *addr)
{
return *(volatile unsigned short __force *)addr;
}
static inline unsigned int
___sn_readl_relaxed (const volatile void __iomem *addr)
{
return *(volatile unsigned int __force *) addr;
}
static inline unsigned long
___sn_readq_relaxed (const volatile void __iomem *addr)
{
return *(volatile unsigned long __force *) addr;
}
struct pci_dev;
static inline int
sn_pci_set_vchan(struct pci_dev *pci_dev, unsigned long *addr, int vchan)
{
if (vchan > 1) {
return -1;
}
if (!(*addr >> 32)) /* Using a mask here would be cleaner */
return 0; /* but this generates better code */
if (vchan == 1) {
/* Set Bit 57 */
*addr |= (1UL << 57);
} else {
/* Clear Bit 57 */
*addr &= ~(1UL << 57);
}
return 0;
}
#endif /* _ASM_SN_IO_H */
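The bit-57 trick in sn_pci_set_vchan is worth a usage sketch (illustrative;
example_start_dma() is a hypothetical consumer): only 64-bit bus addresses
carry a channel-select bit, which is why 32-bit addresses fall through
untouched.

static void example_dma_on_vchan1(struct pci_dev *pdev, unsigned long addr)
{
	/* Steer this transfer onto virtual channel 1; sets bit 57 of
	 * a 64-bit address, and any vchan > 1 would fail. */
	if (sn_pci_set_vchan(pdev, &addr, 1) == 0)
		example_start_dma(pdev, addr);	/* hypothetical */
}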

arch/ia64/include/asm/sn/ioc3.h

@@ -1,242 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2005 Silicon Graphics, Inc.
*/
#ifndef IA64_SN_IOC3_H
#define IA64_SN_IOC3_H
/* serial port register map */
struct ioc3_serialregs {
uint32_t sscr;
uint32_t stpir;
uint32_t stcir;
uint32_t srpir;
uint32_t srcir;
uint32_t srtr;
uint32_t shadow;
};
/* SUPERIO uart register map */
struct ioc3_uartregs {
char iu_lcr;
union {
char iir; /* read only */
char fcr; /* write only */
} u3;
union {
char ier; /* DLAB == 0 */
char dlm; /* DLAB == 1 */
} u2;
union {
char rbr; /* read only, DLAB == 0 */
char thr; /* write only, DLAB == 0 */
char dll; /* DLAB == 1 */
} u1;
char iu_scr;
char iu_msr;
char iu_lsr;
char iu_mcr;
};
#define iu_rbr u1.rbr
#define iu_thr u1.thr
#define iu_dll u1.dll
#define iu_ier u2.ier
#define iu_dlm u2.dlm
#define iu_iir u3.iir
#define iu_fcr u3.fcr
struct ioc3_sioregs {
char fill[0x170];
struct ioc3_uartregs uartb;
struct ioc3_uartregs uarta;
};
/* PCI IO/mem space register map */
struct ioc3 {
uint32_t pci_id;
uint32_t pci_scr;
uint32_t pci_rev;
uint32_t pci_lat;
uint32_t pci_addr;
uint32_t pci_err_addr_l;
uint32_t pci_err_addr_h;
uint32_t sio_ir;
/* these registers are read-only for general kernel code. To
* modify them use the functions in ioc3.c
*/
uint32_t sio_ies;
uint32_t sio_iec;
uint32_t sio_cr;
uint32_t int_out;
uint32_t mcr;
uint32_t gpcr_s;
uint32_t gpcr_c;
uint32_t gpdr;
uint32_t gppr[9];
char fill[0x4c];
/* serial port registers */
uint32_t sbbr_h;
uint32_t sbbr_l;
struct ioc3_serialregs port_a;
struct ioc3_serialregs port_b;
char fill1[0x1ff10];
/* superio registers */
struct ioc3_sioregs sregs;
};
/* These don't exist on the ioc3 serial card... */
#define eier fill1[8]
#define eisr fill1[4]
#define PCI_LAT 0xc /* Latency Timer */
#define PCI_SCR_DROP_MODE_EN 0x00008000 /* drop pios on parity err */
#define UARTA_BASE 0x178
#define UARTB_BASE 0x170
/* bitmasks for serial RX status byte */
#define RXSB_OVERRUN 0x01 /* char(s) lost */
#define RXSB_PAR_ERR 0x02 /* parity error */
#define RXSB_FRAME_ERR 0x04 /* framing error */
#define RXSB_BREAK 0x08 /* break character */
#define RXSB_CTS 0x10 /* state of CTS */
#define RXSB_DCD 0x20 /* state of DCD */
#define RXSB_MODEM_VALID 0x40 /* DCD, CTS and OVERRUN are valid */
#define RXSB_DATA_VALID 0x80 /* FRAME_ERR PAR_ERR & BREAK valid */
/* bitmasks for serial TX control byte */
#define TXCB_INT_WHEN_DONE 0x20 /* interrupt after this byte is sent */
#define TXCB_INVALID 0x00 /* byte is invalid */
#define TXCB_VALID 0x40 /* byte is valid */
#define TXCB_MCR 0x80 /* data<7:0> to modem cntrl register */
#define TXCB_DELAY 0xc0 /* delay data<7:0> mSec */
/* bitmasks for SBBR_L */
#define SBBR_L_SIZE 0x00000001 /* 0 1KB rings, 1 4KB rings */
/* bitmasks for SSCR_<A:B> */
#define SSCR_RX_THRESHOLD 0x000001ff /* hiwater mark */
#define SSCR_TX_TIMER_BUSY 0x00010000 /* TX timer in progress */
#define SSCR_HFC_EN 0x00020000 /* h/w flow cntrl enabled */
#define SSCR_RX_RING_DCD 0x00040000 /* postRX record on delta-DCD */
#define SSCR_RX_RING_CTS 0x00080000 /* postRX record on delta-CTS */
#define SSCR_HIGH_SPD 0x00100000 /* 4X speed */
#define SSCR_DIAG 0x00200000 /* bypass clock divider */
#define SSCR_RX_DRAIN 0x08000000 /* drain RX buffer to memory */
#define SSCR_DMA_EN 0x10000000 /* enable ring buffer DMA */
#define SSCR_DMA_PAUSE 0x20000000 /* pause DMA */
#define SSCR_PAUSE_STATE 0x40000000 /* set when PAUSE takes effect*/
#define SSCR_RESET 0x80000000 /* reset DMA channels */
/* all producer/consumer pointers are the same bitfield */
#define PROD_CONS_PTR_4K 0x00000ff8 /* for 4K buffers */
#define PROD_CONS_PTR_1K 0x000003f8 /* for 1K buffers */
#define PROD_CONS_PTR_OFF 3
/* bitmasks for SRCIR_<A:B> */
#define SRCIR_ARM 0x80000000 /* arm RX timer */
/* bitmasks for SHADOW_<A:B> */
#define SHADOW_DR 0x00000001 /* data ready */
#define SHADOW_OE 0x00000002 /* overrun error */
#define SHADOW_PE 0x00000004 /* parity error */
#define SHADOW_FE 0x00000008 /* framing error */
#define SHADOW_BI 0x00000010 /* break interrupt */
#define SHADOW_THRE 0x00000020 /* transmit holding reg empty */
#define SHADOW_TEMT 0x00000040 /* transmit shift reg empty */
#define SHADOW_RFCE 0x00000080 /* char in RX fifo has error */
#define SHADOW_DCTS 0x00010000 /* delta clear to send */
#define SHADOW_DDCD 0x00080000 /* delta data carrier detect */
#define SHADOW_CTS 0x00100000 /* clear to send */
#define SHADOW_DCD 0x00800000 /* data carrier detect */
#define SHADOW_DTR 0x01000000 /* data terminal ready */
#define SHADOW_RTS 0x02000000 /* request to send */
#define SHADOW_OUT1 0x04000000 /* 16550 OUT1 bit */
#define SHADOW_OUT2 0x08000000 /* 16550 OUT2 bit */
#define SHADOW_LOOP 0x10000000 /* loopback enabled */
/* bitmasks for SRTR_<A:B> */
#define SRTR_CNT 0x00000fff /* reload value for RX timer */
#define SRTR_CNT_VAL 0x0fff0000 /* current value of RX timer */
#define SRTR_CNT_VAL_SHIFT 16
#define SRTR_HZ 16000 /* SRTR clock frequency */
/* bitmasks for SIO_IR, SIO_IEC and SIO_IES */
#define SIO_IR_SA_TX_MT 0x00000001 /* Serial port A TX empty */
#define SIO_IR_SA_RX_FULL 0x00000002 /* port A RX buf full */
#define SIO_IR_SA_RX_HIGH 0x00000004 /* port A RX hiwat */
#define SIO_IR_SA_RX_TIMER 0x00000008 /* port A RX timeout */
#define SIO_IR_SA_DELTA_DCD 0x00000010 /* port A delta DCD */
#define SIO_IR_SA_DELTA_CTS 0x00000020 /* port A delta CTS */
#define SIO_IR_SA_INT 0x00000040 /* port A pass-thru intr */
#define SIO_IR_SA_TX_EXPLICIT 0x00000080 /* port A explicit TX thru */
#define SIO_IR_SA_MEMERR 0x00000100 /* port A PCI error */
#define SIO_IR_SB_TX_MT 0x00000200
#define SIO_IR_SB_RX_FULL 0x00000400
#define SIO_IR_SB_RX_HIGH 0x00000800
#define SIO_IR_SB_RX_TIMER 0x00001000
#define SIO_IR_SB_DELTA_DCD 0x00002000
#define SIO_IR_SB_DELTA_CTS 0x00004000
#define SIO_IR_SB_INT 0x00008000
#define SIO_IR_SB_TX_EXPLICIT 0x00010000
#define SIO_IR_SB_MEMERR 0x00020000
#define SIO_IR_PP_INT 0x00040000 /* P port pass-thru intr */
#define SIO_IR_PP_INTA 0x00080000 /* PP context A thru */
#define SIO_IR_PP_INTB 0x00100000 /* PP context B thru */
#define SIO_IR_PP_MEMERR 0x00200000 /* PP PCI error */
#define SIO_IR_KBD_INT 0x00400000 /* kbd/mouse intr */
#define SIO_IR_RT_INT 0x08000000 /* RT output pulse */
#define SIO_IR_GEN_INT1 0x10000000 /* RT input pulse */
#define SIO_IR_GEN_INT_SHIFT 28
/* per device interrupt masks */
#define SIO_IR_SA (SIO_IR_SA_TX_MT | \
SIO_IR_SA_RX_FULL | \
SIO_IR_SA_RX_HIGH | \
SIO_IR_SA_RX_TIMER | \
SIO_IR_SA_DELTA_DCD | \
SIO_IR_SA_DELTA_CTS | \
SIO_IR_SA_INT | \
SIO_IR_SA_TX_EXPLICIT | \
SIO_IR_SA_MEMERR)
#define SIO_IR_SB (SIO_IR_SB_TX_MT | \
SIO_IR_SB_RX_FULL | \
SIO_IR_SB_RX_HIGH | \
SIO_IR_SB_RX_TIMER | \
SIO_IR_SB_DELTA_DCD | \
SIO_IR_SB_DELTA_CTS | \
SIO_IR_SB_INT | \
SIO_IR_SB_TX_EXPLICIT | \
SIO_IR_SB_MEMERR)
#define SIO_IR_PP (SIO_IR_PP_INT | SIO_IR_PP_INTA | \
SIO_IR_PP_INTB | SIO_IR_PP_MEMERR)
#define SIO_IR_RT (SIO_IR_RT_INT | SIO_IR_GEN_INT1)
/* bitmasks for SIO_CR */
#define SIO_CR_CMD_PULSE_SHIFT 15
#define SIO_CR_SER_A_BASE_SHIFT 1
#define SIO_CR_SER_B_BASE_SHIFT 8
#define SIO_CR_ARB_DIAG 0x00380000 /* cur !enet PCI request (ro) */
#define SIO_CR_ARB_DIAG_TXA 0x00000000
#define SIO_CR_ARB_DIAG_RXA 0x00080000
#define SIO_CR_ARB_DIAG_TXB 0x00100000
#define SIO_CR_ARB_DIAG_RXB 0x00180000
#define SIO_CR_ARB_DIAG_PP 0x00200000
#define SIO_CR_ARB_DIAG_IDLE 0x00400000 /* 0 -> active request (ro) */
/* defs for some of the generic I/O pins */
#define GPCR_PHY_RESET 0x20 /* pin is output to PHY reset */
#define GPCR_UARTB_MODESEL 0x40 /* pin is output to port B mode sel */
#define GPCR_UARTA_MODESEL 0x80 /* pin is output to port A mode sel */
#define GPPR_PHY_RESET_PIN 5 /* GIO pin controlling phy reset */
#define GPPR_UARTB_MODESEL_PIN 6 /* GIO pin cntrling uartb modeselect */
#define GPPR_UARTA_MODESEL_PIN 7 /* GIO pin cntrling uarta modeselect */
#endif /* IA64_SN_IOC3_H */
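The u1/u2/u3 unions above model the 16550's DLAB-dependent register
overlay; a sketch of programming the baud divisor through them
(illustrative; a real driver would go through proper MMIO accessors rather
than plain struct stores):

static void example_set_divisor(struct ioc3_uartregs *uart, unsigned int div)
{
	uart->iu_lcr |= 0x80;		/* set DLAB: u1/u2 now mean dll/dlm */
	uart->iu_dll = div & 0xff;
	uart->iu_dlm = (div >> 8) & 0xff;
	uart->iu_lcr &= ~0x80;		/* clear DLAB: back to rbr/thr/ier */
}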

arch/ia64/include/asm/sn/klconfig.h

@@ -1,246 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Derived from IRIX <sys/SN/klconfig.h>.
*
* Copyright (C) 1992-1997,1999,2001-2004 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (C) 1999 by Ralf Baechle
*/
#ifndef _ASM_IA64_SN_KLCONFIG_H
#define _ASM_IA64_SN_KLCONFIG_H
/*
* The KLCONFIG structures store info about the various BOARDs found
* during Hardware Discovery. In addition, it stores info about the
* components found on the BOARDs.
*/
typedef s32 klconf_off_t;
/* Functions/macros needed to use this structure */
typedef struct kl_config_hdr {
char pad[20];
klconf_off_t ch_board_info; /* the link list of boards */
char pad0[88];
} kl_config_hdr_t;
#define NODE_OFFSET_TO_LBOARD(nasid,off) (lboard_t*)(GLOBAL_CAC_ADDR((nasid), (off)))
/*
* The KLCONFIG area is organized as a LINKED LIST of BOARDs. A BOARD
* can be either 'LOCAL' or 'REMOTE'. LOCAL means it is attached to
* the LOCAL/current NODE. REMOTE means it is attached to a different
* node.(TBD - Need a way to treat ROUTER boards.)
*
* There are 2 different structures to represent these boards -
* lboard - Local board, rboard - remote board. These 2 structures
* can be arbitrarily mixed in the LINKED LIST of BOARDs. (Refer
* Figure below). The first byte of the rboard or lboard structure
* is used to find out its type - no unions are used.
* If it is a lboard, then the config info of this board will be found
* on the local node. (LOCAL NODE BASE + offset value gives pointer to
* the structure.
* If it is a rboard, the local structure contains the node number
* and the offset of the beginning of the LINKED LIST on the remote node.
* The details of the hardware on a remote node can be built locally,
* if required, by reading the LINKED LIST on the remote node and
* ignoring all the rboards on that node.
*
* The local node uses the REMOTE NODE NUMBER + OFFSET to point to the
* First board info on the remote node. The remote node list is
* traversed as the local list, using the REMOTE BASE ADDRESS and not
* the local base address and ignoring all rboard values.
*
*
KLCONFIG
+------------+ +------------+ +------------+ +------------+
| lboard | +-->| lboard | +-->| rboard | +-->| lboard |
+------------+ | +------------+ | +------------+ | +------------+
| board info | | | board info | | |errinfo,bptr| | | board info |
+------------+ | +------------+ | +------------+ | +------------+
| offset |--+ | offset |--+ | offset |--+ |offset=NULL |
+------------+ +------------+ +------------+ +------------+
+------------+
| board info |
+------------+ +--------------------------------+
| compt 1 |------>| type, rev, diaginfo, size ... | (CPU)
+------------+ +--------------------------------+
| compt 2 |--+
+------------+ | +--------------------------------+
| ... | +--->| type, rev, diaginfo, size ... | (MEM_BANK)
+------------+ +--------------------------------+
| errinfo |--+
+------------+ | +--------------------------------+
+--->|r/l brd errinfo,compt err flags |
+--------------------------------+
*
* Each BOARD consists of COMPONENTs and the BOARD structure has
* pointers (offsets) to its COMPONENT structure.
* The COMPONENT structure has version info, size and speed info, revision,
* error info and the NIC info. This structure can accommodate any
* BOARD with arbitrary COMPONENT composition.
*
* The ERRORINFO part of each BOARD has error information
* that describes errors about the BOARD itself. It also has flags to
* indicate the COMPONENT(s) on the board that have errors. The error
* information specific to the COMPONENT is present in the respective
* COMPONENT structure.
*
* The ERRORINFO structure is also treated like a COMPONENT, ie. the
* BOARD has pointers(offset) to the ERRORINFO structure. The rboard
* structure also has a pointer to the ERRORINFO structure. This is
* the place to store ERRORINFO about a REMOTE NODE, if the HUB on
* that NODE is not working or if the REMOTE MEMORY is BAD. In cases where
* only the CPU of the REMOTE NODE is disabled, the ERRORINFO pointer can
* be a NODE NUMBER, REMOTE OFFSET combination, pointing to error info
* which is present on the REMOTE NODE.(TBD)
* REMOTE ERRINFO can be stored on any of the nearest nodes
* or on all the nearest nodes.(TBD)
* Like BOARD structures, REMOTE ERRINFO structures can be built locally
* using the rboard errinfo pointer.
*
* In order to get useful information from this Data organization, a set of
* interface routines are provided (TBD). The important thing to remember while
* manipulating the structures, is that, the NODE number information should
* be used. If the NODE is non-zero (remote) then each offset should
* be added to the REMOTE BASE ADDR else it should be added to the LOCAL BASE ADDR.
* This includes offsets for BOARDS, COMPONENTS and ERRORINFO.
*
* Note that these structures do not provide much info about connectivity.
* That info will be part of HWGRAPH, which is an extension of the cfg_t
* data structure. (ref IP27prom/cfg.h) It has to be extended to include
* the IO part of the Network(TBD).
*
* The data structures below define the above concepts.
*/
/*
* BOARD classes
*/
#define KLCLASS_MASK 0xf0
#define KLCLASS_NONE 0x00
#define KLCLASS_NODE 0x10 /* CPU, Memory and HUB board */
#define KLCLASS_CPU KLCLASS_NODE
#define KLCLASS_IO 0x20 /* BaseIO, 4 ch SCSI, ethernet, FDDI
and the non-graphics widget boards */
#define KLCLASS_ROUTER 0x30 /* Router board */
#define KLCLASS_MIDPLANE 0x40 /* We need to treat this as a board
so that we can record error info */
#define KLCLASS_IOBRICK 0x70 /* IP35 iobrick */
#define KLCLASS_MAX 8 /* Bump this if a new CLASS is added */
#define KLCLASS(_x) ((_x) & KLCLASS_MASK)
/*
* board types
*/
#define KLTYPE_MASK 0x0f
#define KLTYPE(_x) ((_x) & KLTYPE_MASK)
#define KLTYPE_SNIA (KLCLASS_CPU | 0x1)
#define KLTYPE_TIO (KLCLASS_CPU | 0x2)
#define KLTYPE_ROUTER (KLCLASS_ROUTER | 0x1)
#define KLTYPE_META_ROUTER (KLCLASS_ROUTER | 0x3)
#define KLTYPE_REPEATER_ROUTER (KLCLASS_ROUTER | 0x4)
#define KLTYPE_IOBRICK_XBOW (KLCLASS_MIDPLANE | 0x2)
#define KLTYPE_IOBRICK (KLCLASS_IOBRICK | 0x0)
#define KLTYPE_NBRICK (KLCLASS_IOBRICK | 0x4)
#define KLTYPE_PXBRICK (KLCLASS_IOBRICK | 0x6)
#define KLTYPE_IXBRICK (KLCLASS_IOBRICK | 0x7)
#define KLTYPE_CGBRICK (KLCLASS_IOBRICK | 0x8)
#define KLTYPE_OPUSBRICK (KLCLASS_IOBRICK | 0x9)
#define KLTYPE_SABRICK (KLCLASS_IOBRICK | 0xa)
#define KLTYPE_IABRICK (KLCLASS_IOBRICK | 0xb)
#define KLTYPE_PABRICK (KLCLASS_IOBRICK | 0xc)
#define KLTYPE_GABRICK (KLCLASS_IOBRICK | 0xd)
/*
* board structures
*/
#define MAX_COMPTS_PER_BRD 24
typedef struct lboard_s {
klconf_off_t brd_next_any; /* Next BOARD */
unsigned char struct_type; /* type of structure, local or remote */
unsigned char brd_type; /* type+class */
unsigned char brd_sversion; /* version of this structure */
unsigned char brd_brevision; /* board revision */
unsigned char brd_promver; /* board prom version, if any */
unsigned char brd_flags; /* Enabled, Disabled etc */
unsigned char brd_slot; /* slot number */
unsigned short brd_debugsw; /* Debug switches */
geoid_t brd_geoid; /* geo id */
partid_t brd_partition; /* Partition number */
unsigned short brd_diagval; /* diagnostic value */
unsigned short brd_diagparm; /* diagnostic parameter */
unsigned char brd_inventory; /* inventory history */
unsigned char brd_numcompts; /* Number of components */
nic_t brd_nic; /* Number in CAN */
nasid_t brd_nasid; /* passed parameter */
klconf_off_t brd_compts[MAX_COMPTS_PER_BRD]; /* pointers to COMPONENTS */
klconf_off_t brd_errinfo; /* Board's error information */
struct lboard_s *brd_parent; /* Logical parent for this brd */
char pad0[4];
unsigned char brd_confidence; /* confidence that the board is bad */
nasid_t brd_owner; /* who owns this board */
unsigned char brd_nic_flags; /* To handle 8 more NICs */
char pad1[24]; /* future expansion */
char brd_name[32];
nasid_t brd_next_same_host; /* host of next brd w/same nasid */
klconf_off_t brd_next_same; /* Next BOARD with same nasid */
} lboard_t;
/*
* Generic info structure. This stores common info about a
* component.
*/
typedef struct klinfo_s { /* Generic info */
unsigned char struct_type; /* type of this structure */
unsigned char struct_version; /* version of this structure */
unsigned char flags; /* Enabled, disabled etc */
unsigned char revision; /* component revision */
unsigned short diagval; /* result of diagnostics */
unsigned short diagparm; /* diagnostic parameter */
unsigned char inventory; /* previous inventory status */
unsigned short partid; /* widget part number */
	nic_t nic; /* Must be aligned properly */
unsigned char physid; /* physical id of component */
unsigned int virtid; /* virtual id as seen by system */
unsigned char widid; /* Widget id - if applicable */
nasid_t nasid; /* node number - from parent */
char pad1; /* pad out structure. */
char pad2; /* pad out structure. */
void *data;
klconf_off_t errinfo; /* component specific errors */
unsigned short pad3; /* pci fields have moved over to */
unsigned short pad4; /* klbri_t */
} klinfo_t ;
static inline lboard_t *find_lboard_next(lboard_t * brd)
{
if (brd && brd->brd_next_any)
return NODE_OFFSET_TO_LBOARD(NASID_GET(brd), brd->brd_next_any);
return NULL;
}
#endif /* _ASM_IA64_SN_KLCONFIG_H */
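A hedged usage sketch of the iterator above; how the first board is obtained is platform-specific and outside this header, so it is taken as a parameter:

/* Sketch: visit every board reachable from a starting lboard_t. */
static void for_each_board(lboard_t *first)
{
	lboard_t *brd;

	for (brd = first; brd; brd = find_lboard_next(brd))
		;	/* inspect brd->brd_type, brd->brd_nasid, ... */
}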

View file

@ -1,51 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992-1997,2000-2004 Silicon Graphics, Inc. All Rights Reserved.
*/
#ifndef _ASM_IA64_SN_L1_H
#define _ASM_IA64_SN_L1_H
/* brick type response codes */
#define L1_BRICKTYPE_PX 0x23 /* # */
#define L1_BRICKTYPE_PE 0x25 /* % */
#define L1_BRICKTYPE_N_p0 0x26 /* & */
#define L1_BRICKTYPE_IP45 0x34 /* 4 */
#define L1_BRICKTYPE_IP41 0x35 /* 5 */
#define L1_BRICKTYPE_TWISTER 0x36 /* 6 */ /* IP53 & ROUTER */
#define L1_BRICKTYPE_IX 0x3d /* = */
#define L1_BRICKTYPE_IP34 0x61 /* a */
#define L1_BRICKTYPE_GA 0x62 /* b */
#define L1_BRICKTYPE_C 0x63 /* c */
#define L1_BRICKTYPE_OPUS_TIO 0x66 /* f */
#define L1_BRICKTYPE_I 0x69 /* i */
#define L1_BRICKTYPE_N 0x6e /* n */
#define L1_BRICKTYPE_OPUS 0x6f /* o */
#define L1_BRICKTYPE_P 0x70 /* p */
#define L1_BRICKTYPE_R 0x72 /* r */
#define L1_BRICKTYPE_CHI_CG 0x76 /* v */
#define L1_BRICKTYPE_X 0x78 /* x */
#define L1_BRICKTYPE_X2 0x79 /* y */
#define L1_BRICKTYPE_SA 0x5e /* ^ */
#define L1_BRICKTYPE_PA 0x6a /* j */
#define L1_BRICKTYPE_IA 0x6b /* k */
#define L1_BRICKTYPE_ATHENA 0x2b /* + */
#define L1_BRICKTYPE_DAYTONA 0x7a /* z */
#define L1_BRICKTYPE_1932 0x2c /* . */
#define L1_BRICKTYPE_191010 0x2e /* , */
/* board type response codes */
#define L1_BOARDTYPE_IP69 0x0100 /* CA */
#define L1_BOARDTYPE_IP63 0x0200 /* CB */
#define L1_BOARDTYPE_BASEIO 0x0300 /* IB */
#define L1_BOARDTYPE_PCIE2SLOT 0x0400 /* IC */
#define L1_BOARDTYPE_PCIX3SLOT 0x0500 /* ID */
#define L1_BOARDTYPE_PCIXPCIE4SLOT 0x0600 /* IE */
#define L1_BOARDTYPE_ABACUS 0x0700 /* AB */
#define L1_BOARDTYPE_DAYTONA 0x0800 /* AD */
#define L1_BOARDTYPE_INVAL (-1) /* invalid brick type */
#endif /* _ASM_IA64_SN_L1_H */

View file

@ -1,33 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
* Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_LEDS_H
#define _ASM_IA64_SN_LEDS_H
#include <asm/sn/addrs.h>
#include <asm/sn/pda.h>
#include <asm/sn/shub_mmr.h>
#define LED0 (LOCAL_MMR_ADDR(SH_REAL_JUNK_BUS_LED0))
#define LED_CPU_SHIFT 16
#define LED_CPU_HEARTBEAT 0x01
#define LED_CPU_ACTIVITY 0x02
#define LED_ALWAYS_SET 0x00
/*
* Basic macros for flashing the LEDS on an SGI SN.
*/
static __inline__ void
set_led_bits(u8 value, u8 mask)
{
pda->led_state = (pda->led_state & ~mask) | (value & mask);
*pda->led_address = (short) pda->led_state;
}
#endif /* _ASM_IA64_SN_LEDS_H */
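A minimal usage sketch, assuming the caller already runs on the CPU whose pda is being updated:

/* Sketch: drive the per-CPU heartbeat LED with the helper above. */
static void heartbeat(int on)
{
	set_led_bits(on ? LED_CPU_HEARTBEAT : 0, LED_CPU_HEARTBEAT);
}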

View file

@ -1,127 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_MODULE_H
#define _ASM_IA64_SN_MODULE_H
/* parameter for format_module_id() */
#define MODULE_FORMAT_BRIEF 1
#define MODULE_FORMAT_LONG 2
#define MODULE_FORMAT_LCD 3
/*
* Module id format
*
* 31-16 Rack ID (encoded class, group, number - 16-bit unsigned int)
* 15-8 Brick type (8-bit ascii character)
* 7-0 Bay (brick position in rack (0-63) - 8-bit unsigned int)
*
*/
/*
* Macros for getting the brick type
*/
#define MODULE_BTYPE_MASK 0xff00
#define MODULE_BTYPE_SHFT 8
#define MODULE_GET_BTYPE(_m) (((_m) & MODULE_BTYPE_MASK) >> MODULE_BTYPE_SHFT)
#define MODULE_BT_TO_CHAR(_b) ((char)(_b))
#define MODULE_GET_BTCHAR(_m) (MODULE_BT_TO_CHAR(MODULE_GET_BTYPE(_m)))
/*
* Macros for getting the rack ID.
*/
#define MODULE_RACK_MASK 0xffff0000
#define MODULE_RACK_SHFT 16
#define MODULE_GET_RACK(_m) (((_m) & MODULE_RACK_MASK) >> MODULE_RACK_SHFT)
/*
* Macros for getting the brick position
*/
#define MODULE_BPOS_MASK 0x00ff
#define MODULE_BPOS_SHFT 0
#define MODULE_GET_BPOS(_m) (((_m) & MODULE_BPOS_MASK) >> MODULE_BPOS_SHFT)
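Decoding follows directly from the field layout documented above; a short sketch:

/* Sketch: unpack the three module-id fields described above. */
static void decode_moduleid(unsigned int m)
{
	unsigned int rack = MODULE_GET_RACK(m);   /* bits 31-16 */
	char brick        = MODULE_GET_BTCHAR(m); /* bits 15-8, ASCII */
	unsigned int bay  = MODULE_GET_BPOS(m);   /* bits 7-0 */

	(void)rack; (void)brick; (void)bay;
}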
/*
* Macros for encoding and decoding rack IDs
* A rack number consists of three parts:
* class (0==CPU/mixed, 1==I/O), group, number
*
* Rack number is stored just as it is displayed on the screen:
* a 3-decimal-digit number.
*/
#define RACK_CLASS_DVDR 100
#define RACK_GROUP_DVDR 10
#define RACK_NUM_DVDR 1
#define RACK_CREATE_RACKID(_c, _g, _n) ((_c) * RACK_CLASS_DVDR + \
(_g) * RACK_GROUP_DVDR + (_n) * RACK_NUM_DVDR)
#define RACK_GET_CLASS(_r) ((_r) / RACK_CLASS_DVDR)
#define RACK_GET_GROUP(_r) (((_r) - RACK_GET_CLASS(_r) * \
RACK_CLASS_DVDR) / RACK_GROUP_DVDR)
#define RACK_GET_NUM(_r) (((_r) - RACK_GET_CLASS(_r) * \
RACK_CLASS_DVDR - RACK_GET_GROUP(_r) * \
RACK_GROUP_DVDR) / RACK_NUM_DVDR)
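A worked example of the decimal encoding: class 1, group 2, number 3 packs to 123, and the GET macros invert it:

/* Worked example of the decimal rack-id encoding above. */
static void rackid_example(void)
{
	int rackid = RACK_CREATE_RACKID(1, 2, 3); /* 1*100 + 2*10 + 3 = 123 */
	int cls    = RACK_GET_CLASS(rackid);      /* 123 / 100        = 1   */
	int grp    = RACK_GET_GROUP(rackid);      /* (123 - 100) / 10 = 2   */
	int num    = RACK_GET_NUM(rackid);        /* 123 - 100 - 20   = 3   */

	(void)cls; (void)grp; (void)num;
}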
/*
* Macros for encoding and decoding rack IDs
* A rack number consists of three parts:
* class 1 bit, 0==CPU/mixed, 1==I/O
* group 2 bits for CPU/mixed, 3 bits for I/O
* number 3 bits for CPU/mixed, 2 bits for I/O (1 based)
*/
#define RACK_GROUP_BITS(_r) (RACK_GET_CLASS(_r) ? 3 : 2)
#define RACK_NUM_BITS(_r) (RACK_GET_CLASS(_r) ? 2 : 3)
#define RACK_CLASS_MASK(_r) 0x20
#define RACK_CLASS_SHFT(_r) 5
#define RACK_ADD_CLASS(_r, _c) \
((_r) |= (_c) << RACK_CLASS_SHFT(_r) & RACK_CLASS_MASK(_r))
#define RACK_GROUP_SHFT(_r) RACK_NUM_BITS(_r)
#define RACK_GROUP_MASK(_r) \
( (((unsigned)1<<RACK_GROUP_BITS(_r)) - 1) << RACK_GROUP_SHFT(_r) )
#define RACK_ADD_GROUP(_r, _g) \
((_r) |= (_g) << RACK_GROUP_SHFT(_r) & RACK_GROUP_MASK(_r))
#define RACK_NUM_SHFT(_r) 0
#define RACK_NUM_MASK(_r) \
( (((unsigned)1<<RACK_NUM_BITS(_r)) - 1) << RACK_NUM_SHFT(_r) )
#define RACK_ADD_NUM(_r, _n) \
((_r) |= ((_n) - 1) << RACK_NUM_SHFT(_r) & RACK_NUM_MASK(_r))
/*
* Brick type definitions
*/
#define MAX_BRICK_TYPES 256 /* brick type is stored as uchar */
extern char brick_types[];
#define MODULE_CBRICK 0
#define MODULE_RBRICK 1
#define MODULE_IBRICK 2
#define MODULE_KBRICK 3
#define MODULE_XBRICK 4
#define MODULE_DBRICK 5
#define MODULE_PBRICK 6
#define MODULE_NBRICK 7
#define MODULE_PEBRICK 8
#define MODULE_PXBRICK 9
#define MODULE_IXBRICK 10
#define MODULE_CGBRICK 11
#define MODULE_OPUSBRICK 12
#define MODULE_SABRICK 13 /* TIO BringUp Brick */
#define MODULE_IABRICK 14
#define MODULE_PABRICK 15
#define MODULE_GABRICK 16
#define MODULE_OPUS_TIO 17 /* OPUS TIO Riser */
extern void format_module_id(char *, moduleid_t, int);
#endif /* _ASM_IA64_SN_MODULE_H */

View file

@ -1,59 +0,0 @@
/*
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2001-2008 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_MSPEC_H
#define _ASM_IA64_SN_MSPEC_H
#define FETCHOP_VAR_SIZE 64 /* 64 bytes per fetchop variable */
#define FETCHOP_LOAD 0
#define FETCHOP_INCREMENT 8
#define FETCHOP_DECREMENT 16
#define FETCHOP_CLEAR 24
#define FETCHOP_STORE 0
#define FETCHOP_AND 24
#define FETCHOP_OR 32
#define FETCHOP_CLEAR_CACHE 56
#define FETCHOP_LOAD_OP(addr, op) ( \
*(volatile long *)((char*) (addr) + (op)))
#define FETCHOP_STORE_OP(addr, op, x) ( \
*(volatile long *)((char*) (addr) + (op)) = (long) (x))
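A hedged sketch of the load-side-effect idiom these macros encode: a load at the INCREMENT offset both bumps the variable and returns its prior value ('var' is assumed to point at a mapped 64-byte fetchop variable):

/* Sketch: atomic increment via the fetchop load side effect. */
static long fetchop_inc(void *var)
{
	return FETCHOP_LOAD_OP(var, FETCHOP_INCREMENT);
}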
#ifdef __KERNEL__
/*
* Each Atomic Memory Operation (amo, formerly known as fetchop)
* variable is 64 bytes long. The first 8 bytes are used. The
* remaining 56 bytes are unaddressable due to the operation taking
* that portion of the address.
*
* NOTE: The amo structure _MUST_ be placed in either the first or second
* half of the cache line. The cache line _MUST NOT_ be used for anything
* other than additional amo entries. This is because there are two
* addresses which reference the same physical cache line. One will
* be a cached entry with the memory type bits all set. This address
 * may be loaded into processor cache. The amo will be referenced
 * uncached via the special memory type. If any portion of the
 * cached copy of the line is modified, when that line is flushed, it will
* overwrite the uncached value in physical memory and lead to
* inconsistency.
*/
struct amo {
u64 variable;
u64 unused[7];
};
#endif /* __KERNEL__ */
#endif /* _ASM_IA64_SN_MSPEC_H */

View file

@ -1,82 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_NODEPDA_H
#define _ASM_IA64_SN_NODEPDA_H
#include <asm/irq.h>
#include <asm/sn/arch.h>
#include <asm/sn/intr.h>
#include <asm/sn/bte.h>
/*
* NUMA Node-Specific Data structures are defined in this file.
* In particular, this is the location of the node PDA.
* A pointer to the right node PDA is saved in each CPU PDA.
*/
/*
* Node-specific data structure.
*
* One of these structures is allocated on each node of a NUMA system.
*
* This structure provides a convenient way of keeping together
* all per-node data structures.
*/
struct phys_cpuid {
short nasid;
char subnode;
char slice;
};
struct nodepda_s {
void *pdinfo; /* Platform-dependent per-node info */
/*
* The BTEs on this node are shared by the local cpus
*/
struct bteinfo_s bte_if[MAX_BTES_PER_NODE]; /* Virtual Interface */
struct timer_list bte_recovery_timer;
spinlock_t bte_recovery_lock;
/*
* Array of pointers to the nodepdas for each node.
*/
struct nodepda_s *pernode_pdaindr[MAX_COMPACT_NODES];
/*
* Array of physical cpu identifiers. Indexed by cpuid.
*/
struct phys_cpuid phys_cpuid[NR_CPUS];
spinlock_t ptc_lock ____cacheline_aligned_in_smp;
};
typedef struct nodepda_s nodepda_t;
/*
* Access Functions for node PDA.
* Since there is one nodepda for each node, we need a convenient mechanism
* to access these nodepdas without cluttering code with #ifdefs.
* The next set of definitions provides this.
* Routines are expected to use
*
* sn_nodepda - to access node PDA for the node on which code is running
* NODEPDA(cnodeid) - to access node PDA for cnodeid
*/
DECLARE_PER_CPU(struct nodepda_s *, __sn_nodepda);
#define sn_nodepda __this_cpu_read(__sn_nodepda)
#define NODEPDA(cnodeid) (sn_nodepda->pernode_pdaindr[cnodeid])
/*
* Check if given a compact node id the corresponding node has all the
* cpus disabled.
*/
#define is_headless_node(cnodeid) (nr_cpus_node(cnodeid) == 0)
#endif /* _ASM_IA64_SN_NODEPDA_H */
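A small sketch of the access pattern the macros above enable: any CPU can reach any node's per-node data through the indirection array.

/* Sketch: reach another node's per-node data via NODEPDA(). */
static spinlock_t *node_ptc_lock(int cnodeid)
{
	return &NODEPDA(cnodeid)->ptc_lock;
}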

View file

@ -1,150 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992-1997,2000-2006 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H
#define _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H
#include <asm/sn/intr.h>
#include <asm/sn/pcibus_provider_defs.h>
/* Workarounds */
#define PV907516 (1 << 1) /* TIOCP: Don't write the write buffer flush reg */
#define BUSTYPE_MASK 0x1
/* Macros given a pcibus structure */
#define IS_PCIX(ps) ((ps)->pbi_bridge_mode & BUSTYPE_MASK)
#define IS_PCI_BRIDGE_ASIC(asic) (asic == PCIIO_ASIC_TYPE_PIC || \
asic == PCIIO_ASIC_TYPE_TIOCP)
#define IS_PIC_SOFT(ps) (ps->pbi_bridge_type == PCIBR_BRIDGETYPE_PIC)
#define IS_TIOCP_SOFT(ps) (ps->pbi_bridge_type == PCIBR_BRIDGETYPE_TIOCP)
/*
* The different PCI Bridge types supported on the SGI Altix platforms
*/
#define PCIBR_BRIDGETYPE_UNKNOWN -1
#define PCIBR_BRIDGETYPE_PIC 2
#define PCIBR_BRIDGETYPE_TIOCP 3
/*
* Bridge 64bit Direct Map Attributes
*/
#define PCI64_ATTR_PREF (1ull << 59)
#define PCI64_ATTR_PREC (1ull << 58)
#define PCI64_ATTR_VIRTUAL (1ull << 57)
#define PCI64_ATTR_BAR (1ull << 56)
#define PCI64_ATTR_SWAP (1ull << 55)
#define PCI64_ATTR_VIRTUAL1 (1ull << 54)
#define PCI32_LOCAL_BASE 0
#define PCI32_MAPPED_BASE 0x40000000
#define PCI32_DIRECT_BASE 0x80000000
#define IS_PCI32_MAPPED(x) ((u64)(x) < PCI32_DIRECT_BASE && \
(u64)(x) >= PCI32_MAPPED_BASE)
#define IS_PCI32_DIRECT(x) ((u64)(x) >= PCI32_DIRECT_BASE)
/*
 * Bridge PMU Address Translation Entry Attributes
*/
#define PCI32_ATE_V (0x1 << 0)
#define PCI32_ATE_CO (0x1 << 1) /* PIC ASIC ONLY */
#define PCI32_ATE_PIO (0x1 << 1) /* TIOCP ASIC ONLY */
#define PCI32_ATE_MSI (0x1 << 2)
#define PCI32_ATE_PREF (0x1 << 3)
#define PCI32_ATE_BAR (0x1 << 4)
#define PCI32_ATE_ADDR_SHFT 12
#define MINIMAL_ATES_REQUIRED(addr, size) \
(IOPG(IOPGOFF(addr) + (size) - 1) == IOPG((size) - 1))
#define MINIMAL_ATE_FLAG(addr, size) \
(MINIMAL_ATES_REQUIRED((u64)addr, size) ? 1 : 0)
/* bit 29 of the pci address is the SWAP bit */
#define ATE_SWAPSHIFT 29
#define ATE_SWAP_ON(x) ((x) |= (1 << ATE_SWAPSHIFT))
#define ATE_SWAP_OFF(x) ((x) &= ~(1 << ATE_SWAPSHIFT))
/*
* I/O page size
*/
#if PAGE_SIZE < 16384
#define IOPFNSHIFT 12 /* 4K per mapped page */
#else
#define IOPFNSHIFT 14 /* 16K per mapped page */
#endif
#define IOPGSIZE (1 << IOPFNSHIFT)
#define IOPG(x) ((x) >> IOPFNSHIFT)
#define IOPGOFF(x) ((x) & (IOPGSIZE-1))
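From the page macros above, the number of I/O pages (and hence ATEs) a mapping consumes follows directly; a sketch:

/* Sketch: I/O pages spanned by a mapping of 'size' bytes at 'addr'. */
static unsigned long iopages_spanned(unsigned long addr, unsigned long size)
{
	return IOPG(IOPGOFF(addr) + size - 1) + 1;
}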
#define PCIBR_DEV_SWAP_DIR (1ull << 19)
#define PCIBR_CTRL_PAGE_SIZE (0x1 << 21)
/*
* PMU resources.
*/
struct ate_resource {
u64 *ate;
u64 num_ate;
u64 lowest_free_index;
};
struct pcibus_info {
struct pcibus_bussoft pbi_buscommon; /* common header */
u32 pbi_moduleid;
short pbi_bridge_type;
short pbi_bridge_mode;
struct ate_resource pbi_int_ate_resource;
u64 pbi_int_ate_size;
u64 pbi_dir_xbase;
char pbi_hub_xid;
u64 pbi_devreg[8];
u32 pbi_valid_devices;
u32 pbi_enabled_devices;
spinlock_t pbi_lock;
};
extern int pcibr_init_provider(void);
extern void *pcibr_bus_fixup(struct pcibus_bussoft *, struct pci_controller *);
extern dma_addr_t pcibr_dma_map(struct pci_dev *, unsigned long, size_t, int type);
extern dma_addr_t pcibr_dma_map_consistent(struct pci_dev *, unsigned long, size_t, int type);
extern void pcibr_dma_unmap(struct pci_dev *, dma_addr_t, int);
/*
* prototypes for the bridge asic register access routines in pcibr_reg.c
*/
extern void pcireg_control_bit_clr(struct pcibus_info *, u64);
extern void pcireg_control_bit_set(struct pcibus_info *, u64);
extern u64 pcireg_tflush_get(struct pcibus_info *);
extern u64 pcireg_intr_status_get(struct pcibus_info *);
extern void pcireg_intr_enable_bit_clr(struct pcibus_info *, u64);
extern void pcireg_intr_enable_bit_set(struct pcibus_info *, u64);
extern void pcireg_intr_addr_addr_set(struct pcibus_info *, int, u64);
extern void pcireg_force_intr_set(struct pcibus_info *, int);
extern u64 pcireg_wrb_flush_get(struct pcibus_info *, int);
extern void pcireg_int_ate_set(struct pcibus_info *, int, u64);
extern u64 __iomem * pcireg_int_ate_addr(struct pcibus_info *, int);
extern void pcibr_force_interrupt(struct sn_irq_info *sn_irq_info);
extern void pcibr_change_devices_irq(struct sn_irq_info *sn_irq_info);
extern int pcibr_ate_alloc(struct pcibus_info *, int);
extern void pcibr_ate_free(struct pcibus_info *, int);
extern void ate_write(struct pcibus_info *, int, int, u64);
extern int sal_pcibr_slot_enable(struct pcibus_info *soft, int device,
void *resp, char **ssdt);
extern int sal_pcibr_slot_disable(struct pcibus_info *soft, int device,
int action, void *resp);
extern u16 sn_ioboard_to_pci_bus(struct pci_bus *pci_bus);
#endif

View file

@ -1,68 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H
#define _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H
/*
* SN pci asic types. Do not ever renumber these or reuse values. The
* values must agree with what prom thinks they are.
*/
#define PCIIO_ASIC_TYPE_UNKNOWN 0
#define PCIIO_ASIC_TYPE_PPB 1
#define PCIIO_ASIC_TYPE_PIC 2
#define PCIIO_ASIC_TYPE_TIOCP 3
#define PCIIO_ASIC_TYPE_TIOCA 4
#define PCIIO_ASIC_TYPE_TIOCE 5
#define PCIIO_ASIC_MAX_TYPES 6
/*
* Common pciio bus provider data. There should be one of these as the
 * first field in any pciio based provider soft structure (e.g. pcibr_soft,
 * tioca_soft, etc.).
*/
struct pcibus_bussoft {
u32 bs_asic_type; /* chipset type */
u32 bs_xid; /* xwidget id */
u32 bs_persist_busnum; /* Persistent Bus Number */
u32 bs_persist_segment; /* Segment Number */
u64 bs_legacy_io; /* legacy io pio addr */
u64 bs_legacy_mem; /* legacy mem pio addr */
u64 bs_base; /* widget base */
struct xwidget_info *bs_xwidget_info;
};
struct pci_controller;
/*
* SN pci bus indirection
*/
struct sn_pcibus_provider {
dma_addr_t (*dma_map)(struct pci_dev *, unsigned long, size_t, int flags);
dma_addr_t (*dma_map_consistent)(struct pci_dev *, unsigned long, size_t, int flags);
void (*dma_unmap)(struct pci_dev *, dma_addr_t, int);
void * (*bus_fixup)(struct pcibus_bussoft *, struct pci_controller *);
void (*force_interrupt)(struct sn_irq_info *);
void (*target_interrupt)(struct sn_irq_info *);
};
/*
* Flags used by the map interfaces
 * bits 3:0 specify the format of the passed-in address
 * bit 4 specifies that the address is to be used for MSI
*/
#define SN_DMA_ADDRTYPE(x) ((x) & 0xf)
#define SN_DMA_ADDR_PHYS 1 /* address is phys memory */
#define SN_DMA_ADDR_XIO 2 /* address is an xio address */
#define SN_DMA_MSI 0x10 /* Bus address is to be used for MSI */
extern struct sn_pcibus_provider *sn_pci_provider[];
#endif /* _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H */
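A minimal sketch of decoding the flags word the map hooks receive, per the bit layout above:

/* Sketch: decode the dma_map flags word described above. */
static int wants_msi_phys_map(int flags)
{
	return SN_DMA_ADDRTYPE(flags) == SN_DMA_ADDR_PHYS &&
	       (flags & SN_DMA_MSI);
}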

View file

@ -1,85 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2006 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_PCI_PCIDEV_H
#define _ASM_IA64_SN_PCI_PCIDEV_H
#include <linux/pci.h>
/*
 * In ia64, pci_dev->sysdata must point to a struct pci_controller. To provide access to
* the pcidev_info structs for all devices under a controller, we keep a
* list of pcidev_info under pci_controller->platform_data.
*/
struct sn_platform_data {
void *provider_soft;
struct list_head pcidev_info;
};
#define SN_PLATFORM_DATA(busdev) \
((struct sn_platform_data *)(PCI_CONTROLLER(busdev)->platform_data))
#define SN_PCIDEV_INFO(dev) sn_pcidev_info_get(dev)
/*
* Given a pci_bus, return the sn pcibus_bussoft struct. Note that
* this only works for root busses, not for busses represented by PPB's.
*/
#define SN_PCIBUS_BUSSOFT(pci_bus) \
((struct pcibus_bussoft *)(SN_PLATFORM_DATA(pci_bus)->provider_soft))
#define SN_PCIBUS_BUSSOFT_INFO(pci_bus) \
((struct pcibus_info *)(SN_PLATFORM_DATA(pci_bus)->provider_soft))
/*
* Given a struct pci_dev, return the sn pcibus_bussoft struct. Note
 * that this is not equivalent to SN_PCIBUS_BUSSOFT(pci_dev->bus)
 * due to possible PPBs in the path.
*/
#define SN_PCIDEV_BUSSOFT(pci_dev) \
(SN_PCIDEV_INFO(pci_dev)->pdi_host_pcidev_info->pdi_pcibus_info)
#define SN_PCIDEV_BUSPROVIDER(pci_dev) \
(SN_PCIDEV_INFO(pci_dev)->pdi_provider)
#define PCIIO_BUS_NONE 255 /* bus 255 reserved */
#define PCIIO_SLOT_NONE 255
#define PCIIO_FUNC_NONE 255
#define PCIIO_VENDOR_ID_NONE (-1)
struct pcidev_info {
u64 pdi_pio_mapped_addr[7]; /* 6 BARs PLUS 1 ROM */
u64 pdi_slot_host_handle; /* Bus and devfn Host pci_dev */
struct pcibus_bussoft *pdi_pcibus_info; /* Kernel common bus soft */
struct pcidev_info *pdi_host_pcidev_info; /* Kernel Host pci_dev */
struct pci_dev *pdi_linux_pcidev; /* Kernel pci_dev */
struct sn_irq_info *pdi_sn_irq_info;
struct sn_pcibus_provider *pdi_provider; /* sn pci ops */
struct pci_dev *host_pci_dev; /* host bus link */
struct list_head pdi_list; /* List of pcidev_info */
};
extern void sn_irq_fixup(struct pci_dev *pci_dev,
struct sn_irq_info *sn_irq_info);
extern void sn_irq_unfixup(struct pci_dev *pci_dev);
extern struct pcidev_info * sn_pcidev_info_get(struct pci_dev *);
extern void sn_bus_fixup(struct pci_bus *);
extern void sn_acpi_bus_fixup(struct pci_bus *);
extern void sn_common_bus_fixup(struct pci_bus *, struct pcibus_bussoft *);
extern void sn_bus_store_sysdata(struct pci_dev *dev);
extern void sn_bus_free_sysdata(void);
extern void sn_generate_path(struct pci_bus *pci_bus, char *address);
extern void sn_io_slot_fixup(struct pci_dev *);
extern void sn_acpi_slot_fixup(struct pci_dev *);
extern void sn_pci_fixup_slot(struct pci_dev *dev, struct pcidev_info *,
struct sn_irq_info *);
extern void sn_pci_unfixup_slot(struct pci_dev *dev);
extern void sn_irq_lh_init(void);
#endif /* _ASM_IA64_SN_PCI_PCIDEV_H */

View file

@ -1,68 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_PDA_H
#define _ASM_IA64_SN_PDA_H
#include <linux/cache.h>
#include <asm/percpu.h>
/*
* CPU-specific data structure.
*
* One of these structures is allocated for each cpu of a NUMA system.
*
* This structure provides a convenient way of keeping together
* all SN per-cpu data structures.
*/
typedef struct pda_s {
/*
* Support for SN LEDs
*/
volatile short *led_address;
u8 led_state;
u8 hb_state; /* supports blinking heartbeat leds */
unsigned int hb_count;
unsigned int idle_flag;
volatile unsigned long *bedrock_rev_id;
volatile unsigned long *pio_write_status_addr;
unsigned long pio_write_status_val;
volatile unsigned long *pio_shub_war_cam_addr;
unsigned long sn_in_service_ivecs[4];
int sn_lb_int_war_ticks;
int sn_last_irq;
int sn_first_irq;
} pda_t;
#define CACHE_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
/*
* PDA
* Per-cpu private data area for each cpu. The PDA is located immediately after
 * the IA64 cpu_data area. A full page is allocated for the cpu_data area for each
 * cpu but only a small amount of the page is actually used. We put the SNIA PDA
 * in the same page as the cpu_data area. Note that there is a check in the setup
 * code to verify that we don't overflow the page.
 *
 * It seems we should cache-line align the pda so that any changes in the
 * size of the cpu_data area don't change the cache layout. Should we align to a
 * 32, 64, 128 or 512 byte boundary? Each has merits. For now, pick 128, but this
 * should be revisited later.
*/
DECLARE_PER_CPU(struct pda_s, pda_percpu);
#define pda (&__ia64_per_cpu_var(pda_percpu))
#define pdacpu(cpu) (&per_cpu(pda_percpu, cpu))
#endif /* _ASM_IA64_SN_PDA_H */

View file

@ -1,261 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_PCI_PIC_H
#define _ASM_IA64_SN_PCI_PIC_H
/*
* PIC AS DEVICE ZERO
* ------------------
*
* PIC handles PCI/X busses. PCI/X requires that the 'bridge' (i.e. PIC)
* be designated as 'device 0'. That is a departure from earlier SGI
* PCI bridges. Because of that we use config space 1 to access the
* config space of the first actual PCI device on the bus.
* Here's what the PIC manual says:
*
* The current PCI-X bus specification now defines that the parent
 * host's bus bridge (PIC for example) must be device 0 on bus 0. PIC
* reduced the total number of devices from 8 to 4 and removed the
* device registers and windows, now only supporting devices 0,1,2, and
* 3. PIC did leave all 8 configuration space windows. The reason was
 * there was nothing to gain by removing them. Herein lies the problem.
* The device numbering we do using 0 through 3 is unrelated to the device
* numbering which PCI-X requires in configuration space. In the past we
 * correlated config space and our device space 0 <-> 0, 1 <-> 1, etc.
 * PCI-X requires we start at 1, not 0, and currently the PX brick
 * does associate our:
*
* device 0 with configuration space window 1,
* device 1 with configuration space window 2,
* device 2 with configuration space window 3,
* device 3 with configuration space window 4.
*
 * The net effect is that all config space accesses are off-by-one
 * relative to other per-slot accesses on the PIC.
* Here is a table that shows some of that:
*
* Internal Slot#
* |
* | 0 1 2 3
* ----------|---------------------------------------
* config | 0x21000 0x22000 0x23000 0x24000
* |
* even rrb | 0[0] n/a 1[0] n/a [] == implied even/odd
* |
* odd rrb | n/a 0[1] n/a 1[1]
* |
* int dev | 00 01 10 11
* |
* ext slot# | 1 2 3 4
* ----------|---------------------------------------
*/
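The off-by-one rule in the table reduces to a single addition; a sketch (window 0 belongs to PIC itself as PCI-X device 0):

/* Sketch: map an internal device number to its config space window. */
static int pic_cfg_window(int internal_dev)
{
	return internal_dev + 1;	/* devices 0..3 -> windows 1..4 */
}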
#define PIC_ATE_TARGETID_SHFT 8
#define PIC_HOST_INTR_ADDR 0x0000FFFFFFFFFFFFUL
#define PIC_PCI64_ATTR_TARG_SHFT 60
/*****************************************************************************
*********************** PIC MMR structure mapping ***************************
*****************************************************************************/
/* NOTE: PIC WAR. PV#854697. PIC does not allow writes just to [31:0]
* of a 64-bit register. When writing PIC registers, always write the
* entire 64 bits.
*/
struct pic {
/* 0x000000-0x00FFFF -- Local Registers */
/* 0x000000-0x000057 -- Standard Widget Configuration */
u64 p_wid_id; /* 0x000000 */
u64 p_wid_stat; /* 0x000008 */
u64 p_wid_err_upper; /* 0x000010 */
u64 p_wid_err_lower; /* 0x000018 */
#define p_wid_err p_wid_err_lower
u64 p_wid_control; /* 0x000020 */
u64 p_wid_req_timeout; /* 0x000028 */
u64 p_wid_int_upper; /* 0x000030 */
u64 p_wid_int_lower; /* 0x000038 */
#define p_wid_int p_wid_int_lower
u64 p_wid_err_cmdword; /* 0x000040 */
u64 p_wid_llp; /* 0x000048 */
u64 p_wid_tflush; /* 0x000050 */
/* 0x000058-0x00007F -- Bridge-specific Widget Configuration */
u64 p_wid_aux_err; /* 0x000058 */
u64 p_wid_resp_upper; /* 0x000060 */
u64 p_wid_resp_lower; /* 0x000068 */
#define p_wid_resp p_wid_resp_lower
u64 p_wid_tst_pin_ctrl; /* 0x000070 */
u64 p_wid_addr_lkerr; /* 0x000078 */
/* 0x000080-0x00008F -- PMU & MAP */
u64 p_dir_map; /* 0x000080 */
u64 _pad_000088; /* 0x000088 */
/* 0x000090-0x00009F -- SSRAM */
u64 p_map_fault; /* 0x000090 */
u64 _pad_000098; /* 0x000098 */
/* 0x0000A0-0x0000AF -- Arbitration */
u64 p_arb; /* 0x0000A0 */
u64 _pad_0000A8; /* 0x0000A8 */
/* 0x0000B0-0x0000BF -- Number In A Can or ATE Parity Error */
u64 p_ate_parity_err; /* 0x0000B0 */
u64 _pad_0000B8; /* 0x0000B8 */
/* 0x0000C0-0x0000FF -- PCI/GIO */
u64 p_bus_timeout; /* 0x0000C0 */
u64 p_pci_cfg; /* 0x0000C8 */
u64 p_pci_err_upper; /* 0x0000D0 */
u64 p_pci_err_lower; /* 0x0000D8 */
#define p_pci_err p_pci_err_lower
u64 _pad_0000E0[4]; /* 0x0000{E0..F8} */
/* 0x000100-0x0001FF -- Interrupt */
u64 p_int_status; /* 0x000100 */
u64 p_int_enable; /* 0x000108 */
u64 p_int_rst_stat; /* 0x000110 */
u64 p_int_mode; /* 0x000118 */
u64 p_int_device; /* 0x000120 */
u64 p_int_host_err; /* 0x000128 */
u64 p_int_addr[8]; /* 0x0001{30,,,68} */
u64 p_err_int_view; /* 0x000170 */
u64 p_mult_int; /* 0x000178 */
u64 p_force_always[8]; /* 0x0001{80,,,B8} */
u64 p_force_pin[8]; /* 0x0001{C0,,,F8} */
/* 0x000200-0x000298 -- Device */
u64 p_device[4]; /* 0x0002{00,,,18} */
u64 _pad_000220[4]; /* 0x0002{20,,,38} */
u64 p_wr_req_buf[4]; /* 0x0002{40,,,58} */
u64 _pad_000260[4]; /* 0x0002{60,,,78} */
u64 p_rrb_map[2]; /* 0x0002{80,,,88} */
#define p_even_resp p_rrb_map[0] /* 0x000280 */
#define p_odd_resp p_rrb_map[1] /* 0x000288 */
u64 p_resp_status; /* 0x000290 */
u64 p_resp_clear; /* 0x000298 */
u64 _pad_0002A0[12]; /* 0x0002{A0..F8} */
/* 0x000300-0x0003F8 -- Buffer Address Match Registers */
struct {
u64 upper; /* 0x0003{00,,,F0} */
u64 lower; /* 0x0003{08,,,F8} */
} p_buf_addr_match[16];
/* 0x000400-0x0005FF -- Performance Monitor Registers (even only) */
struct {
u64 flush_w_touch; /* 0x000{400,,,5C0} */
u64 flush_wo_touch; /* 0x000{408,,,5C8} */
u64 inflight; /* 0x000{410,,,5D0} */
u64 prefetch; /* 0x000{418,,,5D8} */
u64 total_pci_retry; /* 0x000{420,,,5E0} */
u64 max_pci_retry; /* 0x000{428,,,5E8} */
u64 max_latency; /* 0x000{430,,,5F0} */
u64 clear_all; /* 0x000{438,,,5F8} */
} p_buf_count[8];
/* 0x000600-0x0009FF -- PCI/X registers */
u64 p_pcix_bus_err_addr; /* 0x000600 */
u64 p_pcix_bus_err_attr; /* 0x000608 */
u64 p_pcix_bus_err_data; /* 0x000610 */
u64 p_pcix_pio_split_addr; /* 0x000618 */
u64 p_pcix_pio_split_attr; /* 0x000620 */
u64 p_pcix_dma_req_err_attr; /* 0x000628 */
u64 p_pcix_dma_req_err_addr; /* 0x000630 */
u64 p_pcix_timeout; /* 0x000638 */
u64 _pad_000640[120]; /* 0x000{640,,,9F8} */
/* 0x000A00-0x000BFF -- PCI/X Read&Write Buffer */
struct {
u64 p_buf_addr; /* 0x000{A00,,,AF0} */
u64 p_buf_attr; /* 0X000{A08,,,AF8} */
} p_pcix_read_buf_64[16];
struct {
u64 p_buf_addr; /* 0x000{B00,,,BE0} */
u64 p_buf_attr; /* 0x000{B08,,,BE8} */
u64 p_buf_valid; /* 0x000{B10,,,BF0} */
u64 __pad1; /* 0x000{B18,,,BF8} */
} p_pcix_write_buf_64[8];
/* End of Local Registers -- Start of Address Map space */
char _pad_000c00[0x010000 - 0x000c00];
/* 0x010000-0x011fff -- Internal ATE RAM (Auto Parity Generation) */
u64 p_int_ate_ram[1024]; /* 0x010000-0x011fff */
/* 0x012000-0x013fff -- Internal ATE RAM (Manual Parity Generation) */
u64 p_int_ate_ram_mp[1024]; /* 0x012000-0x013fff */
char _pad_014000[0x18000 - 0x014000];
/* 0x18000-0x197F8 -- PIC Write Request Ram */
u64 p_wr_req_lower[256]; /* 0x18000 - 0x187F8 */
u64 p_wr_req_upper[256]; /* 0x18800 - 0x18FF8 */
u64 p_wr_req_parity[256]; /* 0x19000 - 0x197F8 */
char _pad_019800[0x20000 - 0x019800];
/* 0x020000-0x027FFF -- PCI Device Configuration Spaces */
union {
u8 c[0x1000 / 1]; /* 0x02{0000,,,7FFF} */
u16 s[0x1000 / 2]; /* 0x02{0000,,,7FFF} */
u32 l[0x1000 / 4]; /* 0x02{0000,,,7FFF} */
u64 d[0x1000 / 8]; /* 0x02{0000,,,7FFF} */
union {
u8 c[0x100 / 1];
u16 s[0x100 / 2];
u32 l[0x100 / 4];
u64 d[0x100 / 8];
} f[8];
} p_type0_cfg_dev[8]; /* 0x02{0000,,,7FFF} */
/* 0x028000-0x028FFF -- PCI Type 1 Configuration Space */
union {
u8 c[0x1000 / 1]; /* 0x028000-0x029000 */
u16 s[0x1000 / 2]; /* 0x028000-0x029000 */
u32 l[0x1000 / 4]; /* 0x028000-0x029000 */
u64 d[0x1000 / 8]; /* 0x028000-0x029000 */
union {
u8 c[0x100 / 1];
u16 s[0x100 / 2];
u32 l[0x100 / 4];
u64 d[0x100 / 8];
} f[8];
} p_type1_cfg; /* 0x028000-0x029000 */
char _pad_029000[0x030000-0x029000];
/* 0x030000-0x030007 -- PCI Interrupt Acknowledge Cycle */
union {
u8 c[8 / 1];
u16 s[8 / 2];
u32 l[8 / 4];
u64 d[8 / 8];
} p_pci_iack; /* 0x030000-0x030007 */
char _pad_030007[0x040000-0x030008];
/* 0x040000-0x040007 -- PCIX Special Cycle */
union {
u8 c[8 / 1];
u16 s[8 / 2];
u32 l[8 / 4];
u64 d[8 / 8];
} p_pcix_cycle; /* 0x040000-0x040007 */
};
#endif /* _ASM_IA64_SN_PCI_PIC_H */

View file

@ -1,28 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2002-2006 Silicon Graphics, Inc. All Rights Reserved.
*/
#ifndef _ASM_IA64_SN_RW_MMR_H
#define _ASM_IA64_SN_RW_MMR_H
/*
 * This file declares routines that access MMRs via uncached physical addresses:
* pio_phys_read_mmr - read an MMR
* pio_phys_write_mmr - write an MMR
* pio_atomic_phys_write_mmrs - atomically write 1 or 2 MMRs with psr.ic=0
* Second MMR will be skipped if address is NULL
*
* Addresses passed to these routines should be uncached physical addresses
 * i.e., 0x80000....
*/
extern long pio_phys_read_mmr(volatile long *mmr);
extern void pio_phys_write_mmr(volatile long *mmr, long val);
extern void pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1, volatile long *mmr2, long val2);
#endif /* _ASM_IA64_SN_RW_MMR_H */
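A hedged sketch of a read-modify-write through the routines above; 'mmr' is assumed to already be an uncached physical address of the 0x80000... form:

/* Sketch: set bits in an MMR via uncached physical access. */
static void mmr_set_bits(volatile long *mmr, long bits)
{
	pio_phys_write_mmr(mmr, pio_phys_read_mmr(mmr) | bits);
}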

View file

@ -1,502 +0,0 @@
/*
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2001-2005 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_SHUB_MMR_H
#define _ASM_IA64_SN_SHUB_MMR_H
/* ==================================================================== */
/* Register "SH_IPI_INT" */
/* SHub Inter-Processor Interrupt Registers */
/* ==================================================================== */
#define SH1_IPI_INT __IA64_UL_CONST(0x0000000110000380)
#define SH2_IPI_INT __IA64_UL_CONST(0x0000000010000380)
/* SH_IPI_INT_TYPE */
/* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */
#define SH_IPI_INT_TYPE_SHFT 0
#define SH_IPI_INT_TYPE_MASK __IA64_UL_CONST(0x0000000000000007)
/* SH_IPI_INT_AGT */
/* Description: Agent, must be 0 for SHub */
#define SH_IPI_INT_AGT_SHFT 3
#define SH_IPI_INT_AGT_MASK __IA64_UL_CONST(0x0000000000000008)
/* SH_IPI_INT_PID */
/* Description: Processor ID, same setting as on targeted McKinley */
#define SH_IPI_INT_PID_SHFT 4
#define SH_IPI_INT_PID_MASK __IA64_UL_CONST(0x00000000000ffff0)
/* SH_IPI_INT_BASE */
/* Description: Optional interrupt vector area, 2MB aligned */
#define SH_IPI_INT_BASE_SHFT 21
#define SH_IPI_INT_BASE_MASK __IA64_UL_CONST(0x0003ffffffe00000)
/* SH_IPI_INT_IDX */
/* Description: Targeted McKinley interrupt vector */
#define SH_IPI_INT_IDX_SHFT 52
#define SH_IPI_INT_IDX_MASK __IA64_UL_CONST(0x0ff0000000000000)
/* SH_IPI_INT_SEND */
/* Description: Send Interrupt Message to PI. This generates a pulse */
#define SH_IPI_INT_SEND_SHFT 63
#define SH_IPI_INT_SEND_MASK __IA64_UL_CONST(0x8000000000000000)
/* ==================================================================== */
/* Register "SH_EVENT_OCCURRED" */
/* SHub Interrupt Event Occurred */
/* ==================================================================== */
#define SH1_EVENT_OCCURRED __IA64_UL_CONST(0x0000000110010000)
#define SH1_EVENT_OCCURRED_ALIAS __IA64_UL_CONST(0x0000000110010008)
#define SH2_EVENT_OCCURRED __IA64_UL_CONST(0x0000000010010000)
#define SH2_EVENT_OCCURRED_ALIAS __IA64_UL_CONST(0x0000000010010008)
/* ==================================================================== */
/* Register "SH_PI_CAM_CONTROL" */
/* CRB CAM MMR Access Control */
/* ==================================================================== */
#define SH1_PI_CAM_CONTROL __IA64_UL_CONST(0x0000000120050300)
/* ==================================================================== */
/* Register "SH_SHUB_ID" */
/* SHub ID Number */
/* ==================================================================== */
#define SH1_SHUB_ID __IA64_UL_CONST(0x0000000110060580)
#define SH1_SHUB_ID_REVISION_SHFT 28
#define SH1_SHUB_ID_REVISION_MASK __IA64_UL_CONST(0x00000000f0000000)
/* ==================================================================== */
/* Register "SH_RTC" */
/* Real-time Clock */
/* ==================================================================== */
#define SH1_RTC __IA64_UL_CONST(0x00000001101c0000)
#define SH2_RTC __IA64_UL_CONST(0x00000002101c0000)
#define SH_RTC_MASK __IA64_UL_CONST(0x007fffffffffffff)
/* ==================================================================== */
/* Register "SH_PIO_WRITE_STATUS_0|1" */
/* PIO Write Status for CPU 0 & 1 */
/* ==================================================================== */
#define SH1_PIO_WRITE_STATUS_0 __IA64_UL_CONST(0x0000000120070200)
#define SH1_PIO_WRITE_STATUS_1 __IA64_UL_CONST(0x0000000120070280)
#define SH2_PIO_WRITE_STATUS_0 __IA64_UL_CONST(0x0000000020070200)
#define SH2_PIO_WRITE_STATUS_1 __IA64_UL_CONST(0x0000000020070280)
#define SH2_PIO_WRITE_STATUS_2 __IA64_UL_CONST(0x0000000020070300)
#define SH2_PIO_WRITE_STATUS_3 __IA64_UL_CONST(0x0000000020070380)
/* SH_PIO_WRITE_STATUS_0_WRITE_DEADLOCK */
/* Description: Deadlock response detected */
#define SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_SHFT 1
#define SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK \
__IA64_UL_CONST(0x0000000000000002)
/* SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT */
/* Description: Count of currently pending PIO writes */
#define SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_SHFT 56
#define SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK \
__IA64_UL_CONST(0x3f00000000000000)
/* ==================================================================== */
/* Register "SH_PIO_WRITE_STATUS_0_ALIAS" */
/* ==================================================================== */
#define SH1_PIO_WRITE_STATUS_0_ALIAS __IA64_UL_CONST(0x0000000120070208)
#define SH2_PIO_WRITE_STATUS_0_ALIAS __IA64_UL_CONST(0x0000000020070208)
/* ==================================================================== */
/* Register "SH_EVENT_OCCURRED" */
/* SHub Interrupt Event Occurred */
/* ==================================================================== */
/* SH_EVENT_OCCURRED_UART_INT */
/* Description: Pending Junk Bus UART Interrupt */
#define SH_EVENT_OCCURRED_UART_INT_SHFT 20
#define SH_EVENT_OCCURRED_UART_INT_MASK __IA64_UL_CONST(0x0000000000100000)
/* SH_EVENT_OCCURRED_IPI_INT */
/* Description: Pending IPI Interrupt */
#define SH_EVENT_OCCURRED_IPI_INT_SHFT 28
#define SH_EVENT_OCCURRED_IPI_INT_MASK __IA64_UL_CONST(0x0000000010000000)
/* SH_EVENT_OCCURRED_II_INT0 */
/* Description: Pending II 0 Interrupt */
#define SH_EVENT_OCCURRED_II_INT0_SHFT 29
#define SH_EVENT_OCCURRED_II_INT0_MASK __IA64_UL_CONST(0x0000000020000000)
/* SH_EVENT_OCCURRED_II_INT1 */
/* Description: Pending II 1 Interrupt */
#define SH_EVENT_OCCURRED_II_INT1_SHFT 30
#define SH_EVENT_OCCURRED_II_INT1_MASK __IA64_UL_CONST(0x0000000040000000)
/* SH2_EVENT_OCCURRED_EXTIO_INT2 */
/* Description: Pending SHUB 2 EXT IO INT2 */
#define SH2_EVENT_OCCURRED_EXTIO_INT2_SHFT 33
#define SH2_EVENT_OCCURRED_EXTIO_INT2_MASK __IA64_UL_CONST(0x0000000200000000)
/* SH2_EVENT_OCCURRED_EXTIO_INT3 */
/* Description: Pending SHUB 2 EXT IO INT3 */
#define SH2_EVENT_OCCURRED_EXTIO_INT3_SHFT 34
#define SH2_EVENT_OCCURRED_EXTIO_INT3_MASK __IA64_UL_CONST(0x0000000400000000)
#define SH_ALL_INT_MASK \
(SH_EVENT_OCCURRED_UART_INT_MASK | SH_EVENT_OCCURRED_IPI_INT_MASK | \
SH_EVENT_OCCURRED_II_INT0_MASK | SH_EVENT_OCCURRED_II_INT1_MASK | \
SH2_EVENT_OCCURRED_EXTIO_INT2_MASK | SH2_EVENT_OCCURRED_EXTIO_INT3_MASK)
/* ==================================================================== */
/* LEDS */
/* ==================================================================== */
#define SH1_REAL_JUNK_BUS_LED0 0x7fed00000UL
#define SH1_REAL_JUNK_BUS_LED1 0x7fed10000UL
#define SH1_REAL_JUNK_BUS_LED2 0x7fed20000UL
#define SH1_REAL_JUNK_BUS_LED3 0x7fed30000UL
#define SH2_REAL_JUNK_BUS_LED0 0xf0000000UL
#define SH2_REAL_JUNK_BUS_LED1 0xf0010000UL
#define SH2_REAL_JUNK_BUS_LED2 0xf0020000UL
#define SH2_REAL_JUNK_BUS_LED3 0xf0030000UL
/* ==================================================================== */
/* Register "SH1_PTC_0" */
/* Purge Translation Cache Message Configuration Information */
/* ==================================================================== */
#define SH1_PTC_0 __IA64_UL_CONST(0x00000001101a0000)
/* SH1_PTC_0_A */
/* Description: Type */
#define SH1_PTC_0_A_SHFT 0
/* SH1_PTC_0_PS */
/* Description: Page Size */
#define SH1_PTC_0_PS_SHFT 2
/* SH1_PTC_0_RID */
/* Description: Region ID */
#define SH1_PTC_0_RID_SHFT 8
/* SH1_PTC_0_START */
/* Description: Start */
#define SH1_PTC_0_START_SHFT 63
/* ==================================================================== */
/* Register "SH1_PTC_1" */
/* Purge Translation Cache Message Configuration Information */
/* ==================================================================== */
#define SH1_PTC_1 __IA64_UL_CONST(0x00000001101a0080)
/* SH1_PTC_1_START */
/* Description: PTC_1 Start */
#define SH1_PTC_1_START_SHFT 63
/* ==================================================================== */
/* Register "SH2_PTC" */
/* Purge Translation Cache Message Configuration Information */
/* ==================================================================== */
#define SH2_PTC __IA64_UL_CONST(0x0000000170000000)
/* SH2_PTC_A */
/* Description: Type */
#define SH2_PTC_A_SHFT 0
/* SH2_PTC_PS */
/* Description: Page Size */
#define SH2_PTC_PS_SHFT 2
/* SH2_PTC_RID */
/* Description: Region ID */
#define SH2_PTC_RID_SHFT 4
/* SH2_PTC_START */
/* Description: Start */
#define SH2_PTC_START_SHFT 63
/* SH2_PTC_ADDR */
/* Description: Address */
#define SH2_PTC_ADDR_SHFT 4
#define SH2_PTC_ADDR_MASK __IA64_UL_CONST(0x1ffffffffffff000)
/* ==================================================================== */
/* Register "SH_RTC1_INT_CONFIG" */
/* SHub RTC 1 Interrupt Config Registers */
/* ==================================================================== */
#define SH1_RTC1_INT_CONFIG __IA64_UL_CONST(0x0000000110001480)
#define SH2_RTC1_INT_CONFIG __IA64_UL_CONST(0x0000000010001480)
#define SH_RTC1_INT_CONFIG_MASK __IA64_UL_CONST(0x0ff3ffffffefffff)
#define SH_RTC1_INT_CONFIG_INIT __IA64_UL_CONST(0x0000000000000000)
/* SH_RTC1_INT_CONFIG_TYPE */
/* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */
#define SH_RTC1_INT_CONFIG_TYPE_SHFT 0
#define SH_RTC1_INT_CONFIG_TYPE_MASK __IA64_UL_CONST(0x0000000000000007)
/* SH_RTC1_INT_CONFIG_AGT */
/* Description: Agent, must be 0 for SHub */
#define SH_RTC1_INT_CONFIG_AGT_SHFT 3
#define SH_RTC1_INT_CONFIG_AGT_MASK __IA64_UL_CONST(0x0000000000000008)
/* SH_RTC1_INT_CONFIG_PID */
/* Description: Processor ID, same setting as on targeted McKinley */
#define SH_RTC1_INT_CONFIG_PID_SHFT 4
#define SH_RTC1_INT_CONFIG_PID_MASK __IA64_UL_CONST(0x00000000000ffff0)
/* SH_RTC1_INT_CONFIG_BASE */
/* Description: Optional interrupt vector area, 2MB aligned */
#define SH_RTC1_INT_CONFIG_BASE_SHFT 21
#define SH_RTC1_INT_CONFIG_BASE_MASK __IA64_UL_CONST(0x0003ffffffe00000)
/* SH_RTC1_INT_CONFIG_IDX */
/* Description: Targeted McKinley interrupt vector */
#define SH_RTC1_INT_CONFIG_IDX_SHFT 52
#define SH_RTC1_INT_CONFIG_IDX_MASK __IA64_UL_CONST(0x0ff0000000000000)
/* ==================================================================== */
/* Register "SH_RTC1_INT_ENABLE" */
/* SHub RTC 1 Interrupt Enable Registers */
/* ==================================================================== */
#define SH1_RTC1_INT_ENABLE __IA64_UL_CONST(0x0000000110001500)
#define SH2_RTC1_INT_ENABLE __IA64_UL_CONST(0x0000000010001500)
#define SH_RTC1_INT_ENABLE_MASK __IA64_UL_CONST(0x0000000000000001)
#define SH_RTC1_INT_ENABLE_INIT __IA64_UL_CONST(0x0000000000000000)
/* SH_RTC1_INT_ENABLE_RTC1_ENABLE */
/* Description: Enable RTC 1 Interrupt */
#define SH_RTC1_INT_ENABLE_RTC1_ENABLE_SHFT 0
#define SH_RTC1_INT_ENABLE_RTC1_ENABLE_MASK \
__IA64_UL_CONST(0x0000000000000001)
/* ==================================================================== */
/* Register "SH_RTC2_INT_CONFIG" */
/* SHub RTC 2 Interrupt Config Registers */
/* ==================================================================== */
#define SH1_RTC2_INT_CONFIG __IA64_UL_CONST(0x0000000110001580)
#define SH2_RTC2_INT_CONFIG __IA64_UL_CONST(0x0000000010001580)
#define SH_RTC2_INT_CONFIG_MASK __IA64_UL_CONST(0x0ff3ffffffefffff)
#define SH_RTC2_INT_CONFIG_INIT __IA64_UL_CONST(0x0000000000000000)
/* SH_RTC2_INT_CONFIG_TYPE */
/* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */
#define SH_RTC2_INT_CONFIG_TYPE_SHFT 0
#define SH_RTC2_INT_CONFIG_TYPE_MASK __IA64_UL_CONST(0x0000000000000007)
/* SH_RTC2_INT_CONFIG_AGT */
/* Description: Agent, must be 0 for SHub */
#define SH_RTC2_INT_CONFIG_AGT_SHFT 3
#define SH_RTC2_INT_CONFIG_AGT_MASK __IA64_UL_CONST(0x0000000000000008)
/* SH_RTC2_INT_CONFIG_PID */
/* Description: Processor ID, same setting as on targeted McKinley */
#define SH_RTC2_INT_CONFIG_PID_SHFT 4
#define SH_RTC2_INT_CONFIG_PID_MASK __IA64_UL_CONST(0x00000000000ffff0)
/* SH_RTC2_INT_CONFIG_BASE */
/* Description: Optional interrupt vector area, 2MB aligned */
#define SH_RTC2_INT_CONFIG_BASE_SHFT 21
#define SH_RTC2_INT_CONFIG_BASE_MASK __IA64_UL_CONST(0x0003ffffffe00000)
/* SH_RTC2_INT_CONFIG_IDX */
/* Description: Targeted McKinley interrupt vector */
#define SH_RTC2_INT_CONFIG_IDX_SHFT 52
#define SH_RTC2_INT_CONFIG_IDX_MASK __IA64_UL_CONST(0x0ff0000000000000)
/* ==================================================================== */
/* Register "SH_RTC2_INT_ENABLE" */
/* SHub RTC 2 Interrupt Enable Registers */
/* ==================================================================== */
#define SH1_RTC2_INT_ENABLE __IA64_UL_CONST(0x0000000110001600)
#define SH2_RTC2_INT_ENABLE __IA64_UL_CONST(0x0000000010001600)
#define SH_RTC2_INT_ENABLE_MASK __IA64_UL_CONST(0x0000000000000001)
#define SH_RTC2_INT_ENABLE_INIT __IA64_UL_CONST(0x0000000000000000)
/* SH_RTC2_INT_ENABLE_RTC2_ENABLE */
/* Description: Enable RTC 2 Interrupt */
#define SH_RTC2_INT_ENABLE_RTC2_ENABLE_SHFT 0
#define SH_RTC2_INT_ENABLE_RTC2_ENABLE_MASK \
__IA64_UL_CONST(0x0000000000000001)
/* ==================================================================== */
/* Register "SH_RTC3_INT_CONFIG" */
/* SHub RTC 3 Interrupt Config Registers */
/* ==================================================================== */
#define SH1_RTC3_INT_CONFIG __IA64_UL_CONST(0x0000000110001680)
#define SH2_RTC3_INT_CONFIG __IA64_UL_CONST(0x0000000010001680)
#define SH_RTC3_INT_CONFIG_MASK __IA64_UL_CONST(0x0ff3ffffffefffff)
#define SH_RTC3_INT_CONFIG_INIT __IA64_UL_CONST(0x0000000000000000)
/* SH_RTC3_INT_CONFIG_TYPE */
/* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */
#define SH_RTC3_INT_CONFIG_TYPE_SHFT 0
#define SH_RTC3_INT_CONFIG_TYPE_MASK __IA64_UL_CONST(0x0000000000000007)
/* SH_RTC3_INT_CONFIG_AGT */
/* Description: Agent, must be 0 for SHub */
#define SH_RTC3_INT_CONFIG_AGT_SHFT 3
#define SH_RTC3_INT_CONFIG_AGT_MASK __IA64_UL_CONST(0x0000000000000008)
/* SH_RTC3_INT_CONFIG_PID */
/* Description: Processor ID, same setting as on targeted McKinley */
#define SH_RTC3_INT_CONFIG_PID_SHFT 4
#define SH_RTC3_INT_CONFIG_PID_MASK __IA64_UL_CONST(0x00000000000ffff0)
/* SH_RTC3_INT_CONFIG_BASE */
/* Description: Optional interrupt vector area, 2MB aligned */
#define SH_RTC3_INT_CONFIG_BASE_SHFT 21
#define SH_RTC3_INT_CONFIG_BASE_MASK __IA64_UL_CONST(0x0003ffffffe00000)
/* SH_RTC3_INT_CONFIG_IDX */
/* Description: Targeted McKinley interrupt vector */
#define SH_RTC3_INT_CONFIG_IDX_SHFT 52
#define SH_RTC3_INT_CONFIG_IDX_MASK __IA64_UL_CONST(0x0ff0000000000000)
/* ==================================================================== */
/* Register "SH_RTC3_INT_ENABLE" */
/* SHub RTC 3 Interrupt Enable Registers */
/* ==================================================================== */
#define SH1_RTC3_INT_ENABLE __IA64_UL_CONST(0x0000000110001700)
#define SH2_RTC3_INT_ENABLE __IA64_UL_CONST(0x0000000010001700)
#define SH_RTC3_INT_ENABLE_MASK __IA64_UL_CONST(0x0000000000000001)
#define SH_RTC3_INT_ENABLE_INIT __IA64_UL_CONST(0x0000000000000000)
/* SH_RTC3_INT_ENABLE_RTC3_ENABLE */
/* Description: Enable RTC 3 Interrupt */
#define SH_RTC3_INT_ENABLE_RTC3_ENABLE_SHFT 0
#define SH_RTC3_INT_ENABLE_RTC3_ENABLE_MASK \
__IA64_UL_CONST(0x0000000000000001)
/* SH_EVENT_OCCURRED_RTC1_INT */
/* Description: Pending RTC 1 Interrupt */
#define SH_EVENT_OCCURRED_RTC1_INT_SHFT 24
#define SH_EVENT_OCCURRED_RTC1_INT_MASK __IA64_UL_CONST(0x0000000001000000)
/* SH_EVENT_OCCURRED_RTC2_INT */
/* Description: Pending RTC 2 Interrupt */
#define SH_EVENT_OCCURRED_RTC2_INT_SHFT 25
#define SH_EVENT_OCCURRED_RTC2_INT_MASK __IA64_UL_CONST(0x0000000002000000)
/* SH_EVENT_OCCURRED_RTC3_INT */
/* Description: Pending RTC 3 Interrupt */
#define SH_EVENT_OCCURRED_RTC3_INT_SHFT 26
#define SH_EVENT_OCCURRED_RTC3_INT_MASK __IA64_UL_CONST(0x0000000004000000)
/* ==================================================================== */
/* Register "SH_IPI_ACCESS" */
/* CPU interrupt Access Permission Bits */
/* ==================================================================== */
#define SH1_IPI_ACCESS __IA64_UL_CONST(0x0000000110060480)
#define SH2_IPI_ACCESS0 __IA64_UL_CONST(0x0000000010060c00)
#define SH2_IPI_ACCESS1 __IA64_UL_CONST(0x0000000010060c80)
#define SH2_IPI_ACCESS2 __IA64_UL_CONST(0x0000000010060d00)
#define SH2_IPI_ACCESS3 __IA64_UL_CONST(0x0000000010060d80)
/* ==================================================================== */
/* Register "SH_INT_CMPB" */
/* RTC Compare Value for Processor B */
/* ==================================================================== */
#define SH1_INT_CMPB __IA64_UL_CONST(0x00000001101b0080)
#define SH2_INT_CMPB __IA64_UL_CONST(0x00000000101b0080)
#define SH_INT_CMPB_MASK __IA64_UL_CONST(0x007fffffffffffff)
#define SH_INT_CMPB_INIT __IA64_UL_CONST(0x0000000000000000)
/* SH_INT_CMPB_REAL_TIME_CMPB */
/* Description: Real Time Clock Compare */
#define SH_INT_CMPB_REAL_TIME_CMPB_SHFT 0
#define SH_INT_CMPB_REAL_TIME_CMPB_MASK __IA64_UL_CONST(0x007fffffffffffff)
/* ==================================================================== */
/* Register "SH_INT_CMPC" */
/* RTC Compare Value for Processor C */
/* ==================================================================== */
#define SH1_INT_CMPC __IA64_UL_CONST(0x00000001101b0100)
#define SH2_INT_CMPC __IA64_UL_CONST(0x00000000101b0100)
#define SH_INT_CMPC_MASK __IA64_UL_CONST(0x007fffffffffffff)
#define SH_INT_CMPC_INIT __IA64_UL_CONST(0x0000000000000000)
/* SH_INT_CMPC_REAL_TIME_CMPC */
/* Description: Real Time Clock Compare */
#define SH_INT_CMPC_REAL_TIME_CMPC_SHFT 0
#define SH_INT_CMPC_REAL_TIME_CMPC_MASK __IA64_UL_CONST(0x007fffffffffffff)
/* ==================================================================== */
/* Register "SH_INT_CMPD" */
/* RTC Compare Value for Processor D */
/* ==================================================================== */
#define SH1_INT_CMPD __IA64_UL_CONST(0x00000001101b0180)
#define SH2_INT_CMPD __IA64_UL_CONST(0x00000000101b0180)
#define SH_INT_CMPD_MASK __IA64_UL_CONST(0x007fffffffffffff)
#define SH_INT_CMPD_INIT __IA64_UL_CONST(0x0000000000000000)
/* SH_INT_CMPD_REAL_TIME_CMPD */
/* Description: Real Time Clock Compare */
#define SH_INT_CMPD_REAL_TIME_CMPD_SHFT 0
#define SH_INT_CMPD_REAL_TIME_CMPD_MASK __IA64_UL_CONST(0x007fffffffffffff)
/* ==================================================================== */
/* Register "SH_MD_DQLP_MMR_DIR_PRIVEC0" */
/* privilege vector for acc=0 */
/* ==================================================================== */
#define SH1_MD_DQLP_MMR_DIR_PRIVEC0 __IA64_UL_CONST(0x0000000100030300)
/* ==================================================================== */
/* Register "SH_MD_DQRP_MMR_DIR_PRIVEC0" */
/* privilege vector for acc=0 */
/* ==================================================================== */
#define SH1_MD_DQRP_MMR_DIR_PRIVEC0 __IA64_UL_CONST(0x0000000100050300)
/* ==================================================================== */
/* Some MMRs are functionally identical (or close enough) on both SHUB1 */
/* and SHUB2 that it makes sense to define a generic name for the MMR. */
/* It is acceptable to use (for example) SH_IPI_INT to reference */
/* the IPI MMR. The value of SH_IPI_INT is determined at runtime based */
/* on the type of the SHUB. Do not use these #defines in performance */
/* critical code or loops - there is a small performance penalty. */
/* ==================================================================== */
#define shubmmr(a,b) (is_shub2() ? a##2_##b : a##1_##b)
#define SH_REAL_JUNK_BUS_LED0 shubmmr(SH, REAL_JUNK_BUS_LED0)
#define SH_IPI_INT shubmmr(SH, IPI_INT)
#define SH_EVENT_OCCURRED shubmmr(SH, EVENT_OCCURRED)
#define SH_EVENT_OCCURRED_ALIAS shubmmr(SH, EVENT_OCCURRED_ALIAS)
#define SH_RTC shubmmr(SH, RTC)
#define SH_RTC1_INT_CONFIG shubmmr(SH, RTC1_INT_CONFIG)
#define SH_RTC1_INT_ENABLE shubmmr(SH, RTC1_INT_ENABLE)
#define SH_RTC2_INT_CONFIG shubmmr(SH, RTC2_INT_CONFIG)
#define SH_RTC2_INT_ENABLE shubmmr(SH, RTC2_INT_ENABLE)
#define SH_RTC3_INT_CONFIG shubmmr(SH, RTC3_INT_CONFIG)
#define SH_RTC3_INT_ENABLE shubmmr(SH, RTC3_INT_ENABLE)
#define SH_INT_CMPB shubmmr(SH, INT_CMPB)
#define SH_INT_CMPC shubmmr(SH, INT_CMPC)
#define SH_INT_CMPD shubmmr(SH, INT_CMPD)
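A sketch of the runtime selection described above: one code path serves both SHUB revisions, at the cost of an is_shub2() test per reference.

/* Sketch: the generic name expands to SH1_RTC or SH2_RTC at runtime. */
static unsigned long rtc_mmr_address(void)
{
	return SH_RTC;
}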
/* ========================================================================== */
/* Register "SH2_BT_ENG_CSR_0" */
/* Engine 0 Control and Status Register */
/* ========================================================================== */
#define SH2_BT_ENG_CSR_0 __IA64_UL_CONST(0x0000000030040000)
#define SH2_BT_ENG_SRC_ADDR_0 __IA64_UL_CONST(0x0000000030040080)
#define SH2_BT_ENG_DEST_ADDR_0 __IA64_UL_CONST(0x0000000030040100)
#define SH2_BT_ENG_NOTIF_ADDR_0 __IA64_UL_CONST(0x0000000030040180)
/* ========================================================================== */
/* BTE interfaces 1-3 */
/* ========================================================================== */
#define SH2_BT_ENG_CSR_1 __IA64_UL_CONST(0x0000000030050000)
#define SH2_BT_ENG_CSR_2 __IA64_UL_CONST(0x0000000030060000)
#define SH2_BT_ENG_CSR_3 __IA64_UL_CONST(0x0000000030070000)
#endif /* _ASM_IA64_SN_SHUB_MMR_H */

File diff suppressed because it is too large

View file

@ -8,7 +8,7 @@
#ifndef _ASM_IA64_SN_SIMULATOR_H
#define _ASM_IA64_SN_SIMULATOR_H
#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_SGI_UV)
#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_UV)
#define SNMAGIC 0xaeeeeeee8badbeefL
#define IS_MEDUSA() ({long sn; asm("mov %0=cpuid[%1]" : "=r"(sn) : "r"(2)); sn == SNMAGIC;})

View file

@ -1,242 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved.
*
* Data types used by the SN_SAL_HWPERF_OP SAL call for monitoring
* SGI Altix node and router hardware
*
* Mark Goodwin <markgw@sgi.com> Mon Aug 30 12:23:46 EST 2004
*/
#ifndef SN_HWPERF_H
#define SN_HWPERF_H
/*
* object structure. SN_HWPERF_ENUM_OBJECTS and SN_HWPERF_GET_CPU_INFO
* return an array of these. Do not change this without also
* changing the corresponding SAL code.
*/
#define SN_HWPERF_MAXSTRING 128
struct sn_hwperf_object_info {
u32 id;
union {
struct {
u64 this_part:1;
u64 is_shared:1;
} fields;
struct {
u64 flags;
u64 reserved;
} b;
} f;
char name[SN_HWPERF_MAXSTRING];
char location[SN_HWPERF_MAXSTRING];
u32 ports;
};
#define sn_hwp_this_part f.fields.this_part
#define sn_hwp_is_shared f.fields.is_shared
#define sn_hwp_flags f.b.flags
/* macros for object classification */
#define SN_HWPERF_IS_NODE(x) ((x) && strstr((x)->name, "SHub"))
#define SN_HWPERF_IS_NODE_SHUB2(x) ((x) && strstr((x)->name, "SHub 2."))
#define SN_HWPERF_IS_IONODE(x) ((x) && strstr((x)->name, "TIO"))
#define SN_HWPERF_IS_NL3ROUTER(x) ((x) && strstr((x)->name, "NL3Router"))
#define SN_HWPERF_IS_NL4ROUTER(x) ((x) && strstr((x)->name, "NL4Router"))
#define SN_HWPERF_IS_OLDROUTER(x) ((x) && strstr((x)->name, "Router"))
#define SN_HWPERF_IS_ROUTER(x) (SN_HWPERF_IS_NL3ROUTER(x) || \
SN_HWPERF_IS_NL4ROUTER(x) || \
SN_HWPERF_IS_OLDROUTER(x))
#define SN_HWPERF_FOREIGN(x) ((x) && !(x)->sn_hwp_this_part && !(x)->sn_hwp_is_shared)
#define SN_HWPERF_SAME_OBJTYPE(x,y) ((SN_HWPERF_IS_NODE(x) && SN_HWPERF_IS_NODE(y)) ||\
(SN_HWPERF_IS_IONODE(x) && SN_HWPERF_IS_IONODE(y)) ||\
(SN_HWPERF_IS_ROUTER(x) && SN_HWPERF_IS_ROUTER(y)))
/* numa port structure, SN_HWPERF_ENUM_PORTS returns an array of these */
struct sn_hwperf_port_info {
u32 port;
u32 conn_id;
u32 conn_port;
};
/* for HWPERF_{GET,SET}_MMRS */
struct sn_hwperf_data {
u64 addr;
u64 data;
};
/* user ioctl() argument, see below */
struct sn_hwperf_ioctl_args {
u64 arg; /* argument, usually an object id */
u64 sz; /* size of transfer */
void *ptr; /* pointer to source/target */
u32 v0; /* second return value */
};
/*
* For SN_HWPERF_{GET,SET}_MMRS and SN_HWPERF_OBJECT_DISTANCE,
* sn_hwperf_ioctl_args.arg can be used to specify a CPU on which
* to call SAL, and whether to use an interprocessor interrupt
* or task migration in order to do so. If the CPU specified is
* SN_HWPERF_ARG_ANY_CPU, then the current CPU will be used.
*/
#define SN_HWPERF_ARG_ANY_CPU 0x7fffffffUL
#define SN_HWPERF_ARG_CPU_MASK 0x7fffffff00000000ULL
#define SN_HWPERF_ARG_USE_IPI_MASK 0x8000000000000000ULL
#define SN_HWPERF_ARG_OBJID_MASK 0x00000000ffffffffULL
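As a minimal sketch of how a caller might compose this bitmap (the helper name sn_hwperf_mk_arg is invented here for illustration and is not part of the interface):

/* Sketch only: build an "arg" bitmap for the CPU-targeted operations. */
static inline u64 sn_hwperf_mk_arg(u64 cpu, u32 obj_id, int use_ipi)
{
	u64 arg = (cpu << 32) & SN_HWPERF_ARG_CPU_MASK;

	arg |= obj_id & SN_HWPERF_ARG_OBJID_MASK;
	if (use_ipi)
		arg |= SN_HWPERF_ARG_USE_IPI_MASK;
	return arg;	/* e.g. sn_hwperf_mk_arg(SN_HWPERF_ARG_ANY_CPU, id, 0) */
}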
/*
* ioctl requests on the "sn_hwperf" misc device that call SAL.
*/
#define SN_HWPERF_OP_MEM_COPYIN 0x1000
#define SN_HWPERF_OP_MEM_COPYOUT 0x2000
#define SN_HWPERF_OP_MASK 0x0fff
/*
* Determine mem requirement.
* arg don't care
* sz 8
* p pointer to u64 integer
*/
#define SN_HWPERF_GET_HEAPSIZE 1
/*
* Install mem for SAL drvr
* arg don't care
* sz sizeof buffer pointed to by p
* p pointer to buffer for scratch area
*/
#define SN_HWPERF_INSTALL_HEAP 2
/*
* Determine number of objects
* arg don't care
* sz 8
* p pointer to u64 integer
*/
#define SN_HWPERF_OBJECT_COUNT (10|SN_HWPERF_OP_MEM_COPYOUT)
/*
* Determine object "distance", relative to a cpu. This operation can
* execute on a designated logical cpu number, using either an IPI or
* via task migration. If the cpu number is SN_HWPERF_ARG_ANY_CPU, then
* the current CPU is used. See the SN_HWPERF_ARG_* macros above.
*
* arg bitmap of IPI flag, cpu number and object id
* sz 8
* p pointer to u64 integer
*/
#define SN_HWPERF_OBJECT_DISTANCE (11|SN_HWPERF_OP_MEM_COPYOUT)
/*
* Enumerate objects. Special case if sz == 8, returns the required
* buffer size.
* arg don't care
* sz sizeof buffer pointed to by p
* p pointer to array of struct sn_hwperf_object_info
*/
#define SN_HWPERF_ENUM_OBJECTS (12|SN_HWPERF_OP_MEM_COPYOUT)
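A hypothetical userspace sequence (the device path, includes, and the use of these op codes directly as ioctl request numbers are assumptions, not taken from this header) would first query the object count, then size a buffer and enumerate:

#include <fcntl.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static struct sn_hwperf_object_info *hwperf_enum(int fd, u64 *nobj)
{
	struct sn_hwperf_ioctl_args a = { .sz = 8, .ptr = nobj };
	struct sn_hwperf_object_info *objs;

	if (ioctl(fd, SN_HWPERF_OBJECT_COUNT, &a) < 0)
		return NULL;
	objs = malloc(*nobj * sizeof(*objs));
	if (!objs)
		return NULL;
	a.sz = *nobj * sizeof(*objs);
	a.ptr = objs;
	if (ioctl(fd, SN_HWPERF_ENUM_OBJECTS, &a) < 0) {
		free(objs);
		objs = NULL;
	}
	return objs;	/* fd from open("/dev/sn_hwperf", O_RDONLY); path assumed */
}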
/*
* Enumerate NumaLink ports for an object. Special case if sz == 8,
* returns the required buffer size.
* arg object id
* sz sizeof buffer pointed to by p
* p pointer to array of struct sn_hwperf_port_info
*/
#define SN_HWPERF_ENUM_PORTS (13|SN_HWPERF_OP_MEM_COPYOUT)
/*
* SET/GET memory mapped registers. These operations can execute
* on a designated logical cpu number, using either an IPI or via
* task migration. If the cpu number is SN_HWPERF_ARG_ANY_CPU, then
* the current CPU is used. See the SN_HWPERF_ARG_* macros above.
*
* arg bitmap of ipi flag, cpu number and object id
* sz sizeof buffer pointed to by p
* p pointer to array of struct sn_hwperf_data
*/
#define SN_HWPERF_SET_MMRS (14|SN_HWPERF_OP_MEM_COPYIN)
#define SN_HWPERF_GET_MMRS (15|SN_HWPERF_OP_MEM_COPYOUT| \
SN_HWPERF_OP_MEM_COPYIN)
/*
* Lock a shared object
* arg object id
* sz don't care
* p don't care
*/
#define SN_HWPERF_ACQUIRE 16
/*
* Unlock a shared object
* arg object id
* sz don't care
* p don't care
*/
#define SN_HWPERF_RELEASE 17
/*
* Break a lock on a shared object
* arg object id
* sz don't care
* p don't care
*/
#define SN_HWPERF_FORCE_RELEASE 18
/*
* ioctl requests on "sn_hwperf" that do not call SAL
*/
/*
* get cpu info as an array of hwperf_object_info_t.
* id is logical CPU number, name is description, location
* is geoid (e.g. 001c04#1c). Special case if sz == 8,
* returns the required buffer size.
*
* arg don't care
* sz sizeof buffer pointed to by p
* p pointer to array of struct sn_hwperf_object_info
*/
#define SN_HWPERF_GET_CPU_INFO (100|SN_HWPERF_OP_MEM_COPYOUT)
/*
* Given an object id, return its node number (aka cnode).
* arg object id
* sz 8
* p pointer to u64 integer
*/
#define SN_HWPERF_GET_OBJ_NODE (101|SN_HWPERF_OP_MEM_COPYOUT)
/*
* Given a node number (cnode), return its nasid.
* arg ordinal node number (aka cnodeid)
* sz 8
* p pointer to u64 integer
*/
#define SN_HWPERF_GET_NODE_NASID (102|SN_HWPERF_OP_MEM_COPYOUT)
/*
* Given a node id, determine the id of the nearest node with CPUs
* and the id of the nearest node that has memory. The argument
* node would normally be a "headless" node, e.g. an "IO node".
* Return 0 on success.
*/
extern int sn_hwperf_get_nearest_node(cnodeid_t node,
cnodeid_t *near_mem, cnodeid_t *near_cpu);
/* return codes */
#define SN_HWPERF_OP_OK 0
#define SN_HWPERF_OP_NOMEM 1
#define SN_HWPERF_OP_NO_PERM 2
#define SN_HWPERF_OP_IO_ERROR 3
#define SN_HWPERF_OP_BUSY 4
#define SN_HWPERF_OP_RECONFIGURE 253
#define SN_HWPERF_OP_INVAL 254
int sn_topology_open(struct inode *inode, struct file *file);
int sn_topology_release(struct inode *inode, struct file *file);
#endif /* SN_HWPERF_H */

@ -1,132 +0,0 @@
/*
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_SN_CPUID_H
#define _ASM_IA64_SN_SN_CPUID_H
#include <linux/smp.h>
#include <asm/sn/addrs.h>
#include <asm/sn/pda.h>
#include <asm/intrinsics.h>
/*
* Functions for converting between cpuids, nodeids and NASIDs.
*
* These are for SGI platforms only.
*
*/
/*
* Definitions of terms (these definitions are for IA64 ONLY. Other architectures
* use cpuid/cpunum quite differently):
*
* CPUID - a number in range of 0..NR_CPUS-1 that uniquely identifies
* the cpu. The value cpuid has no significance on IA64 other than
* the boot cpu is 0.
* smp_processor_id() returns the cpuid of the current cpu.
*
* CPU_PHYSICAL_ID (also known as HARD_PROCESSOR_ID)
* This is the same as 31:24 of the processor LID register
* hard_smp_processor_id()- cpu_physical_id of current processor
* cpu_physical_id(cpuid) - convert a <cpuid> to a <physical_cpuid>
* cpu_logical_id(phy_id) - convert a <physical_cpuid> to a <cpuid>
* * not real efficient - don't use in perf critical code
*
* SLICE - a number in the range of 0 - 3 (typically) that represents the
* cpu number on a brick.
*
* SUBNODE - (almost obsolete) the number of the FSB that a cpu is
* connected to. This is also the same as the PI number. Usually 0 or 1.
*
* NOTE!!!: the value of the bits in the cpu physical id (SAPICid or LID) of a cpu has no
* significance. The SAPIC id (LID) is a 16-bit cookie that has meaning only to the PROM.
*
*
* The macros convert between cpu physical ids & slice/nasid/cnodeid.
* These terms are described below:
*
*
* Brick
* ----- ----- ----- ----- CPU
* | 0 | | 1 | | 0 | | 1 | SLICE
* ----- ----- ----- -----
* | | | |
* | | | |
* 0 | | 2 0 | | 2 FSB SLOT
* ------- -------
* | |
* | |
* | |
* ------------ -------------
* | | | |
* | SHUB | | SHUB | NASID (0..MAX_NASIDS)
* | |----- | | CNODEID (0..num_compact_nodes-1)
* | | | |
* | | | |
* ------------ -------------
* | |
*
*
*/
#define get_node_number(addr) NASID_GET(addr)
/*
* NOTE: on non-MP systems, only cpuid 0 exists
*/
extern short physical_node_map[]; /* indexed by nasid to get cnode */
/*
* Macros for retrieving info about current cpu
*/
#define get_nasid() (sn_nodepda->phys_cpuid[smp_processor_id()].nasid)
#define get_subnode() (sn_nodepda->phys_cpuid[smp_processor_id()].subnode)
#define get_slice() (sn_nodepda->phys_cpuid[smp_processor_id()].slice)
#define get_cnode() (sn_nodepda->phys_cpuid[smp_processor_id()].cnode)
#define get_sapicid() ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff)
/*
* Macros for retrieving info about an arbitrary cpu
* cpuid - logical cpu id
*/
#define cpuid_to_nasid(cpuid) (sn_nodepda->phys_cpuid[cpuid].nasid)
#define cpuid_to_subnode(cpuid) (sn_nodepda->phys_cpuid[cpuid].subnode)
#define cpuid_to_slice(cpuid) (sn_nodepda->phys_cpuid[cpuid].slice)
/*
* Don't use the following in performance critical code. They require scans
* of potentially large tables.
*/
extern int nasid_slice_to_cpuid(int, int);
/*
* cnodeid_to_nasid - convert a cnodeid to a NASID
*/
#define cnodeid_to_nasid(cnodeid) (sn_cnodeid_to_nasid[cnodeid])
/*
* nasid_to_cnodeid - convert a NASID to a cnodeid
*/
#define nasid_to_cnodeid(nasid) (physical_node_map[nasid])
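Purely as an illustration (this check is not in the original header), the two lookup tables are expected to agree in both directions:

static void __init sn_check_cpu_maps(void)
{
	int cpu, nasid, cnode;

	for_each_possible_cpu(cpu) {
		nasid = cpuid_to_nasid(cpu);
		cnode = nasid_to_cnodeid(nasid);
		/* converting back must land on the same NASID */
		BUG_ON(cnodeid_to_nasid(cnode) != nasid);
	}
}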
/*
* partition_coherence_id - get the coherence ID of the current partition
*/
extern u8 sn_coherency_id;
#define partition_coherence_id() (sn_coherency_id)
#endif /* _ASM_IA64_SN_SN_CPUID_H */

@ -1,58 +0,0 @@
#ifndef _ASM_IA64_SN_FEATURE_SETS_H
#define _ASM_IA64_SN_FEATURE_SETS_H
/*
* SN PROM Features
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2005-2006 Silicon Graphics, Inc. All rights reserved.
*/
/* --------------------- PROM Features -----------------------------*/
extern int sn_prom_feature_available(int id);
#define MAX_PROM_FEATURE_SETS 2
/*
* The following defines features that may or may not be supported by the
* current PROM. The OS uses sn_prom_feature_available(feature) to test for
* the presence of a PROM feature. Down rev (old) PROMs will always test
* "false" for new features.
*
* Use:
* if (sn_prom_feature_available(PRF_XXX))
* ...
*/
#define PRF_PAL_CACHE_FLUSH_SAFE 0
#define PRF_DEVICE_FLUSH_LIST 1
#define PRF_HOTPLUG_SUPPORT 2
#define PRF_CPU_DISABLE_SUPPORT 3
/* --------------------- OS Features -------------------------------*/
/*
* The following defines OS features that are optionally present in
* the operating system.
* During boot, PROM is notified of these features via a series of calls:
*
* ia64_sn_set_os_feature(feature1);
*
* Once enabled, a feature cannot be disabled.
*
* By default, features are disabled unless explicitly enabled.
*
* These defines must be kept in sync with the corresponding
* PROM definitions in feature_sets.h.
*/
#define OSF_MCA_SLV_TO_OS_INIT_SLV 0
#define OSF_FEAT_LOG_SBES 1
#define OSF_ACPI_ENABLE 2
#define OSF_PCISEGMENT_ENABLE 3
#endif /* _ASM_IA64_SN_FEATURE_SETS_H */

File diff suppressed because it is too large

@ -1,596 +0,0 @@
#ifndef _ASM_IA64_SN_TIO_TIOCA_H
#define _ASM_IA64_SN_TIO_TIOCA_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2003-2005 Silicon Graphics, Inc. All rights reserved.
*/
#define TIOCA_PART_NUM 0xE020
#define TIOCA_MFGR_NUM 0x24
#define TIOCA_REV_A 0x1
/*
* Register layout for TIO:CA. See below for bitmasks for each register.
*/
struct tioca {
u64 ca_id; /* 0x000000 */
u64 ca_control1; /* 0x000008 */
u64 ca_control2; /* 0x000010 */
u64 ca_status1; /* 0x000018 */
u64 ca_status2; /* 0x000020 */
u64 ca_gart_aperature; /* 0x000028 */
u64 ca_gfx_detach; /* 0x000030 */
u64 ca_inta_dest_addr; /* 0x000038 */
u64 ca_intb_dest_addr; /* 0x000040 */
u64 ca_err_int_dest_addr; /* 0x000048 */
u64 ca_int_status; /* 0x000050 */
u64 ca_int_status_alias; /* 0x000058 */
u64 ca_mult_error; /* 0x000060 */
u64 ca_mult_error_alias; /* 0x000068 */
u64 ca_first_error; /* 0x000070 */
u64 ca_int_mask; /* 0x000078 */
u64 ca_crm_pkterr_type; /* 0x000080 */
u64 ca_crm_pkterr_type_alias; /* 0x000088 */
u64 ca_crm_ct_error_detail_1; /* 0x000090 */
u64 ca_crm_ct_error_detail_2; /* 0x000098 */
u64 ca_crm_tnumto; /* 0x0000A0 */
u64 ca_gart_err; /* 0x0000A8 */
u64 ca_pcierr_type; /* 0x0000B0 */
u64 ca_pcierr_addr; /* 0x0000B8 */
u64 ca_pad_0000C0[3]; /* 0x0000{C0..D0} */
u64 ca_pci_rd_buf_flush; /* 0x0000D8 */
u64 ca_pci_dma_addr_extn; /* 0x0000E0 */
u64 ca_agp_dma_addr_extn; /* 0x0000E8 */
u64 ca_force_inta; /* 0x0000F0 */
u64 ca_force_intb; /* 0x0000F8 */
u64 ca_debug_vector_sel; /* 0x000100 */
u64 ca_debug_mux_core_sel; /* 0x000108 */
u64 ca_debug_mux_pci_sel; /* 0x000110 */
u64 ca_debug_domain_sel; /* 0x000118 */
u64 ca_pad_000120[28]; /* 0x0001{20..F8} */
u64 ca_gart_ptr_table; /* 0x200 */
u64 ca_gart_tlb_addr[8]; /* 0x2{08..40} */
};
/*
* Mask/shift definitions for TIO:CA registers. The convention here is
* to mainly use the names as they appear in the "TIO AEGIS Programmers'
* Reference" with a CA_ prefix added. Some exceptions were made to fix
* duplicate field names or to generalize fields that are common to
* different registers (ca_debug_mux_core_sel and ca_debug_mux_pci_sel for
* example).
*
* Fields consisting of a single bit have a single macro declaration to
* mask the bit. Fields consisting of multiple bits
* have two declarations: one to mask the proper bits in a register, and
* a second with the suffix "_SHFT" to identify how far the mask needs to
* be shifted right to get its base value.
*/
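So, as a sketch of the convention (ca_base is assumed to be an ioremapped struct tioca pointer, as in tioca_tlbflush further down), a multi-bit field is read by masking and then shifting:

	/* Sketch: extract the AGP calibration cycle field from ca_control1. */
	u64 ctrl1 = __sn_readq_relaxed(&ca_base->ca_control1);
	unsigned int cal = (ctrl1 & CA_AGP_CAL_CYCLE) >> CA_AGP_CAL_CYCLE_SHFT;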
/* ==== ca_control1 */
#define CA_SYS_BIG_END (1ull << 0)
#define CA_DMA_AGP_SWAP (1ull << 1)
#define CA_DMA_PCI_SWAP (1ull << 2)
#define CA_PIO_IO_SWAP (1ull << 3)
#define CA_PIO_MEM_SWAP (1ull << 4)
#define CA_GFX_WR_SWAP (1ull << 5)
#define CA_AGP_FW_ENABLE (1ull << 6)
#define CA_AGP_CAL_CYCLE (0x7ull << 7)
#define CA_AGP_CAL_CYCLE_SHFT 7
#define CA_AGP_CAL_PRSCL_BYP (1ull << 10)
#define CA_AGP_INIT_CAL_ENB (1ull << 11)
#define CA_INJ_ADDR_PERR (1ull << 12)
#define CA_INJ_DATA_PERR (1ull << 13)
/* bits 15:14 unused */
#define CA_PCIM_IO_NBE_AD (0x7ull << 16)
#define CA_PCIM_IO_NBE_AD_SHFT 16
#define CA_PCIM_FAST_BTB_ENB (1ull << 19)
/* bits 23:20 unused */
#define CA_PIO_ADDR_OFFSET (0xffull << 24)
#define CA_PIO_ADDR_OFFSET_SHFT 24
/* bits 35:32 unused */
#define CA_AGPDMA_OP_COMBDELAY (0x1full << 36)
#define CA_AGPDMA_OP_COMBDELAY_SHFT 36
/* bit 41 unused */
#define CA_AGPDMA_OP_ENB_COMBDELAY (1ull << 42)
#define CA_PCI_INT_LPCNT (0xffull << 44)
#define CA_PCI_INT_LPCNT_SHFT 44
/* bits 63:52 unused */
/* ==== ca_control2 */
#define CA_AGP_LATENCY_TO (0xffull << 0)
#define CA_AGP_LATENCY_TO_SHFT 0
#define CA_PCI_LATENCY_TO (0xffull << 8)
#define CA_PCI_LATENCY_TO_SHFT 8
#define CA_PCI_MAX_RETRY (0x3ffull << 16)
#define CA_PCI_MAX_RETRY_SHFT 16
/* bits 27:26 unused */
#define CA_RT_INT_EN (0x3ull << 28)
#define CA_RT_INT_EN_SHFT 28
#define CA_MSI_INT_ENB (1ull << 30)
#define CA_PCI_ARB_ERR_ENB (1ull << 31)
#define CA_GART_MEM_PARAM (0x3ull << 32)
#define CA_GART_MEM_PARAM_SHFT 32
#define CA_GART_RD_PREFETCH_ENB (1ull << 34)
#define CA_GART_WR_PREFETCH_ENB (1ull << 35)
#define CA_GART_FLUSH_TLB (1ull << 36)
/* bits 39:37 unused */
#define CA_CRM_TNUMTO_PERIOD (0x1fffull << 40)
#define CA_CRM_TNUMTO_PERIOD_SHFT 40
/* bits 55:53 unused */
#define CA_CRM_TNUMTO_ENB (1ull << 56)
#define CA_CRM_PRESCALER_BYP (1ull << 57)
/* bits 59:58 unused */
#define CA_CRM_MAX_CREDIT (0x7ull << 60)
#define CA_CRM_MAX_CREDIT_SHFT 60
/* bit 63 unused */
/* ==== ca_status1 */
#define CA_CORELET_ID (0x3ull << 0)
#define CA_CORELET_ID_SHFT 0
#define CA_INTA_N (1ull << 2)
#define CA_INTB_N (1ull << 3)
#define CA_CRM_CREDIT_AVAIL (0x7ull << 4)
#define CA_CRM_CREDIT_AVAIL_SHFT 4
/* bit 7 unused */
#define CA_CRM_SPACE_AVAIL (0x7full << 8)
#define CA_CRM_SPACE_AVAIL_SHFT 8
/* bit 15 unused */
#define CA_GART_TLB_VAL (0xffull << 16)
#define CA_GART_TLB_VAL_SHFT 16
/* bits 63:24 unused */
/* ==== ca_status2 */
#define CA_GFX_CREDIT_AVAIL (0xffull << 0)
#define CA_GFX_CREDIT_AVAIL_SHFT 0
#define CA_GFX_OPQ_AVAIL (0xffull << 8)
#define CA_GFX_OPQ_AVAIL_SHFT 8
#define CA_GFX_WRBUFF_AVAIL (0xffull << 16)
#define CA_GFX_WRBUFF_AVAIL_SHFT 16
#define CA_ADMA_OPQ_AVAIL (0xffull << 24)
#define CA_ADMA_OPQ_AVAIL_SHFT 24
#define CA_ADMA_WRBUFF_AVAIL (0xffull << 32)
#define CA_ADMA_WRBUFF_AVAIL_SHFT 32
#define CA_ADMA_RDBUFF_AVAIL (0x7full << 40)
#define CA_ADMA_RDBUFF_AVAIL_SHFT 40
#define CA_PCI_PIO_OP_STAT (1ull << 47)
#define CA_PDMA_OPQ_AVAIL (0xfull << 48)
#define CA_PDMA_OPQ_AVAIL_SHFT 48
#define CA_PDMA_WRBUFF_AVAIL (0xfull << 52)
#define CA_PDMA_WRBUFF_AVAIL_SHFT 52
#define CA_PDMA_RDBUFF_AVAIL (0x3ull << 56)
#define CA_PDMA_RDBUFF_AVAIL_SHFT 56
/* bits 63:58 unused */
/* ==== ca_gart_aperature */
#define CA_GART_AP_ENB_AGP (1ull << 0)
#define CA_GART_PAGE_SIZE (1ull << 1)
#define CA_GART_AP_ENB_PCI (1ull << 2)
/* bits 11:3 unused */
#define CA_GART_AP_SIZE (0x3ffull << 12)
#define CA_GART_AP_SIZE_SHFT 12
#define CA_GART_AP_BASE (0x3ffffffffffull << 22)
#define CA_GART_AP_BASE_SHFT 22
/* ==== ca_inta_dest_addr
==== ca_intb_dest_addr
==== ca_err_int_dest_addr */
/* bits 2:0 unused */
#define CA_INT_DEST_ADDR (0x7ffffffffffffull << 3)
#define CA_INT_DEST_ADDR_SHFT 3
/* bits 55:54 unused */
#define CA_INT_DEST_VECT (0xffull << 56)
#define CA_INT_DEST_VECT_SHFT 56
/* ==== ca_int_status */
/* ==== ca_int_status_alias */
/* ==== ca_mult_error */
/* ==== ca_mult_error_alias */
/* ==== ca_first_error */
/* ==== ca_int_mask */
#define CA_PCI_ERR (1ull << 0)
/* bits 3:1 unused */
#define CA_GART_FETCH_ERR (1ull << 4)
#define CA_GFX_WR_OVFLW (1ull << 5)
#define CA_PIO_REQ_OVFLW (1ull << 6)
#define CA_CRM_PKTERR (1ull << 7)
#define CA_CRM_DVERR (1ull << 8)
#define CA_TNUMTO (1ull << 9)
#define CA_CXM_RSP_CRED_OVFLW (1ull << 10)
#define CA_CXM_REQ_CRED_OVFLW (1ull << 11)
#define CA_PIO_INVALID_ADDR (1ull << 12)
#define CA_PCI_ARB_TO (1ull << 13)
#define CA_AGP_REQ_OFLOW (1ull << 14)
#define CA_SBA_TYPE1_ERR (1ull << 15)
/* bit 16 unused */
#define CA_INTA (1ull << 17)
#define CA_INTB (1ull << 18)
#define CA_MULT_INTA (1ull << 19)
#define CA_MULT_INTB (1ull << 20)
#define CA_GFX_CREDIT_OVFLW (1ull << 21)
/* bits 63:22 unused */
/* ==== ca_crm_pkterr_type */
/* ==== ca_crm_pkterr_type_alias */
#define CA_CRM_PKTERR_SBERR_HDR (1ull << 0)
#define CA_CRM_PKTERR_DIDN (1ull << 1)
#define CA_CRM_PKTERR_PACTYPE (1ull << 2)
#define CA_CRM_PKTERR_INV_TNUM (1ull << 3)
#define CA_CRM_PKTERR_ADDR_RNG (1ull << 4)
#define CA_CRM_PKTERR_ADDR_ALGN (1ull << 5)
#define CA_CRM_PKTERR_HDR_PARAM (1ull << 6)
#define CA_CRM_PKTERR_CW_ERR (1ull << 7)
#define CA_CRM_PKTERR_SBERR_NH (1ull << 8)
#define CA_CRM_PKTERR_EARLY_TERM (1ull << 9)
#define CA_CRM_PKTERR_EARLY_TAIL (1ull << 10)
#define CA_CRM_PKTERR_MSSNG_TAIL (1ull << 11)
#define CA_CRM_PKTERR_MSSNG_HDR (1ull << 12)
/* bits 15:13 unused */
#define CA_FIRST_CRM_PKTERR_SBERR_HDR (1ull << 16)
#define CA_FIRST_CRM_PKTERR_DIDN (1ull << 17)
#define CA_FIRST_CRM_PKTERR_PACTYPE (1ull << 18)
#define CA_FIRST_CRM_PKTERR_INV_TNUM (1ull << 19)
#define CA_FIRST_CRM_PKTERR_ADDR_RNG (1ull << 20)
#define CA_FIRST_CRM_PKTERR_ADDR_ALGN (1ull << 21)
#define CA_FIRST_CRM_PKTERR_HDR_PARAM (1ull << 22)
#define CA_FIRST_CRM_PKTERR_CW_ERR (1ull << 23)
#define CA_FIRST_CRM_PKTERR_SBERR_NH (1ull << 24)
#define CA_FIRST_CRM_PKTERR_EARLY_TERM (1ull << 25)
#define CA_FIRST_CRM_PKTERR_EARLY_TAIL (1ull << 26)
#define CA_FIRST_CRM_PKTERR_MSSNG_TAIL (1ull << 27)
#define CA_FIRST_CRM_PKTERR_MSSNG_HDR (1ull << 28)
/* bits 63:29 unused */
/* ==== ca_crm_ct_error_detail_1 */
#define CA_PKT_TYPE (0xfull << 0)
#define CA_PKT_TYPE_SHFT 0
#define CA_SRC_ID (0x3ull << 4)
#define CA_SRC_ID_SHFT 4
#define CA_DATA_SZ (0x3ull << 6)
#define CA_DATA_SZ_SHFT 6
#define CA_TNUM (0xffull << 8)
#define CA_TNUM_SHFT 8
#define CA_DW_DATA_EN (0xffull << 16)
#define CA_DW_DATA_EN_SHFT 16
#define CA_GFX_CRED (0xffull << 24)
#define CA_GFX_CRED_SHFT 24
#define CA_MEM_RD_PARAM (0x3ull << 32)
#define CA_MEM_RD_PARAM_SHFT 32
#define CA_PIO_OP (1ull << 34)
#define CA_CW_ERR (1ull << 35)
/* bits 62:36 unused */
#define CA_VALID (1ull << 63)
/* ==== ca_crm_ct_error_detail_2 */
/* bits 2:0 unused */
#define CA_PKT_ADDR (0x1fffffffffffffull << 3)
#define CA_PKT_ADDR_SHFT 3
/* bits 63:56 unused */
/* ==== ca_crm_tnumto */
#define CA_CRM_TNUMTO_VAL (0xffull << 0)
#define CA_CRM_TNUMTO_VAL_SHFT 0
#define CA_CRM_TNUMTO_WR (1ull << 8)
/* bits 63:9 unused */
/* ==== ca_gart_err */
#define CA_GART_ERR_SOURCE (0x3ull << 0)
#define CA_GART_ERR_SOURCE_SHFT 0
/* bits 3:2 unused */
#define CA_GART_ERR_ADDR (0xfffffffffull << 4)
#define CA_GART_ERR_ADDR_SHFT 4
/* bits 63:40 unused */
/* ==== ca_pcierr_type */
#define CA_PCIERR_DATA (0xffffffffull << 0)
#define CA_PCIERR_DATA_SHFT 0
#define CA_PCIERR_ENB (0xfull << 32)
#define CA_PCIERR_ENB_SHFT 32
#define CA_PCIERR_CMD (0xfull << 36)
#define CA_PCIERR_CMD_SHFT 36
#define CA_PCIERR_A64 (1ull << 40)
#define CA_PCIERR_SLV_SERR (1ull << 41)
#define CA_PCIERR_SLV_WR_PERR (1ull << 42)
#define CA_PCIERR_SLV_RD_PERR (1ull << 43)
#define CA_PCIERR_MST_SERR (1ull << 44)
#define CA_PCIERR_MST_WR_PERR (1ull << 45)
#define CA_PCIERR_MST_RD_PERR (1ull << 46)
#define CA_PCIERR_MST_MABT (1ull << 47)
#define CA_PCIERR_MST_TABT (1ull << 48)
#define CA_PCIERR_MST_RETRY_TOUT (1ull << 49)
#define CA_PCIERR_TYPES \
(CA_PCIERR_A64|CA_PCIERR_SLV_SERR| \
CA_PCIERR_SLV_WR_PERR|CA_PCIERR_SLV_RD_PERR| \
CA_PCIERR_MST_SERR|CA_PCIERR_MST_WR_PERR|CA_PCIERR_MST_RD_PERR| \
CA_PCIERR_MST_MABT|CA_PCIERR_MST_TABT|CA_PCIERR_MST_RETRY_TOUT)
/* bits 63:50 unused */
/* ==== ca_pci_dma_addr_extn */
#define CA_UPPER_NODE_OFFSET (0x3full << 0)
#define CA_UPPER_NODE_OFFSET_SHFT 0
/* bits 7:6 unused */
#define CA_CHIPLET_ID (0x3ull << 8)
#define CA_CHIPLET_ID_SHFT 8
/* bits 11:10 unused */
#define CA_PCI_DMA_NODE_ID (0xffffull << 12)
#define CA_PCI_DMA_NODE_ID_SHFT 12
/* bits 27:26 unused */
#define CA_PCI_DMA_PIO_MEM_TYPE (1ull << 28)
/* bits 63:29 unused */
/* ==== ca_agp_dma_addr_extn */
/* bits 19:0 unused */
#define CA_AGP_DMA_NODE_ID (0xffffull << 20)
#define CA_AGP_DMA_NODE_ID_SHFT 20
/* bits 27:26 unused */
#define CA_AGP_DMA_PIO_MEM_TYPE (1ull << 28)
/* bits 63:29 unused */
/* ==== ca_debug_vector_sel */
#define CA_DEBUG_MN_VSEL (0xfull << 0)
#define CA_DEBUG_MN_VSEL_SHFT 0
#define CA_DEBUG_PP_VSEL (0xfull << 4)
#define CA_DEBUG_PP_VSEL_SHFT 4
#define CA_DEBUG_GW_VSEL (0xfull << 8)
#define CA_DEBUG_GW_VSEL_SHFT 8
#define CA_DEBUG_GT_VSEL (0xfull << 12)
#define CA_DEBUG_GT_VSEL_SHFT 12
#define CA_DEBUG_PD_VSEL (0xfull << 16)
#define CA_DEBUG_PD_VSEL_SHFT 16
#define CA_DEBUG_AD_VSEL (0xfull << 20)
#define CA_DEBUG_AD_VSEL_SHFT 20
#define CA_DEBUG_CX_VSEL (0xfull << 24)
#define CA_DEBUG_CX_VSEL_SHFT 24
#define CA_DEBUG_CR_VSEL (0xfull << 28)
#define CA_DEBUG_CR_VSEL_SHFT 28
#define CA_DEBUG_BA_VSEL (0xfull << 32)
#define CA_DEBUG_BA_VSEL_SHFT 32
#define CA_DEBUG_PE_VSEL (0xfull << 36)
#define CA_DEBUG_PE_VSEL_SHFT 36
#define CA_DEBUG_BO_VSEL (0xfull << 40)
#define CA_DEBUG_BO_VSEL_SHFT 40
#define CA_DEBUG_BI_VSEL (0xfull << 44)
#define CA_DEBUG_BI_VSEL_SHFT 44
#define CA_DEBUG_AS_VSEL (0xfull << 48)
#define CA_DEBUG_AS_VSEL_SHFT 48
#define CA_DEBUG_PS_VSEL (0xfull << 52)
#define CA_DEBUG_PS_VSEL_SHFT 52
#define CA_DEBUG_PM_VSEL (0xfull << 56)
#define CA_DEBUG_PM_VSEL_SHFT 56
/* bits 63:60 unused */
/* ==== ca_debug_mux_core_sel */
/* ==== ca_debug_mux_pci_sel */
#define CA_DEBUG_MSEL0 (0x7ull << 0)
#define CA_DEBUG_MSEL0_SHFT 0
/* bit 3 unused */
#define CA_DEBUG_NSEL0 (0x7ull << 4)
#define CA_DEBUG_NSEL0_SHFT 4
/* bit 7 unused */
#define CA_DEBUG_MSEL1 (0x7ull << 8)
#define CA_DEBUG_MSEL1_SHFT 8
/* bit 11 unused */
#define CA_DEBUG_NSEL1 (0x7ull << 12)
#define CA_DEBUG_NSEL1_SHFT 12
/* bit 15 unused */
#define CA_DEBUG_MSEL2 (0x7ull << 16)
#define CA_DEBUG_MSEL2_SHFT 16
/* bit 19 unused */
#define CA_DEBUG_NSEL2 (0x7ull << 20)
#define CA_DEBUG_NSEL2_SHFT 20
/* bit 23 unused */
#define CA_DEBUG_MSEL3 (0x7ull << 24)
#define CA_DEBUG_MSEL3_SHFT 24
/* bit 27 unused */
#define CA_DEBUG_NSEL3 (0x7ull << 28)
#define CA_DEBUG_NSEL3_SHFT 28
/* bit 31 unused */
#define CA_DEBUG_MSEL4 (0x7ull << 32)
#define CA_DEBUG_MSEL4_SHFT 32
/* bit 35 unused */
#define CA_DEBUG_NSEL4 (0x7ull << 36)
#define CA_DEBUG_NSEL4_SHFT 36
/* bit 39 unused */
#define CA_DEBUG_MSEL5 (0x7ull << 40)
#define CA_DEBUG_MSEL5_SHFT 40
/* bit 43 unused */
#define CA_DEBUG_NSEL5 (0x7ull << 44)
#define CA_DEBUG_NSEL5_SHFT 44
/* bit 47 unused */
#define CA_DEBUG_MSEL6 (0x7ull << 48)
#define CA_DEBUG_MSEL6_SHFT 48
/* bit 51 unused */
#define CA_DEBUG_NSEL6 (0x7ull << 52)
#define CA_DEBUG_NSEL6_SHFT 52
/* bit 55 unused */
#define CA_DEBUG_MSEL7 (0x7ull << 56)
#define CA_DEBUG_MSEL7_SHFT 56
/* bit 59 unused */
#define CA_DEBUG_NSEL7 (0x7ull << 60)
#define CA_DEBUG_NSEL7_SHFT 60
/* bit 63 unused */
/* ==== ca_debug_domain_sel */
#define CA_DEBUG_DOMAIN_L (1ull << 0)
#define CA_DEBUG_DOMAIN_H (1ull << 1)
/* bits 63:2 unused */
/* ==== ca_gart_ptr_table */
#define CA_GART_PTR_VAL (1ull << 0)
/* bits 11:1 unused */
#define CA_GART_PTR_ADDR (0xfffffffffffull << 12)
#define CA_GART_PTR_ADDR_SHFT 12
/* bits 63:56 unused */
/* ==== ca_gart_tlb_addr[0-7] */
#define CA_GART_TLB_ADDR (0xffffffffffffffull << 0)
#define CA_GART_TLB_ADDR_SHFT 0
/* bits 62:56 unused */
#define CA_GART_TLB_ENTRY_VAL (1ull << 63)
/*
* PIO address space ranges for TIO:CA
*/
/* CA internal registers */
#define CA_PIO_ADMIN 0x00000000
#define CA_PIO_ADMIN_LEN 0x00010000
/* GFX Write Buffer - Diagnostics */
#define CA_PIO_GFX 0x00010000
#define CA_PIO_GFX_LEN 0x00010000
/* AGP DMA Write Buffer - Diagnostics */
#define CA_PIO_AGP_DMAWRITE 0x00020000
#define CA_PIO_AGP_DMAWRITE_LEN 0x00010000
/* AGP DMA READ Buffer - Diagnostics */
#define CA_PIO_AGP_DMAREAD 0x00030000
#define CA_PIO_AGP_DMAREAD_LEN 0x00010000
/* PCI Config Type 0 */
#define CA_PIO_PCI_TYPE0_CONFIG 0x01000000
#define CA_PIO_PCI_TYPE0_CONFIG_LEN 0x01000000
/* PCI Config Type 1 */
#define CA_PIO_PCI_TYPE1_CONFIG 0x02000000
#define CA_PIO_PCI_TYPE1_CONFIG_LEN 0x01000000
/* PCI I/O Cycles - mapped to PCI Address 0x00000000-0x04ffffff */
#define CA_PIO_PCI_IO 0x03000000
#define CA_PIO_PCI_IO_LEN 0x05000000
/* PCI MEM Cycles - mapped to PCI with CA_PIO_ADDR_OFFSET of ca_control1 */
/* use Fast Write if enabled and coretalk packet type is a GFX request */
#define CA_PIO_PCI_MEM_OFFSET 0x08000000
#define CA_PIO_PCI_MEM_OFFSET_LEN 0x08000000
/* PCI MEM Cycles - mapped to PCI Address 0x00000000-0xbfffffff */
/* use Fast Write if enabled and coretalk packet type is a GFX request */
#define CA_PIO_PCI_MEM 0x40000000
#define CA_PIO_PCI_MEM_LEN 0xc0000000
/*
* DMA space
*
* The CA aperture (i.e. bus address range) mapped by the GART is segmented into
* two parts. The lower portion of the aperture is used for mapping 32 bit
* PCI addresses which are managed by the dma interfaces in this file. The
* upper portion of the aperture is used for mapping 48 bit AGP addresses.
* The AGP portion of the aperture is managed by the agpgart_be.c driver
* in drivers/linux/agp. There are ca-specific hooks in that driver to
* manipulate the gart, but management of the AGP portion of the aperture
* is the responsibility of that driver.
*
* CA allows three main types of DMA mapping:
*
* PCI 64-bit Managed by this driver
* PCI 32-bit Managed by this driver
* AGP 48-bit Managed by hooks in the /dev/agpgart driver
*
* All of the above can optionally be remapped through the GART. The following
* table lists the combinations of addressing types and GART remapping that
* is currently supported by the driver (h/w supports all, s/w limits this):
*
* PCI64 PCI32 AGP48
* GART no yes yes
* Direct yes yes no
*
* GART remapping of PCI64 is not done because there is no need to. The
* 64 bit PCI address holds all of the information necessary to target any
* memory in the system.
*
* AGP48 is always mapped through the GART. Management of the AGP48 portion
* of the aperture is the responsibility of code in the agpgart_be driver.
*
* The non-64 bit bus address space will currently be partitioned like this:
*
* 0xffff_ffff_ffff +--------
* | AGP48 direct
* | Space managed by this driver
* CA_AGP_DIRECT_BASE +--------
* | AGP GART mapped (gfx aperture)
* | Space managed by /dev/agpgart driver
* | This range is exposed to the agpgart
* | driver as the "graphics aperture"
* CA_AGP_MAPPED_BASE +-----
* | PCI GART mapped
* | Space managed by this driver
* CA_PCI32_MAPPED_BASE +----
* | PCI32 direct
* | Space managed by this driver
* 0xC000_0000 +--------
* (CA_PCI32_DIRECT_BASE)
*
* The bus address range CA_PCI32_MAPPED_BASE through CA_AGP_DIRECT_BASE
* is what we call the CA aperture. Addresses falling in this range will
* be remapped using the GART.
*
* The bus address range CA_AGP_MAPPED_BASE through CA_AGP_DIRECT_BASE
* is what we call the graphics aperture. This is a subset of the CA
* aperture and is under the control of the agpgart_be driver.
*
* CA_PCI32_MAPPED_BASE, CA_AGP_MAPPED_BASE, and CA_AGP_DIRECT_BASE are
* somewhat arbitrary values. The known constraints on choosing these are:
*
* 1) CA_AGP_DIRECT_BASE-CA_PCI32_MAPPED_BASE+1 (the CA aperture size)
* must be one of the values supported by the ca_gart_aperature register.
* Currently valid values are: 4MB through 4096MB in powers of 2 increments
*
* 2) CA_AGP_DIRECT_BASE-CA_AGP_MAPPED_BASE+1 (the gfx aperture size)
* must be in MB units since that's what the agpgart driver assumes.
*/
/*
* Define Bus DMA ranges. These are configurable (see constraints above)
* and will probably need tuning based on experience.
*/
/*
* 11/24/03
* CA has an addressing glitch w.r.t. PCI direct 32 bit DMA that makes it
* generally unusable. The problem is that for PCI direct 32
* DMA's, all 32 bits of the bus address are used to form the lower 32 bits
* of the coretalk address, and coretalk bits 38:32 come from a register.
* Since only PCI bus addresses 0xC0000000-0xFFFFFFFF (1GB) are available
* for DMA (the rest is allocated to PIO), host node addresses need to be
* such that their lower 32 bits fall in the 0xC0000000-0xffffffff range
* as well. So there can be no PCI32 direct DMA below 3GB!! For this
* reason we set the CA_PCI32_DIRECT_SIZE to 0 which essentially makes
* tioca_dma_direct32() a noop but preserves the code flow should this issue
* be fixed in a respin.
*
* For now, all PCI32 DMA's must be mapped through the GART.
*/
#define CA_PCI32_DIRECT_BASE 0xC0000000UL /* BASE not configurable */
#define CA_PCI32_DIRECT_SIZE 0x00000000UL /* 0 MB */
#define CA_PCI32_MAPPED_BASE 0xC0000000UL
#define CA_PCI32_MAPPED_SIZE 0x40000000UL /* 1GB */
#define CA_AGP_MAPPED_BASE 0x80000000UL
#define CA_AGP_MAPPED_SIZE 0x40000000UL /* 1GB */
#define CA_AGP_DIRECT_BASE 0x40000000UL /* 1GB */
#define CA_AGP_DIRECT_SIZE 0x40000000UL
#define CA_APERATURE_BASE (CA_AGP_MAPPED_BASE)
#define CA_APERATURE_SIZE (CA_AGP_MAPPED_SIZE+CA_PCI32_MAPPED_SIZE)
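As a quick check against the constraints above: CA_APERATURE_SIZE works out to 0x40000000 + 0x40000000 = 0x80000000 (2048MB), a power-of-two size inside the supported 4MB..4096MB range, and the gfx aperture (CA_AGP_MAPPED_SIZE = 0x40000000 = 1024MB) is a whole number of megabytes, as the agpgart driver assumes.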
#endif /* _ASM_IA64_SN_TIO_TIOCA_H */

@ -1,207 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2003-2005 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_TIO_CA_AGP_PROVIDER_H
#define _ASM_IA64_SN_TIO_CA_AGP_PROVIDER_H
#include <asm/sn/tioca.h>
/*
* WAR enables
* Defines for individual WARs. Each is a bitmask of applicable
* part revision numbers. (1 << 1) == rev A, (1 << 2) == rev B,
* (3 << 1) == (rev A or rev B), etc
*/
#define TIOCA_WAR_ENABLED(pv, tioca_common) \
((1 << tioca_common->ca_rev) & pv)
/* TIO:ICE:FRZ:Freezer loses a PIO data ucred on PIO RD RSP with CW error */
#define PV907908 (1 << 1)
/* ATI config space problems after BIOS execution starts */
#define PV908234 (1 << 1)
/* CA:AGPDMA write request data mismatch with ABC1CL merge */
#define PV895469 (1 << 1)
/* TIO:CA TLB invalidate of written GART entries possibly not occurring in CA*/
#define PV910244 (1 << 1)
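To make the revision arithmetic concrete: on a rev A part ca_rev is TIOCA_REV_A (1), so TIOCA_WAR_ENABLED(PV910244, tioca_common) evaluates to (1 << 1) & (1 << 1), which is non-zero and the workaround applies; on a hypothetical rev B part (ca_rev == 2) the same test gives (1 << 2) & (1 << 1) == 0 and the workaround is skipped.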
struct tioca_dmamap {
struct list_head cad_list; /* headed by ca_list */
dma_addr_t cad_dma_addr; /* Linux dma handle */
uint cad_gart_entry; /* start entry in ca_gart_pagemap */
uint cad_gart_size; /* #entries for this map */
};
/*
* Kernel only fields. Prom may look at this stuff for debugging only.
* Access this structure through the ca_kernel_private ptr.
*/
struct tioca_common;
struct tioca_kernel {
struct tioca_common *ca_common; /* tioca this belongs to */
struct list_head ca_list; /* list of all ca's */
struct list_head ca_dmamaps;
spinlock_t ca_lock; /* Kernel lock */
cnodeid_t ca_closest_node;
struct list_head *ca_devices; /* bus->devices */
/*
* General GART stuff
*/
u64 ca_ap_size; /* size of aperture in bytes */
u32 ca_gart_entries; /* # u64 entries in gart */
u32 ca_ap_pagesize; /* aperture page size in bytes */
u64 ca_ap_bus_base; /* bus address of CA aperture */
u64 ca_gart_size; /* gart size in bytes */
u64 *ca_gart; /* gart table vaddr */
u64 ca_gart_coretalk_addr; /* gart coretalk addr */
u8 ca_gart_iscoherent; /* used in tioca_tlbflush */
/* PCI GART convenience values */
u64 ca_pciap_base; /* pci aperture bus base address */
u64 ca_pciap_size; /* pci aperture size (bytes) */
u64 ca_pcigart_base; /* gfx GART bus base address */
u64 *ca_pcigart; /* gfx GART vm address */
u32 ca_pcigart_entries;
u32 ca_pcigart_start; /* PCI start index in ca_gart */
void *ca_pcigart_pagemap;
/* AGP GART convenience values */
u64 ca_gfxap_base; /* gfx aperture bus base address */
u64 ca_gfxap_size; /* gfx aperture size (bytes) */
u64 ca_gfxgart_base; /* gfx GART bus base address */
u64 *ca_gfxgart; /* gfx GART vm address */
u32 ca_gfxgart_entries;
u32 ca_gfxgart_start; /* agpgart start index in ca_gart */
};
/*
* Common tioca info shared between kernel and prom
*
* DO NOT CHANGE THIS STRUCT WITHOUT MAKING CORRESPONDING CHANGES
* TO THE PROM VERSION.
*/
struct tioca_common {
struct pcibus_bussoft ca_common; /* common pciio header */
u32 ca_rev;
u32 ca_closest_nasid;
u64 ca_prom_private;
u64 ca_kernel_private;
};
/**
* tioca_paddr_to_gart - Convert an SGI coretalk address to a CA GART entry
* @paddr: page address to convert
*
* Convert a system [coretalk] address to a GART entry. GART entries are
* formed using the following:
*
* data = ( (1<<63) | ( (REMAP_NODE_ID << 40) | (MD_CHIPLET_ID << 38) |
* (REMAP_SYS_ADDR) ) >> 12 )
*
* DATA written to 1 GART TABLE Entry in system memory is remapped system
* addr for 1 page
*
* The data is for coretalk address format right shifted 12 bits with a
* valid bit.
*
* GART_TABLE_ENTRY [ 25:0 ] -- REMAP_SYS_ADDRESS[37:12].
* GART_TABLE_ENTRY [ 27:26 ] -- SHUB MD chiplet id.
* GART_TABLE_ENTRY [ 41:28 ] -- REMAP_NODE_ID.
* GART_TABLE_ENTRY [ 63 ] -- Valid Bit
*/
static inline u64
tioca_paddr_to_gart(unsigned long paddr)
{
/*
* We are assuming right now that paddr already has the correct
* format since the address from xtalk_dmaXXX should already have
* NODE_ID, CHIPLET_ID, and SYS_ADDR in the correct locations.
*/
return ((paddr) >> 12) | (1UL << 63);
}
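For instance, an illustrative coretalk address of 0x123456789000 yields (0x123456789000 >> 12) | (1UL << 63) == 0x8000000123456789: the remapped address, chiplet and node fields land in bits 41:0, and bit 63 marks the entry valid.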
/**
* tioca_physpage_to_gart - Map a host physical page for SGI CA based DMA
* @page_addr: system page address to map
*/
static inline unsigned long
tioca_physpage_to_gart(u64 page_addr)
{
u64 coretalk_addr;
coretalk_addr = PHYS_TO_TIODMA(page_addr);
if (!coretalk_addr) {
return 0;
}
return tioca_paddr_to_gart(coretalk_addr);
}
/**
* tioca_tlbflush - invalidate cached SGI CA GART TLB entries
* @tioca_kernel: CA context
*
* Invalidate tlb entries for a given CA GART. Main complexity is to account
* for the revA bug.
*/
static inline void
tioca_tlbflush(struct tioca_kernel *tioca_kernel)
{
volatile u64 tmp;
volatile struct tioca __iomem *ca_base;
struct tioca_common *tioca_common;
tioca_common = tioca_kernel->ca_common;
ca_base = (struct tioca __iomem *)tioca_common->ca_common.bs_base;
/*
* Explicit flushes not needed if GART is in cached mode
*/
if (tioca_kernel->ca_gart_iscoherent) {
if (TIOCA_WAR_ENABLED(PV910244, tioca_common)) {
/*
* PV910244: RevA CA needs explicit flushes.
* Need to put GART into uncached mode before
* flushing otherwise the explicit flush is ignored.
*
* Alternate WAR would be to leave GART cached and
* touch every CL aligned GART entry.
*/
__sn_clrq_relaxed(&ca_base->ca_control2, CA_GART_MEM_PARAM);
__sn_setq_relaxed(&ca_base->ca_control2, CA_GART_FLUSH_TLB);
__sn_setq_relaxed(&ca_base->ca_control2,
(0x2ull << CA_GART_MEM_PARAM_SHFT));
tmp = __sn_readq_relaxed(&ca_base->ca_control2);
}
return;
}
/*
* Gart in uncached mode ... need an explicit flush.
*/
__sn_setq_relaxed(&ca_base->ca_control2, CA_GART_FLUSH_TLB);
tmp = __sn_readq_relaxed(&ca_base->ca_control2);
}
extern u32 tioca_gart_found;
extern struct list_head tioca_list;
extern int tioca_init_provider(void);
extern void tioca_fastwrite_enable(struct tioca_kernel *tioca_kern);
#endif /* _ASM_IA64_SN_TIO_CA_AGP_PROVIDER_H */

@ -1,760 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2003-2005 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef __ASM_IA64_SN_TIOCE_H__
#define __ASM_IA64_SN_TIOCE_H__
/* CE ASIC part & mfgr information */
#define TIOCE_PART_NUM 0xCE00
#define TIOCE_SRC_ID 0x01
#define TIOCE_REV_A 0x1
/* CE Virtual PPB Vendor/Device IDs */
#define CE_VIRT_PPB_VENDOR_ID 0x10a9
#define CE_VIRT_PPB_DEVICE_ID 0x4002
/* CE Host Bridge Vendor/Device IDs */
#define CE_HOST_BRIDGE_VENDOR_ID 0x10a9
#define CE_HOST_BRIDGE_DEVICE_ID 0x4001
#define TIOCE_NUM_M40_ATES 4096
#define TIOCE_NUM_M3240_ATES 2048
#define TIOCE_NUM_PORTS 2
/*
* Register layout for TIOCE. MMR offsets are shown at the far right of the
* structure definition.
*/
typedef volatile struct tioce {
/*
* ADMIN : Administration Registers
*/
u64 ce_adm_id; /* 0x000000 */
u64 ce_pad_000008; /* 0x000008 */
u64 ce_adm_dyn_credit_status; /* 0x000010 */
u64 ce_adm_last_credit_status; /* 0x000018 */
u64 ce_adm_credit_limit; /* 0x000020 */
u64 ce_adm_force_credit; /* 0x000028 */
u64 ce_adm_control; /* 0x000030 */
u64 ce_adm_mmr_chn_timeout; /* 0x000038 */
u64 ce_adm_ssp_ure_timeout; /* 0x000040 */
u64 ce_adm_ssp_dre_timeout; /* 0x000048 */
u64 ce_adm_ssp_debug_sel; /* 0x000050 */
u64 ce_adm_int_status; /* 0x000058 */
u64 ce_adm_int_status_alias; /* 0x000060 */
u64 ce_adm_int_mask; /* 0x000068 */
u64 ce_adm_int_pending; /* 0x000070 */
u64 ce_adm_force_int; /* 0x000078 */
u64 ce_adm_ure_ups_buf_barrier_flush; /* 0x000080 */
u64 ce_adm_int_dest[15]; /* 0x000088 -- 0x0000F8 */
u64 ce_adm_error_summary; /* 0x000100 */
u64 ce_adm_error_summary_alias; /* 0x000108 */
u64 ce_adm_error_mask; /* 0x000110 */
u64 ce_adm_first_error; /* 0x000118 */
u64 ce_adm_error_overflow; /* 0x000120 */
u64 ce_adm_error_overflow_alias; /* 0x000128 */
u64 ce_pad_000130[2]; /* 0x000130 -- 0x000138 */
u64 ce_adm_tnum_error; /* 0x000140 */
u64 ce_adm_mmr_err_detail; /* 0x000148 */
u64 ce_adm_msg_sram_perr_detail; /* 0x000150 */
u64 ce_adm_bap_sram_perr_detail; /* 0x000158 */
u64 ce_adm_ce_sram_perr_detail; /* 0x000160 */
u64 ce_adm_ce_credit_oflow_detail; /* 0x000168 */
u64 ce_adm_tx_link_idle_max_timer; /* 0x000170 */
u64 ce_adm_pcie_debug_sel; /* 0x000178 */
u64 ce_pad_000180[16]; /* 0x000180 -- 0x0001F8 */
u64 ce_adm_pcie_debug_sel_top; /* 0x000200 */
u64 ce_adm_pcie_debug_lat_sel_lo_top; /* 0x000208 */
u64 ce_adm_pcie_debug_lat_sel_hi_top; /* 0x000210 */
u64 ce_adm_pcie_debug_trig_sel_top; /* 0x000218 */
u64 ce_adm_pcie_debug_trig_lat_sel_lo_top; /* 0x000220 */
u64 ce_adm_pcie_debug_trig_lat_sel_hi_top; /* 0x000228 */
u64 ce_adm_pcie_trig_compare_top; /* 0x000230 */
u64 ce_adm_pcie_trig_compare_en_top; /* 0x000238 */
u64 ce_adm_ssp_debug_sel_top; /* 0x000240 */
u64 ce_adm_ssp_debug_lat_sel_lo_top; /* 0x000248 */
u64 ce_adm_ssp_debug_lat_sel_hi_top; /* 0x000250 */
u64 ce_adm_ssp_debug_trig_sel_top; /* 0x000258 */
u64 ce_adm_ssp_debug_trig_lat_sel_lo_top; /* 0x000260 */
u64 ce_adm_ssp_debug_trig_lat_sel_hi_top; /* 0x000268 */
u64 ce_adm_ssp_trig_compare_top; /* 0x000270 */
u64 ce_adm_ssp_trig_compare_en_top; /* 0x000278 */
u64 ce_pad_000280[48]; /* 0x000280 -- 0x0003F8 */
u64 ce_adm_bap_ctrl; /* 0x000400 */
u64 ce_pad_000408[127]; /* 0x000408 -- 0x0007F8 */
u64 ce_msg_buf_data63_0[35]; /* 0x000800 -- 0x000918 */
u64 ce_pad_000920[29]; /* 0x000920 -- 0x0009F8 */
u64 ce_msg_buf_data127_64[35]; /* 0x000A00 -- 0x000B18 */
u64 ce_pad_000B20[29]; /* 0x000B20 -- 0x000BF8 */
u64 ce_msg_buf_parity[35]; /* 0x000C00 -- 0x000D18 */
u64 ce_pad_000D20[29]; /* 0x000D20 -- 0x000DF8 */
u64 ce_pad_000E00[576]; /* 0x000E00 -- 0x001FF8 */
/*
* LSI : LSI's PCI Express Link Registers (Link#1 and Link#2)
* Link#1 MMRs start at 0x002000, Link#2 MMRs at 0x003000
* NOTE: the comment offsets at far right: let 'z' = {2 or 3}
*/
#define ce_lsi(link_num) ce_lsi[link_num-1]
struct ce_lsi_reg {
u64 ce_lsi_lpu_id; /* 0x00z000 */
u64 ce_lsi_rst; /* 0x00z008 */
u64 ce_lsi_dbg_stat; /* 0x00z010 */
u64 ce_lsi_dbg_cfg; /* 0x00z018 */
u64 ce_lsi_ltssm_ctrl; /* 0x00z020 */
u64 ce_lsi_lk_stat; /* 0x00z028 */
u64 ce_pad_00z030[2]; /* 0x00z030 -- 0x00z038 */
u64 ce_lsi_int_and_stat; /* 0x00z040 */
u64 ce_lsi_int_mask; /* 0x00z048 */
u64 ce_pad_00z050[22]; /* 0x00z050 -- 0x00z0F8 */
u64 ce_lsi_lk_perf_cnt_sel; /* 0x00z100 */
u64 ce_pad_00z108; /* 0x00z108 */
u64 ce_lsi_lk_perf_cnt_ctrl; /* 0x00z110 */
u64 ce_pad_00z118; /* 0x00z118 */
u64 ce_lsi_lk_perf_cnt1; /* 0x00z120 */
u64 ce_lsi_lk_perf_cnt1_test; /* 0x00z128 */
u64 ce_lsi_lk_perf_cnt2; /* 0x00z130 */
u64 ce_lsi_lk_perf_cnt2_test; /* 0x00z138 */
u64 ce_pad_00z140[24]; /* 0x00z140 -- 0x00z1F8 */
u64 ce_lsi_lk_lyr_cfg; /* 0x00z200 */
u64 ce_lsi_lk_lyr_status; /* 0x00z208 */
u64 ce_lsi_lk_lyr_int_stat; /* 0x00z210 */
u64 ce_lsi_lk_ly_int_stat_test; /* 0x00z218 */
u64 ce_lsi_lk_ly_int_stat_mask; /* 0x00z220 */
u64 ce_pad_00z228[3]; /* 0x00z228 -- 0x00z238 */
u64 ce_lsi_fc_upd_ctl; /* 0x00z240 */
u64 ce_pad_00z248[3]; /* 0x00z248 -- 0x00z258 */
u64 ce_lsi_flw_ctl_upd_to_timer; /* 0x00z260 */
u64 ce_lsi_flw_ctl_upd_timer0; /* 0x00z268 */
u64 ce_lsi_flw_ctl_upd_timer1; /* 0x00z270 */
u64 ce_pad_00z278[49]; /* 0x00z278 -- 0x00z3F8 */
u64 ce_lsi_freq_nak_lat_thrsh; /* 0x00z400 */
u64 ce_lsi_ack_nak_lat_tmr; /* 0x00z408 */
u64 ce_lsi_rply_tmr_thr; /* 0x00z410 */
u64 ce_lsi_rply_tmr; /* 0x00z418 */
u64 ce_lsi_rply_num_stat; /* 0x00z420 */
u64 ce_lsi_rty_buf_max_addr; /* 0x00z428 */
u64 ce_lsi_rty_fifo_ptr; /* 0x00z430 */
u64 ce_lsi_rty_fifo_rd_wr_ptr; /* 0x00z438 */
u64 ce_lsi_rty_fifo_cred; /* 0x00z440 */
u64 ce_lsi_seq_cnt; /* 0x00z448 */
u64 ce_lsi_ack_sent_seq_num; /* 0x00z450 */
u64 ce_lsi_seq_cnt_fifo_max_addr; /* 0x00z458 */
u64 ce_lsi_seq_cnt_fifo_ptr; /* 0x00z460 */
u64 ce_lsi_seq_cnt_rd_wr_ptr; /* 0x00z468 */
u64 ce_lsi_tx_lk_ts_ctl; /* 0x00z470 */
u64 ce_pad_00z478; /* 0x00z478 */
u64 ce_lsi_mem_addr_ctl; /* 0x00z480 */
u64 ce_lsi_mem_d_ld0; /* 0x00z488 */
u64 ce_lsi_mem_d_ld1; /* 0x00z490 */
u64 ce_lsi_mem_d_ld2; /* 0x00z498 */
u64 ce_lsi_mem_d_ld3; /* 0x00z4A0 */
u64 ce_lsi_mem_d_ld4; /* 0x00z4A8 */
u64 ce_pad_00z4B0[2]; /* 0x00z4B0 -- 0x00z4B8 */
u64 ce_lsi_rty_d_cnt; /* 0x00z4C0 */
u64 ce_lsi_seq_buf_cnt; /* 0x00z4C8 */
u64 ce_lsi_seq_buf_bt_d; /* 0x00z4D0 */
u64 ce_pad_00z4D8; /* 0x00z4D8 */
u64 ce_lsi_ack_lat_thr; /* 0x00z4E0 */
u64 ce_pad_00z4E8[3]; /* 0x00z4E8 -- 0x00z4F8 */
u64 ce_lsi_nxt_rcv_seq_1_cntr; /* 0x00z500 */
u64 ce_lsi_unsp_dllp_rcvd; /* 0x00z508 */
u64 ce_lsi_rcv_lk_ts_ctl; /* 0x00z510 */
u64 ce_pad_00z518[29]; /* 0x00z518 -- 0x00z5F8 */
u64 ce_lsi_phy_lyr_cfg; /* 0x00z600 */
u64 ce_pad_00z608; /* 0x00z608 */
u64 ce_lsi_phy_lyr_int_stat; /* 0x00z610 */
u64 ce_lsi_phy_lyr_int_stat_test; /* 0x00z618 */
u64 ce_lsi_phy_lyr_int_mask; /* 0x00z620 */
u64 ce_pad_00z628[11]; /* 0x00z628 -- 0x00z678 */
u64 ce_lsi_rcv_phy_cfg; /* 0x00z680 */
u64 ce_lsi_rcv_phy_stat1; /* 0x00z688 */
u64 ce_lsi_rcv_phy_stat2; /* 0x00z690 */
u64 ce_lsi_rcv_phy_stat3; /* 0x00z698 */
u64 ce_lsi_rcv_phy_int_stat; /* 0x00z6A0 */
u64 ce_lsi_rcv_phy_int_stat_test; /* 0x00z6A8 */
u64 ce_lsi_rcv_phy_int_mask; /* 0x00z6B0 */
u64 ce_pad_00z6B8[9]; /* 0x00z6B8 -- 0x00z6F8 */
u64 ce_lsi_tx_phy_cfg; /* 0x00z700 */
u64 ce_lsi_tx_phy_stat; /* 0x00z708 */
u64 ce_lsi_tx_phy_int_stat; /* 0x00z710 */
u64 ce_lsi_tx_phy_int_stat_test; /* 0x00z718 */
u64 ce_lsi_tx_phy_int_mask; /* 0x00z720 */
u64 ce_lsi_tx_phy_stat2; /* 0x00z728 */
u64 ce_pad_00z730[10]; /* 0x00z730 -- 0x00z77F */
u64 ce_lsi_ltssm_cfg1; /* 0x00z780 */
u64 ce_lsi_ltssm_cfg2; /* 0x00z788 */
u64 ce_lsi_ltssm_cfg3; /* 0x00z790 */
u64 ce_lsi_ltssm_cfg4; /* 0x00z798 */
u64 ce_lsi_ltssm_cfg5; /* 0x00z7A0 */
u64 ce_lsi_ltssm_stat1; /* 0x00z7A8 */
u64 ce_lsi_ltssm_stat2; /* 0x00z7B0 */
u64 ce_lsi_ltssm_int_stat; /* 0x00z7B8 */
u64 ce_lsi_ltssm_int_stat_test; /* 0x00z7C0 */
u64 ce_lsi_ltssm_int_mask; /* 0x00z7C8 */
u64 ce_lsi_ltssm_stat_wr_en; /* 0x00z7D0 */
u64 ce_pad_00z7D8[5]; /* 0x00z7D8 -- 0x00z7F8 */
u64 ce_lsi_gb_cfg1; /* 0x00z800 */
u64 ce_lsi_gb_cfg2; /* 0x00z808 */
u64 ce_lsi_gb_cfg3; /* 0x00z810 */
u64 ce_lsi_gb_cfg4; /* 0x00z818 */
u64 ce_lsi_gb_stat; /* 0x00z820 */
u64 ce_lsi_gb_int_stat; /* 0x00z828 */
u64 ce_lsi_gb_int_stat_test; /* 0x00z830 */
u64 ce_lsi_gb_int_mask; /* 0x00z838 */
u64 ce_lsi_gb_pwr_dn1; /* 0x00z840 */
u64 ce_lsi_gb_pwr_dn2; /* 0x00z848 */
u64 ce_pad_00z850[246]; /* 0x00z850 -- 0x00zFF8 */
} ce_lsi[2];
u64 ce_pad_004000[10]; /* 0x004000 -- 0x004048 */
/*
* CRM: Coretalk Receive Module Registers
*/
u64 ce_crm_debug_mux; /* 0x004050 */
u64 ce_pad_004058; /* 0x004058 */
u64 ce_crm_ssp_err_cmd_wrd; /* 0x004060 */
u64 ce_crm_ssp_err_addr; /* 0x004068 */
u64 ce_crm_ssp_err_syn; /* 0x004070 */
u64 ce_pad_004078[499]; /* 0x004078 -- 0x005008 */
/*
* CXM: Coretalk Xmit Module Registers
*/
u64 ce_cxm_dyn_credit_status; /* 0x005010 */
u64 ce_cxm_last_credit_status; /* 0x005018 */
u64 ce_cxm_credit_limit; /* 0x005020 */
u64 ce_cxm_force_credit; /* 0x005028 */
u64 ce_cxm_disable_bypass; /* 0x005030 */
u64 ce_pad_005038[3]; /* 0x005038 -- 0x005048 */
u64 ce_cxm_debug_mux; /* 0x005050 */
u64 ce_pad_005058[501]; /* 0x005058 -- 0x005FF8 */
/*
* DTL: Downstream Transaction Layer Regs (Link#1 and Link#2)
* DTL: Link#1 MMRs start at 0x006000, Link#2 MMRs at 0x008000
* DTL: the comment offsets at far right: let 'y' = {6 or 8}
*
* UTL: Upstream Transaction Layer Regs (Link#1 and Link#2)
* UTL: Link#1 MMRs start at 0x007000, Link#2 MMRs at 0x009000
* UTL: the comment offsets at far right: let 'z' = {7 or 9}
*/
#define ce_dtl(link_num) ce_dtl_utl[link_num-1]
#define ce_utl(link_num) ce_dtl_utl[link_num-1]
struct ce_dtl_utl_reg {
/* DTL */
u64 ce_dtl_dtdr_credit_limit; /* 0x00y000 */
u64 ce_dtl_dtdr_credit_force; /* 0x00y008 */
u64 ce_dtl_dyn_credit_status; /* 0x00y010 */
u64 ce_dtl_dtl_last_credit_stat; /* 0x00y018 */
u64 ce_dtl_dtl_ctrl; /* 0x00y020 */
u64 ce_pad_00y028[5]; /* 0x00y028 -- 0x00y048 */
u64 ce_dtl_debug_sel; /* 0x00y050 */
u64 ce_pad_00y058[501]; /* 0x00y058 -- 0x00yFF8 */
/* UTL */
u64 ce_utl_utl_ctrl; /* 0x00z000 */
u64 ce_utl_debug_sel; /* 0x00z008 */
u64 ce_pad_00z010[510]; /* 0x00z010 -- 0x00zFF8 */
} ce_dtl_utl[2];
u64 ce_pad_00A000[514]; /* 0x00A000 -- 0x00B008 */
/*
* URE: Upstream Request Engine
*/
u64 ce_ure_dyn_credit_status; /* 0x00B010 */
u64 ce_ure_last_credit_status; /* 0x00B018 */
u64 ce_ure_credit_limit; /* 0x00B020 */
u64 ce_pad_00B028; /* 0x00B028 */
u64 ce_ure_control; /* 0x00B030 */
u64 ce_ure_status; /* 0x00B038 */
u64 ce_pad_00B040[2]; /* 0x00B040 -- 0x00B048 */
u64 ce_ure_debug_sel; /* 0x00B050 */
u64 ce_ure_pcie_debug_sel; /* 0x00B058 */
u64 ce_ure_ssp_err_cmd_wrd; /* 0x00B060 */
u64 ce_ure_ssp_err_addr; /* 0x00B068 */
u64 ce_ure_page_map; /* 0x00B070 */
u64 ce_ure_dir_map[TIOCE_NUM_PORTS]; /* 0x00B078 */
u64 ce_ure_pipe_sel1; /* 0x00B088 */
u64 ce_ure_pipe_mask1; /* 0x00B090 */
u64 ce_ure_pipe_sel2; /* 0x00B098 */
u64 ce_ure_pipe_mask2; /* 0x00B0A0 */
u64 ce_ure_pcie1_credits_sent; /* 0x00B0A8 */
u64 ce_ure_pcie1_credits_used; /* 0x00B0B0 */
u64 ce_ure_pcie1_credit_limit; /* 0x00B0B8 */
u64 ce_ure_pcie2_credits_sent; /* 0x00B0C0 */
u64 ce_ure_pcie2_credits_used; /* 0x00B0C8 */
u64 ce_ure_pcie2_credit_limit; /* 0x00B0D0 */
u64 ce_ure_pcie_force_credit; /* 0x00B0D8 */
u64 ce_ure_rd_tnum_val; /* 0x00B0E0 */
u64 ce_ure_rd_tnum_rsp_rcvd; /* 0x00B0E8 */
u64 ce_ure_rd_tnum_esent_timer; /* 0x00B0F0 */
u64 ce_ure_rd_tnum_error; /* 0x00B0F8 */
u64 ce_ure_rd_tnum_first_cl; /* 0x00B100 */
u64 ce_ure_rd_tnum_link_buf; /* 0x00B108 */
u64 ce_ure_wr_tnum_val; /* 0x00B110 */
u64 ce_ure_sram_err_addr0; /* 0x00B118 */
u64 ce_ure_sram_err_addr1; /* 0x00B120 */
u64 ce_ure_sram_err_addr2; /* 0x00B128 */
u64 ce_ure_sram_rd_addr0; /* 0x00B130 */
u64 ce_ure_sram_rd_addr1; /* 0x00B138 */
u64 ce_ure_sram_rd_addr2; /* 0x00B140 */
u64 ce_ure_sram_wr_addr0; /* 0x00B148 */
u64 ce_ure_sram_wr_addr1; /* 0x00B150 */
u64 ce_ure_sram_wr_addr2; /* 0x00B158 */
u64 ce_ure_buf_flush10; /* 0x00B160 */
u64 ce_ure_buf_flush11; /* 0x00B168 */
u64 ce_ure_buf_flush12; /* 0x00B170 */
u64 ce_ure_buf_flush13; /* 0x00B178 */
u64 ce_ure_buf_flush20; /* 0x00B180 */
u64 ce_ure_buf_flush21; /* 0x00B188 */
u64 ce_ure_buf_flush22; /* 0x00B190 */
u64 ce_ure_buf_flush23; /* 0x00B198 */
u64 ce_ure_pcie_control1; /* 0x00B1A0 */
u64 ce_ure_pcie_control2; /* 0x00B1A8 */
u64 ce_pad_00B1B0[458]; /* 0x00B1B0 -- 0x00BFF8 */
/* Upstream Data Buffer, Port1 */
struct ce_ure_maint_ups_dat1_data {
u64 data63_0[512]; /* 0x00C000 -- 0x00CFF8 */
u64 data127_64[512]; /* 0x00D000 -- 0x00DFF8 */
u64 parity[512]; /* 0x00E000 -- 0x00EFF8 */
} ce_ure_maint_ups_dat1;
/* Upstream Header Buffer, Port1 */
struct ce_ure_maint_ups_hdr1_data {
u64 data63_0[512]; /* 0x00F000 -- 0x00FFF8 */
u64 data127_64[512]; /* 0x010000 -- 0x010FF8 */
u64 parity[512]; /* 0x011000 -- 0x011FF8 */
} ce_ure_maint_ups_hdr1;
/* Upstream Data Buffer, Port2 */
struct ce_ure_maint_ups_dat2_data {
u64 data63_0[512]; /* 0x012000 -- 0x012FF8 */
u64 data127_64[512]; /* 0x013000 -- 0x013FF8 */
u64 parity[512]; /* 0x014000 -- 0x014FF8 */
} ce_ure_maint_ups_dat2;
/* Upstream Header Buffer, Port2 */
struct ce_ure_maint_ups_hdr2_data {
u64 data63_0[512]; /* 0x015000 -- 0x015FF8 */
u64 data127_64[512]; /* 0x016000 -- 0x016FF8 */
u64 parity[512]; /* 0x017000 -- 0x017FF8 */
} ce_ure_maint_ups_hdr2;
/* Downstream Data Buffer */
struct ce_ure_maint_dns_dat_data {
u64 data63_0[512]; /* 0x018000 -- 0x018FF8 */
u64 data127_64[512]; /* 0x019000 -- 0x019FF8 */
u64 parity[512]; /* 0x01A000 -- 0x01AFF8 */
} ce_ure_maint_dns_dat;
/* Downstream Header Buffer */
struct ce_ure_maint_dns_hdr_data {
u64 data31_0[64]; /* 0x01B000 -- 0x01B1F8 */
u64 data95_32[64]; /* 0x01B200 -- 0x01B3F8 */
u64 parity[64]; /* 0x01B400 -- 0x01B5F8 */
} ce_ure_maint_dns_hdr;
/* RCI Buffer Data */
struct ce_ure_maint_rci_data {
u64 data41_0[64]; /* 0x01B600 -- 0x01B7F8 */
u64 data69_42[64]; /* 0x01B800 -- 0x01B9F8 */
} ce_ure_maint_rci;
/* Response Queue */
u64 ce_ure_maint_rspq[64]; /* 0x01BA00 -- 0x01BBF8 */
u64 ce_pad_01C000[4224]; /* 0x01BC00 -- 0x023FF8 */
/* Admin Build-a-Packet Buffer */
struct ce_adm_maint_bap_buf_data {
u64 data63_0[258]; /* 0x024000 -- 0x024808 */
u64 data127_64[258]; /* 0x024810 -- 0x025018 */
u64 parity[258]; /* 0x025020 -- 0x025828 */
} ce_adm_maint_bap_buf;
u64 ce_pad_025830[5370]; /* 0x025830 -- 0x02FFF8 */
/* URE: 40bit PMU ATE Buffer */ /* 0x030000 -- 0x037FF8 */
u64 ce_ure_ate40[TIOCE_NUM_M40_ATES];
/* URE: 32/40bit PMU ATE Buffer */ /* 0x038000 -- 0x03BFF8 */
u64 ce_ure_ate3240[TIOCE_NUM_M3240_ATES];
u64 ce_pad_03C000[2050]; /* 0x03C000 -- 0x040008 */
/*
* DRE: Down Stream Request Engine
*/
u64 ce_dre_dyn_credit_status1; /* 0x040010 */
u64 ce_dre_dyn_credit_status2; /* 0x040018 */
u64 ce_dre_last_credit_status1; /* 0x040020 */
u64 ce_dre_last_credit_status2; /* 0x040028 */
u64 ce_dre_credit_limit1; /* 0x040030 */
u64 ce_dre_credit_limit2; /* 0x040038 */
u64 ce_dre_force_credit1; /* 0x040040 */
u64 ce_dre_force_credit2; /* 0x040048 */
u64 ce_dre_debug_mux1; /* 0x040050 */
u64 ce_dre_debug_mux2; /* 0x040058 */
u64 ce_dre_ssp_err_cmd_wrd; /* 0x040060 */
u64 ce_dre_ssp_err_addr; /* 0x040068 */
u64 ce_dre_comp_err_cmd_wrd; /* 0x040070 */
u64 ce_dre_comp_err_addr; /* 0x040078 */
u64 ce_dre_req_status; /* 0x040080 */
u64 ce_dre_config1; /* 0x040088 */
u64 ce_dre_config2; /* 0x040090 */
u64 ce_dre_config_req_status; /* 0x040098 */
u64 ce_pad_0400A0[12]; /* 0x0400A0 -- 0x0400F8 */
u64 ce_dre_dyn_fifo; /* 0x040100 */
u64 ce_pad_040108[3]; /* 0x040108 -- 0x040118 */
u64 ce_dre_last_fifo; /* 0x040120 */
u64 ce_pad_040128[27]; /* 0x040128 -- 0x0401F8 */
/* DRE Downstream Head Queue */
struct ce_dre_maint_ds_head_queue {
u64 data63_0[32]; /* 0x040200 -- 0x0402F8 */
u64 data127_64[32]; /* 0x040300 -- 0x0403F8 */
u64 parity[32]; /* 0x040400 -- 0x0404F8 */
} ce_dre_maint_ds_head_q;
u64 ce_pad_040500[352]; /* 0x040500 -- 0x040FF8 */
/* DRE Downstream Data Queue */
struct ce_dre_maint_ds_data_queue {
u64 data63_0[256]; /* 0x041000 -- 0x0417F8 */
u64 ce_pad_041800[256]; /* 0x041800 -- 0x041FF8 */
u64 data127_64[256]; /* 0x042000 -- 0x0427F8 */
u64 ce_pad_042800[256]; /* 0x042800 -- 0x042FF8 */
u64 parity[256]; /* 0x043000 -- 0x0437F8 */
u64 ce_pad_043800[256]; /* 0x043800 -- 0x043FF8 */
} ce_dre_maint_ds_data_q;
/* DRE URE Upstream Response Queue */
struct ce_dre_maint_ure_us_rsp_queue {
u64 data63_0[8]; /* 0x044000 -- 0x044038 */
u64 ce_pad_044040[24]; /* 0x044040 -- 0x0440F8 */
u64 data127_64[8]; /* 0x044100 -- 0x044138 */
u64 ce_pad_044140[24]; /* 0x044140 -- 0x0441F8 */
u64 parity[8]; /* 0x044200 -- 0x044238 */
u64 ce_pad_044240[24]; /* 0x044240 -- 0x0442F8 */
} ce_dre_maint_ure_us_rsp_q;
u64 ce_dre_maint_us_wrt_rsp[32];/* 0x044300 -- 0x0443F8 */
u64 ce_end_of_struct; /* 0x044400 */
} tioce_t;
/* ce_lsiX_gb_cfg1 register bit masks & shifts */
#define CE_LSI_GB_CFG1_RXL0S_THS_SHFT 0
#define CE_LSI_GB_CFG1_RXL0S_THS_MASK (0xffULL << 0)
#define CE_LSI_GB_CFG1_RXL0S_SMP_SHFT 8
#define CE_LSI_GB_CFG1_RXL0S_SMP_MASK (0xfULL << 8)
#define CE_LSI_GB_CFG1_RXL0S_ADJ_SHFT 12
#define CE_LSI_GB_CFG1_RXL0S_ADJ_MASK (0x7ULL << 12)
#define CE_LSI_GB_CFG1_RXL0S_FLT_SHFT 15
#define CE_LSI_GB_CFG1_RXL0S_FLT_MASK (0x1ULL << 15)
#define CE_LSI_GB_CFG1_LPBK_SEL_SHFT 16
#define CE_LSI_GB_CFG1_LPBK_SEL_MASK (0x3ULL << 16)
#define CE_LSI_GB_CFG1_LPBK_EN_SHFT 18
#define CE_LSI_GB_CFG1_LPBK_EN_MASK (0x1ULL << 18)
#define CE_LSI_GB_CFG1_RVRS_LB_SHFT 19
#define CE_LSI_GB_CFG1_RVRS_LB_MASK (0x1ULL << 19)
#define CE_LSI_GB_CFG1_RVRS_CLK_SHFT 20
#define CE_LSI_GB_CFG1_RVRS_CLK_MASK (0x3ULL << 20)
#define CE_LSI_GB_CFG1_SLF_TS_SHFT 24
#define CE_LSI_GB_CFG1_SLF_TS_MASK (0xfULL << 24)
/* ce_adm_int_mask/ce_adm_int_status register bit defines */
#define CE_ADM_INT_CE_ERROR_SHFT 0
#define CE_ADM_INT_LSI1_IP_ERROR_SHFT 1
#define CE_ADM_INT_LSI2_IP_ERROR_SHFT 2
#define CE_ADM_INT_PCIE_ERROR_SHFT 3
#define CE_ADM_INT_PORT1_HOTPLUG_EVENT_SHFT 4
#define CE_ADM_INT_PORT2_HOTPLUG_EVENT_SHFT 5
#define CE_ADM_INT_PCIE_PORT1_DEV_A_SHFT 6
#define CE_ADM_INT_PCIE_PORT1_DEV_B_SHFT 7
#define CE_ADM_INT_PCIE_PORT1_DEV_C_SHFT 8
#define CE_ADM_INT_PCIE_PORT1_DEV_D_SHFT 9
#define CE_ADM_INT_PCIE_PORT2_DEV_A_SHFT 10
#define CE_ADM_INT_PCIE_PORT2_DEV_B_SHFT 11
#define CE_ADM_INT_PCIE_PORT2_DEV_C_SHFT 12
#define CE_ADM_INT_PCIE_PORT2_DEV_D_SHFT 13
#define CE_ADM_INT_PCIE_MSG_SHFT 14 /*see int_dest_14*/
#define CE_ADM_INT_PCIE_MSG_SLOT_0_SHFT 14
#define CE_ADM_INT_PCIE_MSG_SLOT_1_SHFT 15
#define CE_ADM_INT_PCIE_MSG_SLOT_2_SHFT 16
#define CE_ADM_INT_PCIE_MSG_SLOT_3_SHFT 17
#define CE_ADM_INT_PORT1_PM_PME_MSG_SHFT 22
#define CE_ADM_INT_PORT2_PM_PME_MSG_SHFT 23
/* ce_adm_force_int register bit defines */
#define CE_ADM_FORCE_INT_PCIE_PORT1_DEV_A_SHFT 0
#define CE_ADM_FORCE_INT_PCIE_PORT1_DEV_B_SHFT 1
#define CE_ADM_FORCE_INT_PCIE_PORT1_DEV_C_SHFT 2
#define CE_ADM_FORCE_INT_PCIE_PORT1_DEV_D_SHFT 3
#define CE_ADM_FORCE_INT_PCIE_PORT2_DEV_A_SHFT 4
#define CE_ADM_FORCE_INT_PCIE_PORT2_DEV_B_SHFT 5
#define CE_ADM_FORCE_INT_PCIE_PORT2_DEV_C_SHFT 6
#define CE_ADM_FORCE_INT_PCIE_PORT2_DEV_D_SHFT 7
#define CE_ADM_FORCE_INT_ALWAYS_SHFT 8
/* ce_adm_int_dest register bit masks & shifts */
#define INTR_VECTOR_SHFT 56
/* ce_adm_error_mask and ce_adm_error_summary register bit masks */
#define CE_ADM_ERR_CRM_SSP_REQ_INVALID (0x1ULL << 0)
#define CE_ADM_ERR_SSP_REQ_HEADER (0x1ULL << 1)
#define CE_ADM_ERR_SSP_RSP_HEADER (0x1ULL << 2)
#define CE_ADM_ERR_SSP_PROTOCOL_ERROR (0x1ULL << 3)
#define CE_ADM_ERR_SSP_SBE (0x1ULL << 4)
#define CE_ADM_ERR_SSP_MBE (0x1ULL << 5)
#define CE_ADM_ERR_CXM_CREDIT_OFLOW (0x1ULL << 6)
#define CE_ADM_ERR_DRE_SSP_REQ_INVAL (0x1ULL << 7)
#define CE_ADM_ERR_SSP_REQ_LONG (0x1ULL << 8)
#define CE_ADM_ERR_SSP_REQ_OFLOW (0x1ULL << 9)
#define CE_ADM_ERR_SSP_REQ_SHORT (0x1ULL << 10)
#define CE_ADM_ERR_SSP_REQ_SIDEBAND (0x1ULL << 11)
#define CE_ADM_ERR_SSP_REQ_ADDR_ERR (0x1ULL << 12)
#define CE_ADM_ERR_SSP_REQ_BAD_BE (0x1ULL << 13)
#define CE_ADM_ERR_PCIE_COMPL_TIMEOUT (0x1ULL << 14)
#define CE_ADM_ERR_PCIE_UNEXP_COMPL (0x1ULL << 15)
#define CE_ADM_ERR_PCIE_ERR_COMPL (0x1ULL << 16)
#define CE_ADM_ERR_DRE_CREDIT_OFLOW (0x1ULL << 17)
#define CE_ADM_ERR_DRE_SRAM_PE (0x1ULL << 18)
#define CE_ADM_ERR_SSP_RSP_INVALID (0x1ULL << 19)
#define CE_ADM_ERR_SSP_RSP_LONG (0x1ULL << 20)
#define CE_ADM_ERR_SSP_RSP_SHORT (0x1ULL << 21)
#define CE_ADM_ERR_SSP_RSP_SIDEBAND (0x1ULL << 22)
#define CE_ADM_ERR_URE_SSP_RSP_UNEXP (0x1ULL << 23)
#define CE_ADM_ERR_URE_SSP_WR_REQ_TIMEOUT (0x1ULL << 24)
#define CE_ADM_ERR_URE_SSP_RD_REQ_TIMEOUT (0x1ULL << 25)
#define CE_ADM_ERR_URE_ATE3240_PAGE_FAULT (0x1ULL << 26)
#define CE_ADM_ERR_URE_ATE40_PAGE_FAULT (0x1ULL << 27)
#define CE_ADM_ERR_URE_CREDIT_OFLOW (0x1ULL << 28)
#define CE_ADM_ERR_URE_SRAM_PE (0x1ULL << 29)
#define CE_ADM_ERR_ADM_SSP_RSP_UNEXP (0x1ULL << 30)
#define CE_ADM_ERR_ADM_SSP_REQ_TIMEOUT (0x1ULL << 31)
#define CE_ADM_ERR_MMR_ACCESS_ERROR (0x1ULL << 32)
#define CE_ADM_ERR_MMR_ADDR_ERROR (0x1ULL << 33)
#define CE_ADM_ERR_ADM_CREDIT_OFLOW (0x1ULL << 34)
#define CE_ADM_ERR_ADM_SRAM_PE (0x1ULL << 35)
#define CE_ADM_ERR_DTL1_MIN_PDATA_CREDIT_ERR (0x1ULL << 36)
#define CE_ADM_ERR_DTL1_INF_COMPL_CRED_UPDT_ERR (0x1ULL << 37)
#define CE_ADM_ERR_DTL1_INF_POSTED_CRED_UPDT_ERR (0x1ULL << 38)
#define CE_ADM_ERR_DTL1_INF_NPOSTED_CRED_UPDT_ERR (0x1ULL << 39)
#define CE_ADM_ERR_DTL1_COMP_HD_CRED_MAX_ERR (0x1ULL << 40)
#define CE_ADM_ERR_DTL1_COMP_D_CRED_MAX_ERR (0x1ULL << 41)
#define CE_ADM_ERR_DTL1_NPOSTED_HD_CRED_MAX_ERR (0x1ULL << 42)
#define CE_ADM_ERR_DTL1_NPOSTED_D_CRED_MAX_ERR (0x1ULL << 43)
#define CE_ADM_ERR_DTL1_POSTED_HD_CRED_MAX_ERR (0x1ULL << 44)
#define CE_ADM_ERR_DTL1_POSTED_D_CRED_MAX_ERR (0x1ULL << 45)
#define CE_ADM_ERR_DTL2_MIN_PDATA_CREDIT_ERR (0x1ULL << 46)
#define CE_ADM_ERR_DTL2_INF_COMPL_CRED_UPDT_ERR (0x1ULL << 47)
#define CE_ADM_ERR_DTL2_INF_POSTED_CRED_UPDT_ERR (0x1ULL << 48)
#define CE_ADM_ERR_DTL2_INF_NPOSTED_CRED_UPDT_ERR (0x1ULL << 49)
#define CE_ADM_ERR_DTL2_COMP_HD_CRED_MAX_ERR (0x1ULL << 50)
#define CE_ADM_ERR_DTL2_COMP_D_CRED_MAX_ERR (0x1ULL << 51)
#define CE_ADM_ERR_DTL2_NPOSTED_HD_CRED_MAX_ERR (0x1ULL << 52)
#define CE_ADM_ERR_DTL2_NPOSTED_D_CRED_MAX_ERR (0x1ULL << 53)
#define CE_ADM_ERR_DTL2_POSTED_HD_CRED_MAX_ERR (0x1ULL << 54)
#define CE_ADM_ERR_DTL2_POSTED_D_CRED_MAX_ERR (0x1ULL << 55)
#define CE_ADM_ERR_PORT1_PCIE_COR_ERR (0x1ULL << 56)
#define CE_ADM_ERR_PORT1_PCIE_NFAT_ERR (0x1ULL << 57)
#define CE_ADM_ERR_PORT1_PCIE_FAT_ERR (0x1ULL << 58)
#define CE_ADM_ERR_PORT2_PCIE_COR_ERR (0x1ULL << 59)
#define CE_ADM_ERR_PORT2_PCIE_NFAT_ERR (0x1ULL << 60)
#define CE_ADM_ERR_PORT2_PCIE_FAT_ERR (0x1ULL << 61)
/* ce_adm_ure_ups_buf_barrier_flush register bit masks and shifts */
#define FLUSH_SEL_PORT1_PIPE0_SHFT 0
#define FLUSH_SEL_PORT1_PIPE1_SHFT 4
#define FLUSH_SEL_PORT1_PIPE2_SHFT 8
#define FLUSH_SEL_PORT1_PIPE3_SHFT 12
#define FLUSH_SEL_PORT2_PIPE0_SHFT 16
#define FLUSH_SEL_PORT2_PIPE1_SHFT 20
#define FLUSH_SEL_PORT2_PIPE2_SHFT 24
#define FLUSH_SEL_PORT2_PIPE3_SHFT 28
/* ce_dre_config1 register bit masks and shifts */
#define CE_DRE_RO_ENABLE (0x1ULL << 0)
#define CE_DRE_DYN_RO_ENABLE (0x1ULL << 1)
#define CE_DRE_SUP_CONFIG_COMP_ERROR (0x1ULL << 2)
#define CE_DRE_SUP_IO_COMP_ERROR (0x1ULL << 3)
#define CE_DRE_ADDR_MODE_SHFT 4
/* ce_dre_config_req_status register bit masks */
#define CE_DRE_LAST_CONFIG_COMPLETION (0x7ULL << 0)
#define CE_DRE_DOWNSTREAM_CONFIG_ERROR (0x1ULL << 3)
#define CE_DRE_CONFIG_COMPLETION_VALID (0x1ULL << 4)
#define CE_DRE_CONFIG_REQUEST_ACTIVE (0x1ULL << 5)
/* ce_ure_control register bit masks & shifts */
#define CE_URE_RD_MRG_ENABLE (0x1ULL << 0)
#define CE_URE_WRT_MRG_ENABLE1 (0x1ULL << 4)
#define CE_URE_WRT_MRG_ENABLE2 (0x1ULL << 5)
#define CE_URE_WRT_MRG_TIMER_SHFT 12
#define CE_URE_WRT_MRG_TIMER_MASK (0x7FFULL << CE_URE_WRT_MRG_TIMER_SHFT)
#define CE_URE_WRT_MRG_TIMER(x) (((u64)(x) << \
CE_URE_WRT_MRG_TIMER_SHFT) & \
CE_URE_WRT_MRG_TIMER_MASK)
#define CE_URE_RSPQ_BYPASS_DISABLE (0x1ULL << 24)
#define CE_URE_UPS_DAT1_PAR_DISABLE (0x1ULL << 32)
#define CE_URE_UPS_HDR1_PAR_DISABLE (0x1ULL << 33)
#define CE_URE_UPS_DAT2_PAR_DISABLE (0x1ULL << 34)
#define CE_URE_UPS_HDR2_PAR_DISABLE (0x1ULL << 35)
#define CE_URE_ATE_PAR_DISABLE (0x1ULL << 36)
#define CE_URE_RCI_PAR_DISABLE (0x1ULL << 37)
#define CE_URE_RSPQ_PAR_DISABLE (0x1ULL << 38)
#define CE_URE_DNS_DAT_PAR_DISABLE (0x1ULL << 39)
#define CE_URE_DNS_HDR_PAR_DISABLE (0x1ULL << 40)
#define CE_URE_MALFORM_DISABLE (0x1ULL << 44)
#define CE_URE_UNSUP_DISABLE (0x1ULL << 45)
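/*
 * Editor's sketch, not part of the original header: the ce_ure_control
 * bits and the CE_URE_WRT_MRG_TIMER() field macro compose into a single
 * 64-bit register value.  The helper name and the timer value 0x80 are
 * hypothetical.
 */
static inline u64 ce_ure_control_example(void)
{
        u64 val = 0;

        val |= CE_URE_RD_MRG_ENABLE;            /* merge upstream reads */
        val |= CE_URE_WRT_MRG_ENABLE1;          /* merge writes on port 1 */
        val |= CE_URE_WRT_MRG_TIMER(0x80);      /* field masked to 11 bits */
        return val;
}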
/* ce_ure_page_map register bit masks & shifts */
#define CE_URE_ATE3240_ENABLE (0x1ULL << 0)
#define CE_URE_ATE40_ENABLE (0x1ULL << 1)
#define CE_URE_PAGESIZE_SHFT 4
#define CE_URE_PAGESIZE_MASK (0x7ULL << CE_URE_PAGESIZE_SHFT)
#define CE_URE_4K_PAGESIZE (0x0ULL << CE_URE_PAGESIZE_SHFT)
#define CE_URE_16K_PAGESIZE (0x1ULL << CE_URE_PAGESIZE_SHFT)
#define CE_URE_64K_PAGESIZE (0x2ULL << CE_URE_PAGESIZE_SHFT)
#define CE_URE_128K_PAGESIZE (0x3ULL << CE_URE_PAGESIZE_SHFT)
#define CE_URE_256K_PAGESIZE (0x4ULL << CE_URE_PAGESIZE_SHFT)
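/*
 * Illustrative only (added by the editor): enable the 3240-entry ATE map
 * with 16K pages in ce_ure_page_map.  The helper name is made up; the
 * field encoding follows the masks above.
 */
static inline u64 ce_ure_page_map_example(void)
{
        return CE_URE_ATE3240_ENABLE | CE_URE_16K_PAGESIZE;
}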
/* ce_ure_pipe_sel register bit masks & shifts */
#define PKT_TRAFIC_SHRT 16
#define BUS_SRC_ID_SHFT 8
#define DEV_SRC_ID_SHFT 3
#define FNC_SRC_ID_SHFT 0
#define CE_URE_TC_MASK (0x07ULL << PKT_TRAFIC_SHRT)
#define CE_URE_BUS_MASK (0xFFULL << BUS_SRC_ID_SHFT)
#define CE_URE_DEV_MASK (0x1FULL << DEV_SRC_ID_SHFT)
#define CE_URE_FNC_MASK (0x07ULL << FNC_SRC_ID_SHFT)
#define CE_URE_PIPE_BUS(b) (((u64)(b) << BUS_SRC_ID_SHFT) & \
CE_URE_BUS_MASK)
#define CE_URE_PIPE_DEV(d) (((u64)(d) << DEV_SRC_ID_SHFT) & \
CE_URE_DEV_MASK)
#define CE_URE_PIPE_FNC(f) (((u64)(f) << FNC_SRC_ID_SHFT) & \
CE_URE_FNC_MASK)
#define CE_URE_SEL1_SHFT 0
#define CE_URE_SEL2_SHFT 20
#define CE_URE_SEL3_SHFT 40
#define CE_URE_SEL1_MASK (0x7FFFFULL << CE_URE_SEL1_SHFT)
#define CE_URE_SEL2_MASK (0x7FFFFULL << CE_URE_SEL2_SHFT)
#define CE_URE_SEL3_MASK (0x7FFFFULL << CE_URE_SEL3_SHFT)
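/*
 * Hedged sketch (not original code): a pipe-select entry identifies the
 * source by bus/device/function; each CE_URE_PIPE_*() helper above masks
 * its argument to the field width before shifting it into place.  The
 * resulting 19-bit entry would then be positioned with one of the
 * CE_URE_SELn_SHFT values.
 */
static inline u64 ce_ure_pipe_sel_entry(unsigned int bus, unsigned int dev,
                                        unsigned int fn)
{
        return CE_URE_PIPE_BUS(bus) | CE_URE_PIPE_DEV(dev) | CE_URE_PIPE_FNC(fn);
}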
/* ce_ure_pipe_mask register bit masks & shifts */
#define CE_URE_MASK1_SHFT 0
#define CE_URE_MASK2_SHFT 20
#define CE_URE_MASK3_SHFT 40
#define CE_URE_MASK1_MASK (0x7FFFFULL << CE_URE_MASK1_SHFT)
#define CE_URE_MASK2_MASK (0x7FFFFULL << CE_URE_MASK2_SHFT)
#define CE_URE_MASK3_MASK (0x7FFFFULL << CE_URE_MASK3_SHFT)
/* ce_ure_pcie_control1 register bit masks & shifts */
#define CE_URE_SI (0x1ULL << 0)
#define CE_URE_ELAL_SHFT 4
#define CE_URE_ELAL_MASK (0x7ULL << CE_URE_ELAL_SHFT)
#define CE_URE_ELAL_SET(n) (((u64)(n) << CE_URE_ELAL_SHFT) & \
CE_URE_ELAL_MASK)
#define CE_URE_ELAL1_SHFT 8
#define CE_URE_ELAL1_MASK (0x7ULL << CE_URE_ELAL1_SHFT)
#define CE_URE_ELAL1_SET(n) (((u64)(n) << CE_URE_ELAL1_SHFT) & \
CE_URE_ELAL1_MASK)
#define CE_URE_SCC (0x1ULL << 12)
#define CE_URE_PN1_SHFT 16
#define CE_URE_PN1_MASK (0xFFULL << CE_URE_PN1_SHFT)
#define CE_URE_PN2_SHFT 24
#define CE_URE_PN2_MASK (0xFFULL << CE_URE_PN2_SHFT)
#define CE_URE_PN1_SET(n) (((u64)(n) << CE_URE_PN1_SHFT) & \
CE_URE_PN1_MASK)
#define CE_URE_PN2_SET(n) (((u64)(n) << CE_URE_PN2_SHFT) & \
CE_URE_PN2_MASK)
/* ce_ure_pcie_control2 register bit masks & shifts */
#define CE_URE_ABP (0x1ULL << 0)
#define CE_URE_PCP (0x1ULL << 1)
#define CE_URE_MSP (0x1ULL << 2)
#define CE_URE_AIP (0x1ULL << 3)
#define CE_URE_PIP (0x1ULL << 4)
#define CE_URE_HPS (0x1ULL << 5)
#define CE_URE_HPC (0x1ULL << 6)
#define CE_URE_SPLV_SHFT 7
#define CE_URE_SPLV_MASK (0xFFULL << CE_URE_SPLV_SHFT)
#define CE_URE_SPLV_SET(n) (((u64)(n) << CE_URE_SPLV_SHFT) & \
CE_URE_SPLV_MASK)
#define CE_URE_SPLS_SHFT 15
#define CE_URE_SPLS_MASK (0x3ULL << CE_URE_SPLS_SHFT)
#define CE_URE_SPLS_SET(n) (((u64)(n) << CE_URE_SPLS_SHFT) & \
CE_URE_SPLS_MASK)
#define CE_URE_PSN1_SHFT 19
#define CE_URE_PSN1_MASK (0x1FFFULL << CE_URE_PSN1_SHFT)
#define CE_URE_PSN2_SHFT 32
#define CE_URE_PSN2_MASK (0x1FFFULL << CE_URE_PSN2_SHFT)
#define CE_URE_PSN1_SET(n) (((u64)(n) << CE_URE_PSN1_SHFT) & \
CE_URE_PSN1_MASK)
#define CE_URE_PSN2_SET(n) (((u64)(n) << CE_URE_PSN2_SHFT) & \
CE_URE_PSN2_MASK)
/*
* PIO address space ranges for CE
*/
/* Local CE Registers Space */
#define CE_PIO_MMR 0x00000000
#define CE_PIO_MMR_LEN 0x04000000
/* PCI Compatible Config Space */
#define CE_PIO_CONFIG_SPACE 0x04000000
#define CE_PIO_CONFIG_SPACE_LEN 0x04000000
/* PCI I/O Space Alias */
#define CE_PIO_IO_SPACE_ALIAS 0x08000000
#define CE_PIO_IO_SPACE_ALIAS_LEN 0x08000000
/* PCI Enhanced Config Space */
#define CE_PIO_E_CONFIG_SPACE 0x10000000
#define CE_PIO_E_CONFIG_SPACE_LEN 0x10000000
/* PCI I/O Space */
#define CE_PIO_IO_SPACE 0x100000000
#define CE_PIO_IO_SPACE_LEN 0x100000000
/* PCI MEM Space */
#define CE_PIO_MEM_SPACE 0x200000000
#define CE_PIO_MEM_SPACE_LEN TIO_HWIN_SIZE
/*
* CE PCI Enhanced Config Space shifts & masks
*/
#define CE_E_CONFIG_BUS_SHFT 20
#define CE_E_CONFIG_BUS_MASK (0xFF << CE_E_CONFIG_BUS_SHFT)
#define CE_E_CONFIG_DEVICE_SHFT 15
#define CE_E_CONFIG_DEVICE_MASK (0x1F << CE_E_CONFIG_DEVICE_SHFT)
#define CE_E_CONFIG_FUNC_SHFT 12
#define CE_E_CONFIG_FUNC_MASK (0x7 << CE_E_CONFIG_FUNC_SHFT)
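/*
 * Minimal sketch, assuming standard ECAM-style addressing (editor's
 * addition, not from the original header): the enhanced config space
 * offset is built by shifting bus/device/function into their fields.
 */
static inline unsigned long ce_e_config_offset(unsigned int bus,
                                               unsigned int dev,
                                               unsigned int fn)
{
        return ((bus << CE_E_CONFIG_BUS_SHFT) & CE_E_CONFIG_BUS_MASK) |
               ((dev << CE_E_CONFIG_DEVICE_SHFT) & CE_E_CONFIG_DEVICE_MASK) |
               ((fn << CE_E_CONFIG_FUNC_SHFT) & CE_E_CONFIG_FUNC_MASK);
}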
#endif /* __ASM_IA64_SN_TIOCE_H__ */

View file

@ -1,63 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2003-2005 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_CE_PROVIDER_H
#define _ASM_IA64_SN_CE_PROVIDER_H
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/tioce.h>
/*
* Common TIOCE structure shared between the prom and kernel
*
* DO NOT CHANGE THIS STRUCT WITHOUT MAKING CORRESPONDING CHANGES TO THE
* PROM VERSION.
*/
struct tioce_common {
struct pcibus_bussoft ce_pcibus; /* common pciio header */
u32 ce_rev;
u64 ce_kernel_private;
u64 ce_prom_private;
};
struct tioce_kernel {
struct tioce_common *ce_common;
spinlock_t ce_lock;
struct list_head ce_dmamap_list;
u64 ce_ate40_shadow[TIOCE_NUM_M40_ATES];
u64 ce_ate3240_shadow[TIOCE_NUM_M3240_ATES];
u32 ce_ate3240_pagesize;
u8 ce_port1_secondary;
/* per-port resources */
struct {
int dirmap_refcnt;
u64 dirmap_shadow;
} ce_port[TIOCE_NUM_PORTS];
};
struct tioce_dmamap {
struct list_head ce_dmamap_list; /* headed by tioce_kernel */
u32 refcnt;
u64 nbytes; /* # bytes mapped */
u64 ct_start; /* coretalk start address */
u64 pci_start; /* bus start address */
u64 __iomem *ate_hw;/* hw ptr of first ate in map */
u64 *ate_shadow; /* shadow ptr of first ate */
u16 ate_count; /* # ate's in the map */
};
extern int tioce_init_provider(void);
#endif /* __ASM_IA64_SN_CE_PROVIDER_H */

View file

@ -1,257 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003-2005 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_PCI_TIOCP_H
#define _ASM_IA64_SN_PCI_TIOCP_H
#define TIOCP_HOST_INTR_ADDR 0x003FFFFFFFFFFFFFUL
#define TIOCP_PCI64_CMDTYPE_MEM (0x1ull << 60)
#define TIOCP_PCI64_CMDTYPE_MSI (0x3ull << 60)
/*****************************************************************************
*********************** TIOCP MMR structure mapping ***************************
*****************************************************************************/
struct tiocp {
/* 0x000000-0x00FFFF -- Local Registers */
/* 0x000000-0x000057 -- (Legacy Widget Space) Configuration */
u64 cp_id; /* 0x000000 */
u64 cp_stat; /* 0x000008 */
u64 cp_err_upper; /* 0x000010 */
u64 cp_err_lower; /* 0x000018 */
#define cp_err cp_err_lower
u64 cp_control; /* 0x000020 */
u64 cp_req_timeout; /* 0x000028 */
u64 cp_intr_upper; /* 0x000030 */
u64 cp_intr_lower; /* 0x000038 */
#define cp_intr cp_intr_lower
u64 cp_err_cmdword; /* 0x000040 */
u64 _pad_000048; /* 0x000048 */
u64 cp_tflush; /* 0x000050 */
/* 0x000058-0x00007F -- Bridge-specific Configuration */
u64 cp_aux_err; /* 0x000058 */
u64 cp_resp_upper; /* 0x000060 */
u64 cp_resp_lower; /* 0x000068 */
#define cp_resp cp_resp_lower
u64 cp_tst_pin_ctrl; /* 0x000070 */
u64 cp_addr_lkerr; /* 0x000078 */
/* 0x000080-0x00008F -- PMU & MAP */
u64 cp_dir_map; /* 0x000080 */
u64 _pad_000088; /* 0x000088 */
/* 0x000090-0x00009F -- SSRAM */
u64 cp_map_fault; /* 0x000090 */
u64 _pad_000098; /* 0x000098 */
/* 0x0000A0-0x0000AF -- Arbitration */
u64 cp_arb; /* 0x0000A0 */
u64 _pad_0000A8; /* 0x0000A8 */
/* 0x0000B0-0x0000BF -- Number In A Can or ATE Parity Error */
u64 cp_ate_parity_err; /* 0x0000B0 */
u64 _pad_0000B8; /* 0x0000B8 */
/* 0x0000C0-0x0000FF -- PCI/GIO */
u64 cp_bus_timeout; /* 0x0000C0 */
u64 cp_pci_cfg; /* 0x0000C8 */
u64 cp_pci_err_upper; /* 0x0000D0 */
u64 cp_pci_err_lower; /* 0x0000D8 */
#define cp_pci_err cp_pci_err_lower
u64 _pad_0000E0[4]; /* 0x0000{E0..F8} */
/* 0x000100-0x0001FF -- Interrupt */
u64 cp_int_status; /* 0x000100 */
u64 cp_int_enable; /* 0x000108 */
u64 cp_int_rst_stat; /* 0x000110 */
u64 cp_int_mode; /* 0x000118 */
u64 cp_int_device; /* 0x000120 */
u64 cp_int_host_err; /* 0x000128 */
u64 cp_int_addr[8]; /* 0x0001{30,,,68} */
u64 cp_err_int_view; /* 0x000170 */
u64 cp_mult_int; /* 0x000178 */
u64 cp_force_always[8]; /* 0x0001{80,,,B8} */
u64 cp_force_pin[8]; /* 0x0001{C0,,,F8} */
/* 0x000200-0x000298 -- Device */
u64 cp_device[4]; /* 0x0002{00,,,18} */
u64 _pad_000220[4]; /* 0x0002{20,,,38} */
u64 cp_wr_req_buf[4]; /* 0x0002{40,,,58} */
u64 _pad_000260[4]; /* 0x0002{60,,,78} */
u64 cp_rrb_map[2]; /* 0x0002{80,,,88} */
#define cp_even_resp cp_rrb_map[0] /* 0x000280 */
#define cp_odd_resp cp_rrb_map[1] /* 0x000288 */
u64 cp_resp_status; /* 0x000290 */
u64 cp_resp_clear; /* 0x000298 */
u64 _pad_0002A0[12]; /* 0x0002{A0..F8} */
/* 0x000300-0x0003F8 -- Buffer Address Match Registers */
struct {
u64 upper; /* 0x0003{00,,,F0} */
u64 lower; /* 0x0003{08,,,F8} */
} cp_buf_addr_match[16];
/* 0x000400-0x0005FF -- Performance Monitor Registers (even only) */
struct {
u64 flush_w_touch; /* 0x000{400,,,5C0} */
u64 flush_wo_touch; /* 0x000{408,,,5C8} */
u64 inflight; /* 0x000{410,,,5D0} */
u64 prefetch; /* 0x000{418,,,5D8} */
u64 total_pci_retry; /* 0x000{420,,,5E0} */
u64 max_pci_retry; /* 0x000{428,,,5E8} */
u64 max_latency; /* 0x000{430,,,5F0} */
u64 clear_all; /* 0x000{438,,,5F8} */
} cp_buf_count[8];
/* 0x000600-0x0009FF -- PCI/X registers */
u64 cp_pcix_bus_err_addr; /* 0x000600 */
u64 cp_pcix_bus_err_attr; /* 0x000608 */
u64 cp_pcix_bus_err_data; /* 0x000610 */
u64 cp_pcix_pio_split_addr; /* 0x000618 */
u64 cp_pcix_pio_split_attr; /* 0x000620 */
u64 cp_pcix_dma_req_err_attr; /* 0x000628 */
u64 cp_pcix_dma_req_err_addr; /* 0x000630 */
u64 cp_pcix_timeout; /* 0x000638 */
u64 _pad_000640[24]; /* 0x000{640,,,6F8} */
/* 0x000700-0x000737 -- Debug Registers */
u64 cp_ct_debug_ctl; /* 0x000700 */
u64 cp_br_debug_ctl; /* 0x000708 */
u64 cp_mux3_debug_ctl; /* 0x000710 */
u64 cp_mux4_debug_ctl; /* 0x000718 */
u64 cp_mux5_debug_ctl; /* 0x000720 */
u64 cp_mux6_debug_ctl; /* 0x000728 */
u64 cp_mux7_debug_ctl; /* 0x000730 */
u64 _pad_000738[89]; /* 0x000{738,,,9F8} */
/* 0x000A00-0x000BFF -- PCI/X Read&Write Buffer */
struct {
u64 cp_buf_addr; /* 0x000{A00,,,AF0} */
u64 cp_buf_attr; /* 0X000{A08,,,AF8} */
} cp_pcix_read_buf_64[16];
struct {
u64 cp_buf_addr; /* 0x000{B00,,,BE0} */
u64 cp_buf_attr; /* 0x000{B08,,,BE8} */
u64 cp_buf_valid; /* 0x000{B10,,,BF0} */
u64 __pad1; /* 0x000{B18,,,BF8} */
} cp_pcix_write_buf_64[8];
/* End of Local Registers -- Start of Address Map space */
char _pad_000c00[0x010000 - 0x000c00];
/* 0x010000-0x011FF8 -- Internal ATE RAM (Auto Parity Generation) */
u64 cp_int_ate_ram[1024]; /* 0x010000-0x011FF8 */
char _pad_012000[0x14000 - 0x012000];
/* 0x014000-0x015FF8 -- Internal ATE RAM (Manual Parity Generation) */
u64 cp_int_ate_ram_mp[1024]; /* 0x014000-0x015FF8 */
char _pad_016000[0x18000 - 0x016000];
/* 0x18000-0x197F8 -- TIOCP Write Request Ram */
u64 cp_wr_req_lower[256]; /* 0x18000 - 0x187F8 */
u64 cp_wr_req_upper[256]; /* 0x18800 - 0x18FF8 */
u64 cp_wr_req_parity[256]; /* 0x19000 - 0x197F8 */
char _pad_019800[0x1C000 - 0x019800];
/* 0x1C000-0x1EFF8 -- TIOCP Read Response Ram */
u64 cp_rd_resp_lower[512]; /* 0x1C000 - 0x1CFF8 */
u64 cp_rd_resp_upper[512]; /* 0x1D000 - 0x1DFF8 */
u64 cp_rd_resp_parity[512]; /* 0x1E000 - 0x1EFF8 */
char _pad_01F000[0x20000 - 0x01F000];
/* 0x020000-0x021FFF -- Host Device (CP) Configuration Space (not used) */
char _pad_020000[0x021000 - 0x20000];
/* 0x021000-0x027FFF -- PCI Device Configuration Spaces */
union {
u8 c[0x1000 / 1]; /* 0x02{0000,,,7FFF} */
u16 s[0x1000 / 2]; /* 0x02{0000,,,7FFF} */
u32 l[0x1000 / 4]; /* 0x02{0000,,,7FFF} */
u64 d[0x1000 / 8]; /* 0x02{0000,,,7FFF} */
union {
u8 c[0x100 / 1];
u16 s[0x100 / 2];
u32 l[0x100 / 4];
u64 d[0x100 / 8];
} f[8];
} cp_type0_cfg_dev[7]; /* 0x02{1000,,,7FFF} */
/* 0x028000-0x028FFF -- PCI Type 1 Configuration Space */
union {
u8 c[0x1000 / 1]; /* 0x028000-0x029000 */
u16 s[0x1000 / 2]; /* 0x028000-0x029000 */
u32 l[0x1000 / 4]; /* 0x028000-0x029000 */
u64 d[0x1000 / 8]; /* 0x028000-0x029000 */
union {
u8 c[0x100 / 1];
u16 s[0x100 / 2];
u32 l[0x100 / 4];
u64 d[0x100 / 8];
} f[8];
} cp_type1_cfg; /* 0x028000-0x029000 */
char _pad_029000[0x030000-0x029000];
/* 0x030000-0x030007 -- PCI Interrupt Acknowledge Cycle */
union {
u8 c[8 / 1];
u16 s[8 / 2];
u32 l[8 / 4];
u64 d[8 / 8];
} cp_pci_iack; /* 0x030000-0x030007 */
char _pad_030007[0x040000-0x030008];
/* 0x040000-0x040007 -- PCIX Special Cycle */
union {
u8 c[8 / 1];
u16 s[8 / 2];
u32 l[8 / 4];
u64 d[8 / 8];
} cp_pcix_cycle; /* 0x040000-0x040007 */
char _pad_040007[0x200000-0x040008];
/* 0x200000-0x7FFFFF -- PCI/GIO Device Spaces */
union {
u8 c[0x100000 / 1];
u16 s[0x100000 / 2];
u32 l[0x100000 / 4];
u64 d[0x100000 / 8];
} cp_devio_raw[6]; /* 0x200000-0x7FFFFF */
#define cp_devio(n) cp_devio_raw[((n) < 2) ? ((n) * 2) : ((n) + 2)]
char _pad_800000[0xA00000-0x800000];
/* 0xA00000-0xBFFFFF -- PCI/GIO Device Spaces w/flush */
union {
u8 c[0x100000 / 1];
u16 s[0x100000 / 2];
u32 l[0x100000 / 4];
u64 d[0x100000 / 8];
} cp_devio_raw_flush[6]; /* 0xA00000-0xBFFFFF */
#define cp_devio_flush(n) cp_devio_raw_flush[((n) < 2) ? ((n) * 2) : ((n) + 2)]
};
#endif /* _ASM_IA64_SN_PCI_TIOCP_H */

View file

@ -1,72 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2005 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_TIO_TIOCX_H
#define _ASM_IA64_SN_TIO_TIOCX_H
#ifdef __KERNEL__
struct cx_id_s {
unsigned int part_num;
unsigned int mfg_num;
int nasid;
};
struct cx_dev {
struct cx_id_s cx_id;
int bt; /* board/blade type */
void *soft; /* driver specific */
struct hubdev_info *hubdev;
struct device dev;
struct cx_drv *driver;
};
struct cx_device_id {
unsigned int part_num;
unsigned int mfg_num;
};
struct cx_drv {
char *name;
const struct cx_device_id *id_table;
struct device_driver driver;
int (*probe) (struct cx_dev * dev, const struct cx_device_id * id);
int (*remove) (struct cx_dev * dev);
};
/* create DMA address by stripping AS bits */
#define TIOCX_DMA_ADDR(a) (u64)((u64)(a) & 0xffffcfffffffffUL)
#define TIOCX_TO_TIOCX_DMA_ADDR(a) (u64)(((u64)(a) & 0xfffffffff) | \
((((u64)(a)) & 0xffffc000000000UL) << 2))
#define TIO_CE_ASIC_PARTNUM 0xce00
#define TIOCX_CORELET 3
/* These are taken from tio_mmr_as.h */
#define TIO_ICE_FRZ_CFG TIO_MMR_ADDR_MOD(0x00000000b0008100UL)
#define TIO_ICE_PMI_TX_CFG TIO_MMR_ADDR_MOD(0x00000000b000b100UL)
#define TIO_ICE_PMI_TX_DYN_CREDIT_STAT_CB3 TIO_MMR_ADDR_MOD(0x00000000b000be18UL)
#define TIO_ICE_PMI_TX_DYN_CREDIT_STAT_CB3_CREDIT_CNT_MASK 0x000000000000000fUL
#define to_cx_dev(n) container_of(n, struct cx_dev, dev)
#define to_cx_driver(drv) container_of(drv, struct cx_drv, driver)
extern struct sn_irq_info *tiocx_irq_alloc(nasid_t, int, int, nasid_t, int);
extern void tiocx_irq_free(struct sn_irq_info *);
extern int cx_device_unregister(struct cx_dev *);
extern int cx_device_register(nasid_t, int, int, struct hubdev_info *, int);
extern int cx_driver_unregister(struct cx_drv *);
extern int cx_driver_register(struct cx_drv *);
extern u64 tiocx_dma_addr(u64 addr);
extern u64 tiocx_swin_base(int nasid);
extern void tiocx_mmr_store(int nasid, u64 offset, u64 value);
extern u64 tiocx_mmr_load(int nasid, u64 offset);
#endif // __KERNEL__
#endif // _ASM_IA64_SN_TIO_TIOCX_H

View file

@ -1,26 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1999,2001-2003 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (C) 1999 by Ralf Baechle
*/
#ifndef _ASM_IA64_SN_TYPES_H
#define _ASM_IA64_SN_TYPES_H
#include <linux/types.h>
typedef unsigned long cpuid_t;
typedef signed short nasid_t; /* node id in numa-as-id space */
typedef signed char partid_t; /* partition ID type */
typedef unsigned int moduleid_t; /* user-visible module number type */
typedef unsigned int cmoduleid_t; /* kernel compact module id type */
typedef unsigned char slotid_t; /* slot (blade) within module */
typedef unsigned char slabid_t; /* slab (asic) within slot */
typedef u64 nic_t;
typedef unsigned long iopaddr_t;
typedef unsigned long paddr_t;
typedef short cnodeid_t;
#endif /* _ASM_IA64_SN_TYPES_H */

View file

@ -96,8 +96,6 @@ acpi_get_sysname(void)
} else if (!strcmp(hdr->oem_id, "SGI")) {
if (!strcmp(hdr->oem_table_id + 4, "UV"))
return "uv";
else
return "sn2";
}
#ifdef CONFIG_INTEL_IOMMU
@ -407,7 +405,7 @@ get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
int pxm;
pxm = pa->proximity_domain_lo;
if (ia64_platform_is("sn2") || acpi_srat_revision >= 2)
if (acpi_srat_revision >= 2)
pxm += pa->proximity_domain_hi[0] << 8;
return pxm;
}
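/*
 * Worked example (editor's illustration, not part of the patch): with
 * SRAT revision >= 2 the proximity domain is a split 16-bit value, so
 * proximity_domain_lo = 0x34 and proximity_domain_hi[0] = 0x12 give
 * pxm = 0x34 + (0x12 << 8) = 0x1234; revision 1 tables only define the
 * low byte.
 */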
@ -418,7 +416,7 @@ get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
int pxm;
pxm = ma->proximity_domain;
if (!ia64_platform_is("sn2") && acpi_srat_revision <= 1)
if (acpi_srat_revision <= 1)
pxm &= 0xff;
return pxm;
@ -710,9 +708,8 @@ int __init acpi_boot_init(void)
if (acpi_table_parse_madt
(ACPI_MADT_TYPE_IO_SAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1) {
if (!ia64_platform_is("sn2"))
printk(KERN_ERR PREFIX
"Error parsing MADT - no IOSAPIC entries\n");
printk(KERN_ERR PREFIX
"Error parsing MADT - no IOSAPIC entries\n");
}
/* System-Level Interrupt Routing */

View file

@ -73,17 +73,6 @@ void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
irq_redir[irq] = (char) (redir & 0xff);
}
}
bool is_affinity_mask_valid(const struct cpumask *cpumask)
{
if (ia64_platform_is("sn2")) {
/* Only allow one CPU to be specified in the smp_affinity mask */
if (cpumask_weight(cpumask) != 1)
return false;
}
return true;
}
#endif /* CONFIG_SMP */
int __init arch_early_irq_init(void)

View file

@ -110,13 +110,6 @@ check_versions (struct ia64_sal_systab *systab)
sal_revision = SAL_VERSION_CODE(2, 8);
sal_version = SAL_VERSION_CODE(0, 0);
}
if (ia64_platform_is("sn2") && (sal_revision == SAL_VERSION_CODE(2, 9)))
/*
* SGI Altix has hard-coded version 2.9 in their prom
* but they actually implement 3.2, so let's fix it here.
*/
sal_revision = SAL_VERSION_CODE(3, 2);
}
static void __init

View file

@ -260,11 +260,11 @@ __initcall(register_memory);
* in kdump case. See the comment in sba_init() in sba_iommu.c.
*
* So, the only machvec that really supports loading the kdump kernel
* over 4 GB is "sn2".
* over 4 GB is "uv".
*/
static int __init check_crashkernel_memory(unsigned long pbase, size_t size)
{
if (ia64_platform_is("sn2") || ia64_platform_is("uv"))
if (ia64_platform_is("uv"))
return 1;
else
return pbase < (1UL << 32);

View file

@ -57,7 +57,6 @@
#include <asm/sal.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/sn/arch.h>
#define SMP_DEBUG 0
@ -658,11 +657,6 @@ int __cpu_disable(void)
return (-EBUSY);
}
if (ia64_platform_is("sn2")) {
if (!sn_cpu_disable_allowed(cpu))
return -EBUSY;
}
set_cpu_online(cpu, false);
if (migrate_platform_irqs(cpu)) {

View file

@ -24,7 +24,6 @@
#include <asm/pgtable.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>
#include <asm/sn/arch.h>
extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *);
@ -129,10 +128,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
preempt_disable();
if (ia64_platform_is("sn2"))
sn_flush_all_caches(uc_addr, IA64_GRANULE_SIZE);
else
flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);
flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);
/* flush the just introduced uncached translation from the TLB */
local_flush_tlb_all();

View file

@ -1,12 +0,0 @@
# arch/ia64/sn/Makefile
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 2004 Silicon Graphics, Inc. All Rights Reserved.
#
# Makefile for the sn ia64 subplatform
#
obj-y += kernel/ pci/

View file

@ -1,81 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_IOERROR_H
#define _ASM_IA64_SN_IOERROR_H
/*
* IO error structure.
*
* This structure would expand to hold the information retrieved from
* all IO related error registers.
*
* This structure is defined to hold all system specific
* information related to a single error.
*
* This serves a couple of purposes.
* - Error handling often involves translating one form of address to another
* form. So, instead of having different data structures at each level,
* we have a single structure, and the appropriate fields get filled in
* at each layer.
* - This provides a way to dump all error related information in any layer
* of error handling (debugging aid).
*
* A second possibility is to allow each layer to define its own error
* data structure, and fill in the proper fields. This has the advantage
* of isolating the layers.
* A big concern is the potential stack usage (and overflow) if each layer
* defines these structures on stack (assuming we don't want to do kmalloc).
*
* Any layer wishing to pass extra information to a layer next to it in
* error handling hierarchy, can do so as a separate parameter.
*/
typedef struct io_error_s {
/* Bit fields indicating which structure fields are valid */
union {
struct {
unsigned ievb_errortype:1;
unsigned ievb_widgetnum:1;
unsigned ievb_widgetdev:1;
unsigned ievb_srccpu:1;
unsigned ievb_srcnode:1;
unsigned ievb_errnode:1;
unsigned ievb_sysioaddr:1;
unsigned ievb_xtalkaddr:1;
unsigned ievb_busspace:1;
unsigned ievb_busaddr:1;
unsigned ievb_vaddr:1;
unsigned ievb_memaddr:1;
unsigned ievb_epc:1;
unsigned ievb_ef:1;
unsigned ievb_tnum:1;
} iev_b;
unsigned iev_a;
} ie_v;
short ie_errortype; /* error type: extra info about error */
short ie_widgetnum; /* Widget number that's in error */
short ie_widgetdev; /* Device within widget in error */
cpuid_t ie_srccpu; /* CPU on srcnode generating error */
cnodeid_t ie_srcnode; /* Node which caused the error */
cnodeid_t ie_errnode; /* Node where error was noticed */
iopaddr_t ie_sysioaddr; /* Sys specific IO address */
iopaddr_t ie_xtalkaddr; /* Xtalk (48bit) addr of Error */
iopaddr_t ie_busspace; /* Bus specific address space */
iopaddr_t ie_busaddr; /* Bus specific address */
caddr_t ie_vaddr; /* Virtual address of error */
iopaddr_t ie_memaddr; /* Physical memory address */
caddr_t ie_epc; /* pc when error reported */
caddr_t ie_ef; /* eframe when error reported */
short ie_tnum; /* Xtalk TNUM field */
} ioerror_t;
#define IOERROR_INIT(e) do { (e)->ie_v.iev_a = 0; } while (0)
#define IOERROR_SETVALUE(e,f,v) do { (e)->ie_ ## f = (v); (e)->ie_v.iev_b.ievb_ ## f = 1; } while (0)
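/*
 * Usage sketch, not in the original header: IOERROR_SETVALUE() both
 * stores a field and flags it valid in ie_v, so consumers can test
 * which fields were filled in.  The function and values are made up.
 */
static inline void ioerror_example(ioerror_t *e, short widget)
{
        IOERROR_INIT(e);                        /* mark all fields invalid */
        IOERROR_SETVALUE(e, widgetnum, widget); /* sets ie_widgetnum + valid bit */
}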
#endif /* _ASM_IA64_SN_IOERROR_H */

View file

@ -1,41 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_TIO_H
#define _ASM_IA64_SN_TIO_H
#define TIO_MMR_ADDR_MOD
#define TIO_NODE_ID TIO_MMR_ADDR_MOD(0x0000000090060e80)
#define TIO_ITTE_BASE 0xb0008800 /* base of translation table entries */
#define TIO_ITTE(bigwin) (TIO_ITTE_BASE + 8*(bigwin))
#define TIO_ITTE_OFFSET_BITS 8 /* size of offset field */
#define TIO_ITTE_OFFSET_MASK ((1<<TIO_ITTE_OFFSET_BITS)-1)
#define TIO_ITTE_OFFSET_SHIFT 0
#define TIO_ITTE_WIDGET_BITS 2 /* size of widget field */
#define TIO_ITTE_WIDGET_MASK ((1<<TIO_ITTE_WIDGET_BITS)-1)
#define TIO_ITTE_WIDGET_SHIFT 12
#define TIO_ITTE_VALID_MASK 0x1
#define TIO_ITTE_VALID_SHIFT 16
#define TIO_ITTE_WIDGET(itte) \
(((itte) >> TIO_ITTE_WIDGET_SHIFT) & TIO_ITTE_WIDGET_MASK)
#define TIO_ITTE_VALID(itte) \
(((itte) >> TIO_ITTE_VALID_SHIFT) & TIO_ITTE_VALID_MASK)
#define TIO_ITTE_PUT(nasid, bigwin, widget, addr, valid) \
REMOTE_HUB_S((nasid), TIO_ITTE(bigwin), \
(((((addr) >> TIO_BWIN_SIZE_BITS) & \
TIO_ITTE_OFFSET_MASK) << TIO_ITTE_OFFSET_SHIFT) | \
(((widget) & TIO_ITTE_WIDGET_MASK) << TIO_ITTE_WIDGET_SHIFT)) | \
(( (valid) & TIO_ITTE_VALID_MASK) << TIO_ITTE_VALID_SHIFT))
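/*
 * Editor's sketch, not original code: the decode macros above recover
 * the widget number from a packed ITTE word, honoring the valid bit.
 */
static inline int tio_itte_target_widget(u64 itte)
{
        return TIO_ITTE_VALID(itte) ? (int)TIO_ITTE_WIDGET(itte) : -1;
}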
#endif /* _ASM_IA64_SN_TIO_H */

View file

@ -1,91 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_XTALK_HUBDEV_H
#define _ASM_IA64_SN_XTALK_HUBDEV_H
#include "xtalk/xwidgetdev.h"
#define HUB_WIDGET_ID_MAX 0xf
#define DEV_PER_WIDGET (2*2*8)
#define IIO_ITTE_WIDGET_BITS 4 /* size of widget field */
#define IIO_ITTE_WIDGET_MASK ((1<<IIO_ITTE_WIDGET_BITS)-1)
#define IIO_ITTE_WIDGET_SHIFT 8
#define IIO_ITTE_WIDGET(itte) \
(((itte) >> IIO_ITTE_WIDGET_SHIFT) & IIO_ITTE_WIDGET_MASK)
/*
* Use the top big window as a surrogate for the first small window
*/
#define SWIN0_BIGWIN HUB_NUM_BIG_WINDOW
#define IIO_NUM_ITTES 7
#define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1)
/* This struct is shared between the PROM and the kernel.
* Changes to this struct will require corresponding changes to the kernel.
*/
struct sn_flush_device_common {
int sfdl_bus;
int sfdl_slot;
int sfdl_pin;
struct common_bar_list {
unsigned long start;
unsigned long end;
} sfdl_bar_list[6];
unsigned long sfdl_force_int_addr;
unsigned long sfdl_flush_value;
volatile unsigned long *sfdl_flush_addr;
u32 sfdl_persistent_busnum;
u32 sfdl_persistent_segment;
struct pcibus_info *sfdl_pcibus_info;
};
/* This struct is kernel only and is not used by the PROM */
struct sn_flush_device_kernel {
spinlock_t sfdl_flush_lock;
struct sn_flush_device_common *common;
};
/* 01/16/06 This struct is the old PROM/kernel struct and needs to be included
* for older official PROMs to function on the new kernel base. This struct
* will be removed when the next official PROM release occurs. */
struct sn_flush_device_war {
struct sn_flush_device_common common;
u32 filler; /* older PROMs expect the default size of a spinlock_t */
};
/*
* **widget_p - Used as an array[wid_num][device] of sn_flush_device_kernel.
*/
struct sn_flush_nasid_entry {
struct sn_flush_device_kernel **widget_p; // Used as an array of wid_num
u64 iio_itte[8];
};
struct hubdev_info {
geoid_t hdi_geoid;
short hdi_nasid;
short hdi_peer_nasid; /* Dual Porting Peer */
struct sn_flush_nasid_entry hdi_flush_nasid_list;
struct xwidget_info hdi_xwidget_info[HUB_WIDGET_ID_MAX + 1];
void *hdi_nodepda;
void *hdi_node_vertex;
u32 max_segment_number;
u32 max_pcibus_number;
};
extern void hubdev_init_node(nodepda_t *, cnodeid_t);
extern void hub_error_init(struct hubdev_info *);
extern void ice_error_init(struct hubdev_info *);
#endif /* _ASM_IA64_SN_XTALK_HUBDEV_H */

View file

@ -1,301 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992-1997,2000-2006 Silicon Graphics, Inc. All Rights
* Reserved.
*/
#ifndef _ASM_IA64_SN_XTALK_XBOW_H
#define _ASM_IA64_SN_XTALK_XBOW_H
#define XBOW_PORT_8 0x8
#define XBOW_PORT_C 0xc
#define XBOW_PORT_F 0xf
#define MAX_XBOW_PORTS 8 /* number of ports on xbow chip */
#define BASE_XBOW_PORT XBOW_PORT_8 /* Lowest external port */
#define XBOW_CREDIT 4
#define MAX_XBOW_NAME 16
/* Register set for each xbow link */
typedef volatile struct xb_linkregs_s {
/*
* we access these through synergy unswizzled space, so the address
* gets twiddled (i.e. references to 0x4 actually go to 0x0 and vice versa).
* That's why we put the register first and filler second.
*/
u32 link_ibf;
u32 filler0; /* filler for proper alignment */
u32 link_control;
u32 filler1;
u32 link_status;
u32 filler2;
u32 link_arb_upper;
u32 filler3;
u32 link_arb_lower;
u32 filler4;
u32 link_status_clr;
u32 filler5;
u32 link_reset;
u32 filler6;
u32 link_aux_status;
u32 filler7;
} xb_linkregs_t;
typedef volatile struct xbow_s {
/* standard widget configuration 0x000000-0x000057 */
struct widget_cfg xb_widget; /* 0x000000 */
/* helper fieldnames for accessing bridge widget */
#define xb_wid_id xb_widget.w_id
#define xb_wid_stat xb_widget.w_status
#define xb_wid_err_upper xb_widget.w_err_upper_addr
#define xb_wid_err_lower xb_widget.w_err_lower_addr
#define xb_wid_control xb_widget.w_control
#define xb_wid_req_timeout xb_widget.w_req_timeout
#define xb_wid_int_upper xb_widget.w_intdest_upper_addr
#define xb_wid_int_lower xb_widget.w_intdest_lower_addr
#define xb_wid_err_cmdword xb_widget.w_err_cmd_word
#define xb_wid_llp xb_widget.w_llp_cfg
#define xb_wid_stat_clr xb_widget.w_tflush
/*
* we access these through synergy unswizzled space, so the address
* gets twiddled (i.e. references to 0x4 actually go to 0x0 and vice versa).
* That's why we put the register first and filler second.
*/
/* xbow-specific widget configuration 0x000058-0x0000FF */
u32 xb_wid_arb_reload; /* 0x00005C */
u32 _pad_000058;
u32 xb_perf_ctr_a; /* 0x000064 */
u32 _pad_000060;
u32 xb_perf_ctr_b; /* 0x00006c */
u32 _pad_000068;
u32 xb_nic; /* 0x000074 */
u32 _pad_000070;
/* Xbridge only */
u32 xb_w0_rst_fnc; /* 0x00007C */
u32 _pad_000078;
u32 xb_l8_rst_fnc; /* 0x000084 */
u32 _pad_000080;
u32 xb_l9_rst_fnc; /* 0x00008c */
u32 _pad_000088;
u32 xb_la_rst_fnc; /* 0x000094 */
u32 _pad_000090;
u32 xb_lb_rst_fnc; /* 0x00009c */
u32 _pad_000098;
u32 xb_lc_rst_fnc; /* 0x0000a4 */
u32 _pad_0000a0;
u32 xb_ld_rst_fnc; /* 0x0000ac */
u32 _pad_0000a8;
u32 xb_le_rst_fnc; /* 0x0000b4 */
u32 _pad_0000b0;
u32 xb_lf_rst_fnc; /* 0x0000bc */
u32 _pad_0000b8;
u32 xb_lock; /* 0x0000c4 */
u32 _pad_0000c0;
u32 xb_lock_clr; /* 0x0000cc */
u32 _pad_0000c8;
/* end of Xbridge only */
u32 _pad_0000d0[12];
/* Link Specific Registers, port 8..15 0x000100-0x000300 */
xb_linkregs_t xb_link_raw[MAX_XBOW_PORTS];
} xbow_t;
#define xb_link(p) xb_link_raw[(p) & (MAX_XBOW_PORTS - 1)]
#define XB_FLAGS_EXISTS 0x1 /* device exists */
#define XB_FLAGS_MASTER 0x2
#define XB_FLAGS_SLAVE 0x0
#define XB_FLAGS_GBR 0x4
#define XB_FLAGS_16BIT 0x8
#define XB_FLAGS_8BIT 0x0
/* is widget port number valid? (based on version 7.0 of xbow spec) */
#define XBOW_WIDGET_IS_VALID(wid) ((wid) >= XBOW_PORT_8 && (wid) <= XBOW_PORT_F)
/* whether to use upper or lower arbitration register, given source widget id */
#define XBOW_ARB_IS_UPPER(wid) ((wid) >= XBOW_PORT_8 && (wid) <= XBOW_PORT_B)
#define XBOW_ARB_IS_LOWER(wid) ((wid) >= XBOW_PORT_C && (wid) <= XBOW_PORT_F)
/* offset of arbitration register, given source widget id */
#define XBOW_ARB_OFF(wid) (XBOW_ARB_IS_UPPER(wid) ? 0x1c : 0x24)
#define XBOW_WID_ID WIDGET_ID
#define XBOW_WID_STAT WIDGET_STATUS
#define XBOW_WID_ERR_UPPER WIDGET_ERR_UPPER_ADDR
#define XBOW_WID_ERR_LOWER WIDGET_ERR_LOWER_ADDR
#define XBOW_WID_CONTROL WIDGET_CONTROL
#define XBOW_WID_REQ_TO WIDGET_REQ_TIMEOUT
#define XBOW_WID_INT_UPPER WIDGET_INTDEST_UPPER_ADDR
#define XBOW_WID_INT_LOWER WIDGET_INTDEST_LOWER_ADDR
#define XBOW_WID_ERR_CMDWORD WIDGET_ERR_CMD_WORD
#define XBOW_WID_LLP WIDGET_LLP_CFG
#define XBOW_WID_STAT_CLR WIDGET_TFLUSH
#define XBOW_WID_ARB_RELOAD 0x5c
#define XBOW_WID_PERF_CTR_A 0x64
#define XBOW_WID_PERF_CTR_B 0x6c
#define XBOW_WID_NIC 0x74
/* Xbridge only */
#define XBOW_W0_RST_FNC 0x00007C
#define XBOW_L8_RST_FNC 0x000084
#define XBOW_L9_RST_FNC 0x00008c
#define XBOW_LA_RST_FNC 0x000094
#define XBOW_LB_RST_FNC 0x00009c
#define XBOW_LC_RST_FNC 0x0000a4
#define XBOW_LD_RST_FNC 0x0000ac
#define XBOW_LE_RST_FNC 0x0000b4
#define XBOW_LF_RST_FNC 0x0000bc
#define XBOW_RESET_FENCE(x) (((x) > 7 && (x) < 16) ? \
(XBOW_W0_RST_FNC + ((x) - 7) * 8) : \
(((x) == 0) ? XBOW_W0_RST_FNC : 0))
#define XBOW_LOCK 0x0000c4
#define XBOW_LOCK_CLR 0x0000cc
/* End of Xbridge only */
/* used only in ide, but defined here within the reserved portion */
/* of the widget0 address space (before 0xf4) */
#define XBOW_WID_UNDEF 0xe4
/* xbow link register set base, legal value for x is 0x8..0xf */
#define XB_LINK_BASE 0x100
#define XB_LINK_OFFSET 0x40
#define XB_LINK_REG_BASE(x) (XB_LINK_BASE + ((x) & (MAX_XBOW_PORTS - 1)) * XB_LINK_OFFSET)
#define XB_LINK_IBUF_FLUSH(x) (XB_LINK_REG_BASE(x) + 0x4)
#define XB_LINK_CTRL(x) (XB_LINK_REG_BASE(x) + 0xc)
#define XB_LINK_STATUS(x) (XB_LINK_REG_BASE(x) + 0x14)
#define XB_LINK_ARB_UPPER(x) (XB_LINK_REG_BASE(x) + 0x1c)
#define XB_LINK_ARB_LOWER(x) (XB_LINK_REG_BASE(x) + 0x24)
#define XB_LINK_STATUS_CLR(x) (XB_LINK_REG_BASE(x) + 0x2c)
#define XB_LINK_RESET(x) (XB_LINK_REG_BASE(x) + 0x34)
#define XB_LINK_AUX_STATUS(x) (XB_LINK_REG_BASE(x) + 0x3c)
/* link_control(x) */
#define XB_CTRL_LINKALIVE_IE 0x80000000 /* link comes alive */
/* reserved: 0x40000000 */
#define XB_CTRL_PERF_CTR_MODE_MSK 0x30000000 /* perf counter mode */
#define XB_CTRL_IBUF_LEVEL_MSK 0x0e000000 /* input packet buffer
level */
#define XB_CTRL_8BIT_MODE 0x01000000 /* force link into 8
bit mode */
#define XB_CTRL_BAD_LLP_PKT 0x00800000 /* force bad LLP
packet */
#define XB_CTRL_WIDGET_CR_MSK 0x007c0000 /* LLP widget credit
mask */
#define XB_CTRL_WIDGET_CR_SHFT 18 /* LLP widget credit
shift */
#define XB_CTRL_ILLEGAL_DST_IE 0x00020000 /* illegal destination
*/
#define XB_CTRL_OALLOC_IBUF_IE 0x00010000 /* overallocated input
buffer */
/* reserved: 0x0000fe00 */
#define XB_CTRL_BNDWDTH_ALLOC_IE 0x00000100 /* bandwidth alloc */
#define XB_CTRL_RCV_CNT_OFLOW_IE 0x00000080 /* rcv retry overflow */
#define XB_CTRL_XMT_CNT_OFLOW_IE 0x00000040 /* xmt retry overflow */
#define XB_CTRL_XMT_MAX_RTRY_IE 0x00000020 /* max transmit retry */
#define XB_CTRL_RCV_IE 0x00000010 /* receive */
#define XB_CTRL_XMT_RTRY_IE 0x00000008 /* transmit retry */
/* reserved: 0x00000004 */
#define XB_CTRL_MAXREQ_TOUT_IE 0x00000002 /* maximum request
timeout */
#define XB_CTRL_SRC_TOUT_IE 0x00000001 /* source timeout */
/* link_status(x) */
#define XB_STAT_LINKALIVE XB_CTRL_LINKALIVE_IE
/* reserved: 0x7ff80000 */
#define XB_STAT_MULTI_ERR 0x00040000 /* multi error */
#define XB_STAT_ILLEGAL_DST_ERR XB_CTRL_ILLEGAL_DST_IE
#define XB_STAT_OALLOC_IBUF_ERR XB_CTRL_OALLOC_IBUF_IE
#define XB_STAT_BNDWDTH_ALLOC_ID_MSK 0x0000ff00 /* port bitmask */
#define XB_STAT_RCV_CNT_OFLOW_ERR XB_CTRL_RCV_CNT_OFLOW_IE
#define XB_STAT_XMT_CNT_OFLOW_ERR XB_CTRL_XMT_CNT_OFLOW_IE
#define XB_STAT_XMT_MAX_RTRY_ERR XB_CTRL_XMT_MAX_RTRY_IE
#define XB_STAT_RCV_ERR XB_CTRL_RCV_IE
#define XB_STAT_XMT_RTRY_ERR XB_CTRL_XMT_RTRY_IE
/* reserved: 0x00000004 */
#define XB_STAT_MAXREQ_TOUT_ERR XB_CTRL_MAXREQ_TOUT_IE
#define XB_STAT_SRC_TOUT_ERR XB_CTRL_SRC_TOUT_IE
/* link_aux_status(x) */
#define XB_AUX_STAT_RCV_CNT 0xff000000
#define XB_AUX_STAT_XMT_CNT 0x00ff0000
#define XB_AUX_STAT_TOUT_DST 0x0000ff00
#define XB_AUX_LINKFAIL_RST_BAD 0x00000040
#define XB_AUX_STAT_PRESENT 0x00000020
#define XB_AUX_STAT_PORT_WIDTH 0x00000010
/* reserved: 0x0000000f */
/*
* link_arb_upper/link_arb_lower(x), (reg) should be the link_arb_upper
* register if (x) is 0x8..0xb, link_arb_lower if (x) is 0xc..0xf
*/
#define XB_ARB_GBR_MSK 0x1f
#define XB_ARB_RR_MSK 0x7
#define XB_ARB_GBR_SHFT(x) (((x) & 0x3) * 8)
#define XB_ARB_RR_SHFT(x) (((x) & 0x3) * 8 + 5)
#define XB_ARB_GBR_CNT(reg,x) ((reg) >> XB_ARB_GBR_SHFT(x) & XB_ARB_GBR_MSK)
#define XB_ARB_RR_CNT(reg,x) ((reg) >> XB_ARB_RR_SHFT(x) & XB_ARB_RR_MSK)
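/*
 * Illustrative helper (added by the editor): the GBR and RR counts for a
 * source widget occupy per-widget byte lanes, selected by the low two
 * bits of the widget id, in the upper (0x8..0xb) or lower (0xc..0xf)
 * arbitration register.
 */
static inline unsigned int xb_arb_gbr_count(u32 arb_reg, int wid)
{
        return XB_ARB_GBR_CNT(arb_reg, wid);    /* 5-bit GBR count */
}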
/* XBOW_WID_STAT */
#define XB_WID_STAT_LINK_INTR_SHFT (24)
#define XB_WID_STAT_LINK_INTR_MASK (0xFF << XB_WID_STAT_LINK_INTR_SHFT)
#define XB_WID_STAT_LINK_INTR(x) \
(0x1 << (((x)&7) + XB_WID_STAT_LINK_INTR_SHFT))
#define XB_WID_STAT_WIDGET0_INTR 0x00800000
#define XB_WID_STAT_SRCID_MASK 0x000003c0 /* Xbridge only */
#define XB_WID_STAT_REG_ACC_ERR 0x00000020
#define XB_WID_STAT_RECV_TOUT 0x00000010 /* Xbridge only */
#define XB_WID_STAT_ARB_TOUT 0x00000008 /* Xbridge only */
#define XB_WID_STAT_XTALK_ERR 0x00000004
#define XB_WID_STAT_DST_TOUT 0x00000002 /* Xbridge only */
#define XB_WID_STAT_MULTI_ERR 0x00000001
#define XB_WID_STAT_SRCID_SHFT 6
/* XBOW_WID_CONTROL */
#define XB_WID_CTRL_REG_ACC_IE XB_WID_STAT_REG_ACC_ERR
#define XB_WID_CTRL_RECV_TOUT XB_WID_STAT_RECV_TOUT
#define XB_WID_CTRL_ARB_TOUT XB_WID_STAT_ARB_TOUT
#define XB_WID_CTRL_XTALK_IE XB_WID_STAT_XTALK_ERR
/* XBOW_WID_INT_UPPER */
/* defined in xwidget.h for WIDGET_INTDEST_UPPER_ADDR */
/* XBOW WIDGET part number, in the ID register */
#define XBOW_WIDGET_PART_NUM 0x0 /* crossbow */
#define XXBOW_WIDGET_PART_NUM 0xd000 /* Xbridge */
#define XBOW_WIDGET_MFGR_NUM 0x0
#define XXBOW_WIDGET_MFGR_NUM 0x0
#define PXBOW_WIDGET_PART_NUM 0xd100 /* PIC */
#define XBOW_REV_1_0 0x1 /* xbow rev 1.0 is "1" */
#define XBOW_REV_1_1 0x2 /* xbow rev 1.1 is "2" */
#define XBOW_REV_1_2 0x3 /* xbow rev 1.2 is "3" */
#define XBOW_REV_1_3 0x4 /* xbow rev 1.3 is "4" */
#define XBOW_REV_2_0 0x5 /* xbow rev 2.0 is "5" */
#define XXBOW_PART_REV_1_0 (XXBOW_WIDGET_PART_NUM << 4 | 0x1 )
#define XXBOW_PART_REV_2_0 (XXBOW_WIDGET_PART_NUM << 4 | 0x2 )
/* XBOW_WID_ARB_RELOAD */
#define XBOW_WID_ARB_RELOAD_INT 0x3f /* GBR reload interval */
#define IS_XBRIDGE_XBOW(wid) \
(XWIDGET_PART_NUM(wid) == XXBOW_WIDGET_PART_NUM && \
XWIDGET_MFG_NUM(wid) == XXBOW_WIDGET_MFGR_NUM)
#define IS_PIC_XBOW(wid) \
(XWIDGET_PART_NUM(wid) == PXBOW_WIDGET_PART_NUM && \
XWIDGET_MFG_NUM(wid) == XXBOW_WIDGET_MFGR_NUM)
#define XBOW_WAR_ENABLED(pv, widid) ((1 << XWIDGET_REV_NUM(widid)) & pv)
#endif /* _ASM_IA64_SN_XTALK_XBOW_H */

View file

@ -1,70 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*/
#ifndef _ASM_IA64_SN_XTALK_XWIDGET_H
#define _ASM_IA64_SN_XTALK_XWIDGET_H
/* WIDGET_ID */
#define WIDGET_REV_NUM 0xf0000000
#define WIDGET_PART_NUM 0x0ffff000
#define WIDGET_MFG_NUM 0x00000ffe
#define WIDGET_REV_NUM_SHFT 28
#define WIDGET_PART_NUM_SHFT 12
#define WIDGET_MFG_NUM_SHFT 1
#define XWIDGET_PART_NUM(widgetid) (((widgetid) & WIDGET_PART_NUM) >> WIDGET_PART_NUM_SHFT)
#define XWIDGET_REV_NUM(widgetid) (((widgetid) & WIDGET_REV_NUM) >> WIDGET_REV_NUM_SHFT)
#define XWIDGET_MFG_NUM(widgetid) (((widgetid) & WIDGET_MFG_NUM) >> WIDGET_MFG_NUM_SHFT)
#define XWIDGET_PART_REV_NUM(widgetid) ((XWIDGET_PART_NUM(widgetid) << 4) | \
XWIDGET_REV_NUM(widgetid))
#define XWIDGET_PART_REV_NUM_REV(partrev) (partrev & 0xf)
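/*
 * Hedged example (not from the original file): decoding a widget id.
 * An Xbridge id with part number 0xd000 and rev 2 yields
 * XWIDGET_PART_REV_NUM() == 0xd0002.
 */
static inline unsigned int xwidget_part_rev_example(u32 widgetid)
{
        return XWIDGET_PART_REV_NUM(widgetid);
}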
/* widget configuration registers */
struct widget_cfg {
u32 w_id; /* 0x04 */
u32 w_pad_0; /* 0x00 */
u32 w_status; /* 0x0c */
u32 w_pad_1; /* 0x08 */
u32 w_err_upper_addr; /* 0x14 */
u32 w_pad_2; /* 0x10 */
u32 w_err_lower_addr; /* 0x1c */
u32 w_pad_3; /* 0x18 */
u32 w_control; /* 0x24 */
u32 w_pad_4; /* 0x20 */
u32 w_req_timeout; /* 0x2c */
u32 w_pad_5; /* 0x28 */
u32 w_intdest_upper_addr; /* 0x34 */
u32 w_pad_6; /* 0x30 */
u32 w_intdest_lower_addr; /* 0x3c */
u32 w_pad_7; /* 0x38 */
u32 w_err_cmd_word; /* 0x44 */
u32 w_pad_8; /* 0x40 */
u32 w_llp_cfg; /* 0x4c */
u32 w_pad_9; /* 0x48 */
u32 w_tflush; /* 0x54 */
u32 w_pad_10; /* 0x50 */
};
/*
* Crosstalk Widget Hardware Identification, as defined in the Crosstalk spec.
*/
struct xwidget_hwid {
int mfg_num;
int rev_num;
int part_num;
};
struct xwidget_info {
struct xwidget_hwid xwi_hwid; /* Widget Identification */
char xwi_masterxid; /* Hub's Widget Port Number */
void *xwi_hubinfo; /* Hub's provider private info */
u64 *xwi_hub_provider; /* prom provider functions */
void *xwi_vertex;
};
#endif /* _ASM_IA64_SN_XTALK_XWIDGET_H */

View file

@ -1,17 +0,0 @@
# arch/ia64/sn/kernel/Makefile
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 1999,2001-2006,2008 Silicon Graphics, Inc. All Rights Reserved.
#
ccflags-y := -I $(srctree)/arch/ia64/sn/include
obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \
huberror.o io_acpi_init.o io_common.o \
io_init.o iomv.o klconflib.o pio_phys.o \
sn2/
obj-$(CONFIG_IA64_GENERIC) += machvec.o
obj-$(CONFIG_PCI_MSI) += msi_sn.o

View file

@ -1,475 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2000-2007 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/module.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/pda.h>
#include <asm/sn/shubio.h>
#include <asm/nodedata.h>
#include <asm/delay.h>
#include <linux/memblock.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/sn/bte.h>
#ifndef L1_CACHE_MASK
#define L1_CACHE_MASK (L1_CACHE_BYTES - 1)
#endif
/* two interfaces on two btes */
#define MAX_INTERFACES_TO_TRY 4
#define MAX_NODES_TO_TRY 2
static struct bteinfo_s *bte_if_on_node(nasid_t nasid, int interface)
{
nodepda_t *tmp_nodepda;
if (nasid_to_cnodeid(nasid) == -1)
return (struct bteinfo_s *)NULL;
tmp_nodepda = NODEPDA(nasid_to_cnodeid(nasid));
return &tmp_nodepda->bte_if[interface];
}
static inline void bte_start_transfer(struct bteinfo_s *bte, u64 len, u64 mode)
{
if (is_shub2()) {
BTE_CTRL_STORE(bte, (IBLS_BUSY | ((len) | (mode) << 24)));
} else {
BTE_LNSTAT_STORE(bte, len);
BTE_CTRL_STORE(bte, mode);
}
}
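/*
 * Editor's note: on shub2 the length and mode share one control word
 * (mode in bits 24 and up, length in the low bits), and storing it with
 * IBLS_BUSY set starts the transfer; shub1 instead uses separate
 * length-status and control registers.
 */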
/************************************************************************
* Block Transfer Engine copy related functions.
*
***********************************************************************/
/*
* bte_copy(src, dest, len, mode, notification)
*
* Use the block transfer engine to move kernel memory from src to dest
* using the assigned mode.
*
* Parameters:
* src - physical address of the transfer source.
* dest - physical address of the transfer destination.
* len - number of bytes to transfer from source to dest.
* mode - hardware defined. See reference information
* for IBCT0/1 in the SHUB Programmers Reference
* notification - kernel virtual address of the notification cache
* line. If NULL, the default is used and
* the bte_copy is synchronous.
*
* NOTE: This function requires src, dest, and len to
* be cacheline aligned.
*/
bte_result_t bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
{
u64 transfer_size;
u64 transfer_stat;
u64 notif_phys_addr;
struct bteinfo_s *bte;
bte_result_t bte_status;
unsigned long irq_flags;
unsigned long itc_end = 0;
int nasid_to_try[MAX_NODES_TO_TRY];
int my_nasid = cpuid_to_nasid(raw_smp_processor_id());
int bte_if_index, nasid_index;
int bte_first, btes_per_node = BTES_PER_NODE;
BTE_PRINTK(("bte_copy(0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%p)\n",
src, dest, len, mode, notification));
if (len == 0) {
return BTE_SUCCESS;
}
BUG_ON(len & L1_CACHE_MASK);
BUG_ON(src & L1_CACHE_MASK);
BUG_ON(dest & L1_CACHE_MASK);
BUG_ON(len > BTE_MAX_XFER);
/*
* Start with interface corresponding to cpu number
*/
bte_first = raw_smp_processor_id() % btes_per_node;
if (mode & BTE_USE_DEST) {
/* try remote then local */
nasid_to_try[0] = NASID_GET(dest);
if (mode & BTE_USE_ANY) {
nasid_to_try[1] = my_nasid;
} else {
nasid_to_try[1] = 0;
}
} else {
/* try local then remote */
nasid_to_try[0] = my_nasid;
if (mode & BTE_USE_ANY) {
nasid_to_try[1] = NASID_GET(dest);
} else {
nasid_to_try[1] = 0;
}
}
retry_bteop:
do {
local_irq_save(irq_flags);
bte_if_index = bte_first;
nasid_index = 0;
/* Attempt to lock one of the BTE interfaces. */
while (nasid_index < MAX_NODES_TO_TRY) {
bte = bte_if_on_node(nasid_to_try[nasid_index],bte_if_index);
if (bte == NULL) {
nasid_index++;
continue;
}
if (spin_trylock(&bte->spinlock)) {
if (!(*bte->most_rcnt_na & BTE_WORD_AVAILABLE) ||
(BTE_LNSTAT_LOAD(bte) & BTE_ACTIVE)) {
/* Got the lock but BTE still busy */
spin_unlock(&bte->spinlock);
} else {
/* we got the lock and it's not busy */
break;
}
}
bte_if_index = (bte_if_index + 1) % btes_per_node; /* Next interface */
if (bte_if_index == bte_first) {
/*
* We've tried all interfaces on this node
*/
nasid_index++;
}
bte = NULL;
}
if (bte != NULL) {
break;
}
local_irq_restore(irq_flags);
if (!(mode & BTE_WACQUIRE)) {
return BTEFAIL_NOTAVAIL;
}
} while (1);
if (notification == NULL) {
/* User does not want to be notified. */
bte->most_rcnt_na = &bte->notify;
} else {
bte->most_rcnt_na = notification;
}
/* Calculate the number of cache lines to transfer. */
transfer_size = ((len >> L1_CACHE_SHIFT) & BTE_LEN_MASK);
/* Initialize the notification to a known value. */
*bte->most_rcnt_na = BTE_WORD_BUSY;
notif_phys_addr = (u64)bte->most_rcnt_na;
/* Set the source and destination registers */
BTE_PRINTKV(("IBSA = 0x%lx)\n", src));
BTE_SRC_STORE(bte, src);
BTE_PRINTKV(("IBDA = 0x%lx)\n", dest));
BTE_DEST_STORE(bte, dest);
/* Set the notification register */
BTE_PRINTKV(("IBNA = 0x%lx)\n", notif_phys_addr));
BTE_NOTIF_STORE(bte, notif_phys_addr);
/* Initiate the transfer */
BTE_PRINTK(("IBCT = 0x%lx)\n", BTE_VALID_MODE(mode)));
bte_start_transfer(bte, transfer_size, BTE_VALID_MODE(mode));
itc_end = ia64_get_itc() + (40000000 * local_cpu_data->cyc_per_usec);
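/* Editor's note: 40,000,000 usec worth of ITC cycles, i.e. a roughly
   40 second watchdog for the completion-polling loop below. */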
spin_unlock_irqrestore(&bte->spinlock, irq_flags);
if (notification != NULL) {
return BTE_SUCCESS;
}
while ((transfer_stat = *bte->most_rcnt_na) == BTE_WORD_BUSY) {
cpu_relax();
if (ia64_get_itc() > itc_end) {
BTE_PRINTK(("BTE timeout nasid 0x%x bte%d IBLS = 0x%lx na 0x%lx\n",
NASID_GET(bte->bte_base_addr), bte->bte_num,
BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na) );
bte->bte_error_count++;
bte->bh_error = IBLS_ERROR;
bte_error_handler(NODEPDA(bte->bte_cnode));
*bte->most_rcnt_na = BTE_WORD_AVAILABLE;
goto retry_bteop;
}
}
BTE_PRINTKV((" Delay Done. IBLS = 0x%lx, most_rcnt_na = 0x%lx\n",
BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na));
if (transfer_stat & IBLS_ERROR) {
bte_status = BTE_GET_ERROR_STATUS(transfer_stat);
} else {
bte_status = BTE_SUCCESS;
}
*bte->most_rcnt_na = BTE_WORD_AVAILABLE;
BTE_PRINTK(("Returning status is 0x%lx and most_rcnt_na is 0x%lx\n",
BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na));
return bte_status;
}
EXPORT_SYMBOL(bte_copy);
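/*
 * Usage sketch (editor's addition, not SGI code): a synchronous
 * node-to-node copy.  src_pa/dst_pa are assumed to be cacheline-aligned
 * physical addresses and nbytes a multiple of L1_CACHE_BYTES; a NULL
 * notification makes the call block until completion.  BTE_NORMAL is
 * assumed to be the default notify mode from <asm/sn/bte.h>;
 * BTE_WACQUIRE (tested above) makes the caller wait for an interface.
 */
static bte_result_t bte_copy_example(u64 src_pa, u64 dst_pa, u64 nbytes)
{
        return bte_copy(src_pa, dst_pa, nbytes,
                        BTE_NORMAL | BTE_WACQUIRE, NULL);
}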
/*
* bte_unaligned_copy(src, dest, len, mode)
*
* use the block transfer engine to move kernel
* memory from src to dest using the assigned mode.
*
* Parameters:
* src - physical address of the transfer source.
* dest - physical address of the transfer destination.
* len - number of bytes to transfer from source to dest.
* mode - hardware defined. See reference information
* for IBCT0/1 in the SGI documentation.
*
* NOTE: If the source, dest, and len are all cache line aligned,
* then it would be _FAR_ preferable to use bte_copy instead.
*/
bte_result_t bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
{
int destFirstCacheOffset;
u64 headBteSource;
u64 headBteLen;
u64 headBcopySrcOffset;
u64 headBcopyDest;
u64 headBcopyLen;
u64 footBteSource;
u64 footBteLen;
u64 footBcopyDest;
u64 footBcopyLen;
bte_result_t rv;
char *bteBlock, *bteBlock_unaligned;
if (len == 0) {
return BTE_SUCCESS;
}
/* temporary buffer used during unaligned transfers */
bteBlock_unaligned = kmalloc(len + 3 * L1_CACHE_BYTES, GFP_KERNEL);
if (bteBlock_unaligned == NULL) {
return BTEFAIL_NOTAVAIL;
}
bteBlock = (char *)L1_CACHE_ALIGN((u64) bteBlock_unaligned);
headBcopySrcOffset = src & L1_CACHE_MASK;
destFirstCacheOffset = dest & L1_CACHE_MASK;
/*
* At this point, the transfer is broken into
* (up to) three sections. The first section is
* from the start address to the first physical
* cache line, the second is from the first physical
* cache line to the last complete cache line,
* and the third is from the last cache line to the
* end of the buffer. The first and third sections
* are handled by bte copying into a temporary buffer
* and then bcopy'ing the necessary section into the
* final location. The middle section is handled with
* a standard bte copy.
*
* One nasty exception to the above rule is when the
* source and destination are not symmetrically
* mis-aligned. If the source offset from the first
* cache line is different from the destination offset,
* we make the first section be the entire transfer
* and then bcopy the entire block into place.
*/
if (headBcopySrcOffset == destFirstCacheOffset) {
/*
* Both the source and destination are the same
* distance from a cache line boundary so we can
* use the bte to transfer the bulk of the
* data.
*/
headBteSource = src & ~L1_CACHE_MASK;
headBcopyDest = dest;
if (headBcopySrcOffset) {
headBcopyLen = (len > (L1_CACHE_BYTES - headBcopySrcOffset)) ?
(L1_CACHE_BYTES - headBcopySrcOffset) : len;
headBteLen = L1_CACHE_BYTES;
} else {
headBcopyLen = 0;
headBteLen = 0;
}
if (len > headBcopyLen) {
footBcopyLen = (len - headBcopyLen) & L1_CACHE_MASK;
footBteLen = L1_CACHE_BYTES;
footBteSource = src + len - footBcopyLen;
footBcopyDest = dest + len - footBcopyLen;
if (footBcopyDest == (headBcopyDest + headBcopyLen)) {
/*
* We have two contiguous bcopy
* blocks. Merge them.
*/
headBcopyLen += footBcopyLen;
headBteLen += footBteLen;
} else if (footBcopyLen > 0) {
rv = bte_copy(footBteSource,
ia64_tpa((unsigned long)bteBlock),
footBteLen, mode, NULL);
if (rv != BTE_SUCCESS) {
kfree(bteBlock_unaligned);
return rv;
}
memcpy(__va(footBcopyDest),
(char *)bteBlock, footBcopyLen);
}
} else {
footBcopyLen = 0;
footBteLen = 0;
}
if (len > (headBcopyLen + footBcopyLen)) {
/* now transfer the middle. */
rv = bte_copy(src + headBcopyLen, dest + headBcopyLen,
len - headBcopyLen - footBcopyLen, mode, NULL);
if (rv != BTE_SUCCESS) {
kfree(bteBlock_unaligned);
return rv;
}
}
} else {
/*
* The transfer is not symmetric, we will
* allocate a buffer large enough for all the
* data, bte_copy into that buffer and then
* bcopy to the destination.
*/
headBcopySrcOffset = src & L1_CACHE_MASK;
headBcopyDest = dest;
headBcopyLen = len;
headBteSource = src - headBcopySrcOffset;
/* Add the leading and trailing bytes from source */
headBteLen = L1_CACHE_ALIGN(len + headBcopySrcOffset);
}
if (headBcopyLen > 0) {
rv = bte_copy(headBteSource,
ia64_tpa((unsigned long)bteBlock), headBteLen,
mode, NULL);
if (rv != BTE_SUCCESS) {
kfree(bteBlock_unaligned);
return rv;
}
memcpy(__va(headBcopyDest), ((char *)bteBlock +
headBcopySrcOffset), headBcopyLen);
}
kfree(bteBlock_unaligned);
return BTE_SUCCESS;
}
EXPORT_SYMBOL(bte_unaligned_copy);
/************************************************************************
* Block Transfer Engine initialization functions.
*
***********************************************************************/
static void bte_recovery_timeout(struct timer_list *t)
{
struct nodepda_s *nodepda = from_timer(nodepda, t, bte_recovery_timer);
bte_error_handler(nodepda);
}
/*
* bte_init_node(nodepda, cnode)
*
* Initialize the nodepda structure with BTE base addresses and
* spinlocks.
*/
void bte_init_node(nodepda_t * mynodepda, cnodeid_t cnode)
{
int i;
/*
* Indicate that all the block transfer engines on this node
* are available.
*/
/*
* Allocate one bte_recover_t structure per node. It holds
* the recovery lock for the node. All the bte interface structures
* will point at this one bte_recover structure to get the lock.
*/
spin_lock_init(&mynodepda->bte_recovery_lock);
timer_setup(&mynodepda->bte_recovery_timer, bte_recovery_timeout, 0);
for (i = 0; i < BTES_PER_NODE; i++) {
u64 *base_addr;
/* Which link status register should we use? */
base_addr = (u64 *)
REMOTE_HUB_ADDR(cnodeid_to_nasid(cnode), BTE_BASE_ADDR(i));
mynodepda->bte_if[i].bte_base_addr = base_addr;
mynodepda->bte_if[i].bte_source_addr = BTE_SOURCE_ADDR(base_addr);
mynodepda->bte_if[i].bte_destination_addr = BTE_DEST_ADDR(base_addr);
mynodepda->bte_if[i].bte_control_addr = BTE_CTRL_ADDR(base_addr);
mynodepda->bte_if[i].bte_notify_addr = BTE_NOTIF_ADDR(base_addr);
/*
* Initialize the notification and spinlock
* so the first transfer can occur.
*/
mynodepda->bte_if[i].most_rcnt_na =
&(mynodepda->bte_if[i].notify);
mynodepda->bte_if[i].notify = BTE_WORD_AVAILABLE;
spin_lock_init(&mynodepda->bte_if[i].spinlock);
mynodepda->bte_if[i].bte_cnode = cnode;
mynodepda->bte_if[i].bte_error_count = 0;
mynodepda->bte_if[i].bte_num = i;
mynodepda->bte_if[i].cleanup_active = 0;
mynodepda->bte_if[i].bh_error = 0;
}
}

View file

@ -1,255 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2000-2007 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/types.h>
#include <asm/sn/sn_sal.h>
#include "ioerror.h"
#include <asm/sn/addrs.h>
#include <asm/sn/shubio.h>
#include <asm/sn/geo.h>
#include "xtalk/xwidgetdev.h"
#include "xtalk/hubdev.h"
#include <asm/sn/bte.h>
#include <asm/param.h>
/*
* Bte error handling is done in two parts. The first captures
* any crb related errors. Since there can be multiple crbs per
* interface and multiple interfaces active, we need to wait until
* all active crbs are completed. This is the first job of the
* second part error handler. When all bte related CRBs are cleanly
* completed, it resets the interfaces and gets them ready for new
* transfers to be queued.
*/
/*
* Wait until all BTE related CRBs are completed
* and then reset the interfaces.
*/
static int shub1_bte_error_handler(struct nodepda_s *err_nodepda)
{
struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer;
nasid_t nasid;
int i;
int valid_crbs;
ii_imem_u_t imem; /* II IMEM Register */
ii_icrb0_d_u_t icrbd; /* II CRB Register D */
ii_ibcr_u_t ibcr;
ii_icmr_u_t icmr;
ii_ieclr_u_t ieclr;
BTE_PRINTK(("shub1_bte_error_handler(%p) - %d\n", err_nodepda,
smp_processor_id()));
if ((err_nodepda->bte_if[0].bh_error == BTE_SUCCESS) &&
(err_nodepda->bte_if[1].bh_error == BTE_SUCCESS)) {
BTE_PRINTK(("eh:%p:%d Nothing to do.\n", err_nodepda,
smp_processor_id()));
return 1;
}
/* Determine information about our hub */
nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode);
/*
* A BTE transfer can use multiple CRBs. We need to make sure
* that all the BTE CRBs are complete (or timed out) before
* attempting to clean up the error. Resetting the BTE while
* there are still BTE CRBs active will hang the BTE.
* We should look at all the CRBs to see if they are allocated
* to the BTE and see if they are still active. When none
* are active, we can continue with the cleanup.
*
* We also want to make sure that the local NI port is up.
* When a router resets, the NI port can go down while it
* goes through the LLP handshake, but it then comes back up.
*/
icmr.ii_icmr_regval = REMOTE_HUB_L(nasid, IIO_ICMR);
if (icmr.ii_icmr_fld_s.i_crb_mark != 0) {
/*
* There are errors which still need to be cleaned up by
* hubiio_crb_error_handler
*/
mod_timer(recovery_timer, jiffies + (HZ * 5));
BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda,
smp_processor_id()));
return 1;
}
if (icmr.ii_icmr_fld_s.i_crb_vld != 0) {
valid_crbs = icmr.ii_icmr_fld_s.i_crb_vld;
for (i = 0; i < IIO_NUM_CRBS; i++) {
if (!((1 << i) & valid_crbs)) {
/* This crb was not marked as valid, ignore */
continue;
}
icrbd.ii_icrb0_d_regval =
REMOTE_HUB_L(nasid, IIO_ICRB_D(i));
if (icrbd.d_bteop) {
mod_timer(recovery_timer, jiffies + (HZ * 5));
BTE_PRINTK(("eh:%p:%d Valid %d, Giving up\n",
err_nodepda, smp_processor_id(),
i));
return 1;
}
}
}
BTE_PRINTK(("eh:%p:%d Cleaning up\n", err_nodepda, smp_processor_id()));
/* Re-enable both bte interfaces */
imem.ii_imem_regval = REMOTE_HUB_L(nasid, IIO_IMEM);
imem.ii_imem_fld_s.i_b0_esd = imem.ii_imem_fld_s.i_b1_esd = 1;
REMOTE_HUB_S(nasid, IIO_IMEM, imem.ii_imem_regval);
/* Clear BTE0/1 error bits */
ieclr.ii_ieclr_regval = 0;
if (err_nodepda->bte_if[0].bh_error != BTE_SUCCESS)
ieclr.ii_ieclr_fld_s.i_e_bte_0 = 1;
if (err_nodepda->bte_if[1].bh_error != BTE_SUCCESS)
ieclr.ii_ieclr_fld_s.i_e_bte_1 = 1;
REMOTE_HUB_S(nasid, IIO_IECLR, ieclr.ii_ieclr_regval);
/* Reinitialize both BTE state machines. */
ibcr.ii_ibcr_regval = REMOTE_HUB_L(nasid, IIO_IBCR);
ibcr.ii_ibcr_fld_s.i_soft_reset = 1;
REMOTE_HUB_S(nasid, IIO_IBCR, ibcr.ii_ibcr_regval);
del_timer(recovery_timer);
return 0;
}
/*
* Wait until all BTE related CRBs are completed
* and then reset the interfaces.
*/
static int shub2_bte_error_handler(struct nodepda_s *err_nodepda)
{
struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer;
struct bteinfo_s *bte;
nasid_t nasid;
u64 status;
int i;
nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode);
/*
* Verify that all the BTEs are complete
*/
for (i = 0; i < BTES_PER_NODE; i++) {
bte = &err_nodepda->bte_if[i];
status = BTE_LNSTAT_LOAD(bte);
if (status & IBLS_ERROR) {
bte->bh_error = BTE_SHUB2_ERROR(status);
continue;
}
if (!(status & IBLS_BUSY))
continue;
mod_timer(recovery_timer, jiffies + (HZ * 5));
BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda,
smp_processor_id()));
return 1;
}
if (ia64_sn_bte_recovery(nasid))
panic("bte_error_handler(): Fatal BTE Error");
del_timer(recovery_timer);
return 0;
}
/*
* Wait until all BTE related CRBs are completed
* and then reset the interfaces.
*/
void bte_error_handler(struct nodepda_s *err_nodepda)
{
spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock;
int i;
unsigned long irq_flags;
volatile u64 *notify;
bte_result_t bh_error;
BTE_PRINTK(("bte_error_handler(%p) - %d\n", err_nodepda,
smp_processor_id()));
spin_lock_irqsave(recovery_lock, irq_flags);
/*
* Lock all interfaces on this node to prevent new transfers
* from being queued.
*/
for (i = 0; i < BTES_PER_NODE; i++) {
if (err_nodepda->bte_if[i].cleanup_active) {
continue;
}
spin_lock(&err_nodepda->bte_if[i].spinlock);
BTE_PRINTK(("eh:%p:%d locked %d\n", err_nodepda,
smp_processor_id(), i));
err_nodepda->bte_if[i].cleanup_active = 1;
}
if (is_shub1()) {
if (shub1_bte_error_handler(err_nodepda)) {
spin_unlock_irqrestore(recovery_lock, irq_flags);
return;
}
} else {
if (shub2_bte_error_handler(err_nodepda)) {
spin_unlock_irqrestore(recovery_lock, irq_flags);
return;
}
}
for (i = 0; i < BTES_PER_NODE; i++) {
bh_error = err_nodepda->bte_if[i].bh_error;
if (bh_error != BTE_SUCCESS) {
/* There is an error which needs to be notified */
notify = err_nodepda->bte_if[i].most_rcnt_na;
BTE_PRINTK(("cnode %d bte %d error=0x%lx\n",
err_nodepda->bte_if[i].bte_cnode,
err_nodepda->bte_if[i].bte_num,
IBLS_ERROR | (u64) bh_error));
*notify = IBLS_ERROR | bh_error;
err_nodepda->bte_if[i].bh_error = BTE_SUCCESS;
}
err_nodepda->bte_if[i].cleanup_active = 0;
BTE_PRINTK(("eh:%p:%d Unlocked %d\n", err_nodepda,
smp_processor_id(), i));
spin_unlock(&err_nodepda->bte_if[i].spinlock);
}
spin_unlock_irqrestore(recovery_lock, irq_flags);
}
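/*
 * Illustrative aside (a sketch of the contract, not removed code): the
 * thread that queued a transfer spins on the notify word reached via
 * most_rcnt_na, so the "*notify = IBLS_ERROR | bh_error" store above is
 * what completes that waiter with a failure status.  Assuming a polling
 * loop of roughly this shape in the (removed) bte_copy() path:
 *
 *	while (!(*bte->most_rcnt_na & (IBLS_ERROR | BTE_WORD_AVAILABLE)))
 *		cpu_relax();
 */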
/*
* First part error handler. This is called whenever any error CRB interrupt
* is generated by the II.
*/
void
bte_crb_error_handler(cnodeid_t cnode, int btenum,
int crbnum, ioerror_t * ioe, int bteop)
{
struct bteinfo_s *bte;
bte = &(NODEPDA(cnode)->bte_if[btenum]);
/*
* The caller has already figured out the error type, we save that
* in the bte handle structure for the thread exercising the
* interface to consume.
*/
bte->bh_error = ioe->ie_errortype + BTEFAIL_OFFSET;
bte->bte_error_count++;
BTE_PRINTK(("Got an error on cnode %d bte %d: HW error type 0x%x\n",
bte->bte_cnode, bte->bte_num, ioe->ie_errortype));
bte_error_handler(NODEPDA(cnode));
}


@@ -1,220 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000,2002-2007 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/interrupt.h>
#include <asm/delay.h>
#include <asm/sn/sn_sal.h>
#include "ioerror.h"
#include <asm/sn/addrs.h>
#include <asm/sn/shubio.h>
#include <asm/sn/geo.h>
#include "xtalk/xwidgetdev.h"
#include "xtalk/hubdev.h"
#include <asm/sn/bte.h>
void hubiio_crb_error_handler(struct hubdev_info *hubdev_info);
extern void bte_crb_error_handler(cnodeid_t, int, int, ioerror_t *,
int);
static irqreturn_t hub_eint_handler(int irq, void *arg)
{
struct hubdev_info *hubdev_info;
struct ia64_sal_retval ret_stuff;
nasid_t nasid;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
hubdev_info = (struct hubdev_info *)arg;
nasid = hubdev_info->hdi_nasid;
if (is_shub1()) {
SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT,
(u64) nasid, 0, 0, 0, 0, 0, 0);
if ((int)ret_stuff.v0)
panic("%s: Fatal %s Error", __func__,
((nasid & 1) ? "TIO" : "HUBII"));
if (!(nasid & 1)) /* Not a TIO, handle CRB errors */
(void)hubiio_crb_error_handler(hubdev_info);
} else
if (nasid & 1) { /* TIO errors */
SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT,
(u64) nasid, 0, 0, 0, 0, 0, 0);
if ((int)ret_stuff.v0)
panic("%s: Fatal TIO Error", __func__);
} else
bte_error_handler(NODEPDA(nasid_to_cnodeid(nasid)));
return IRQ_HANDLED;
}
/*
* Free the hub CRB "crbnum" which encountered an error.
* Assumption is, error handling was successfully done,
* and we now want to return the CRB back to Hub for normal usage.
*
* In order to free the CRB, all that's needed is to de-allocate it
*
* Assumption:
* No other processor is mucking around with the hub control register.
* So, upper layer has to single thread this.
*/
void hubiio_crb_free(struct hubdev_info *hubdev_info, int crbnum)
{
ii_icrb0_b_u_t icrbb;
/*
* The hardware does NOT clear the mark bit, so it must get cleared
* here to be sure the error is not processed twice.
*/
icrbb.ii_icrb0_b_regval = REMOTE_HUB_L(hubdev_info->hdi_nasid,
IIO_ICRB_B(crbnum));
icrbb.b_mark = 0;
REMOTE_HUB_S(hubdev_info->hdi_nasid, IIO_ICRB_B(crbnum),
icrbb.ii_icrb0_b_regval);
/*
* Deallocate the register and wait till the hub indicates it's done.
*/
REMOTE_HUB_S(hubdev_info->hdi_nasid, IIO_ICDR, (IIO_ICDR_PND | crbnum));
while (REMOTE_HUB_L(hubdev_info->hdi_nasid, IIO_ICDR) & IIO_ICDR_PND)
cpu_relax();
}
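/*
 * Illustrative aside (generic sketch, names hypothetical): the
 * deallocation above is a write-then-poll MMIO idiom -- post a request
 * with a pending bit set, then spin until the hardware clears it:
 */
static inline void post_and_wait(volatile u64 *reg, u64 request, u64 pending)
{
	*reg = request | pending;	/* post the request */
	while (*reg & pending)		/* hw clears the bit when done */
		cpu_relax();
}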
/*
* hubiio_crb_error_handler
*
* This routine gets invoked when a hub gets an error
* interrupt. So, the routine is running in interrupt context
* at error interrupt level.
* Action:
* It's responsible for identifying ALL the CRBs that are marked
* with error, and processing them.
*
* For each CRB found marked with error, map it to the reason that
* caused the error, and invoke the appropriate error handler.
*
* XXX Be aware of the information in the context register.
*
* NOTE:
* Use REMOTE_HUB_* macro instead of LOCAL_HUB_* so that the interrupt
* handler can be run on any node. (not necessarily the node
* corresponding to the hub that encountered error).
*/
void hubiio_crb_error_handler(struct hubdev_info *hubdev_info)
{
nasid_t nasid;
ii_icrb0_a_u_t icrba; /* II CRB Register A */
ii_icrb0_b_u_t icrbb; /* II CRB Register B */
ii_icrb0_c_u_t icrbc; /* II CRB Register C */
ii_icrb0_d_u_t icrbd; /* II CRB Register D */
ii_icrb0_e_u_t icrbe; /* II CRB Register E */
int i;
int num_errors = 0; /* Num of errors handled */
ioerror_t ioerror;
nasid = hubdev_info->hdi_nasid;
/*
* XXX - Add locking for any recovery actions
*/
/*
* Scan through all CRBs in the Hub, and handle the errors
* in any of the CRBs marked.
*/
for (i = 0; i < IIO_NUM_CRBS; i++) {
/* Check this crb entry to see if it is in error. */
icrbb.ii_icrb0_b_regval = REMOTE_HUB_L(nasid, IIO_ICRB_B(i));
if (icrbb.b_mark == 0) {
continue;
}
icrba.ii_icrb0_a_regval = REMOTE_HUB_L(nasid, IIO_ICRB_A(i));
IOERROR_INIT(&ioerror);
/* read other CRB error registers. */
icrbc.ii_icrb0_c_regval = REMOTE_HUB_L(nasid, IIO_ICRB_C(i));
icrbd.ii_icrb0_d_regval = REMOTE_HUB_L(nasid, IIO_ICRB_D(i));
icrbe.ii_icrb0_e_regval = REMOTE_HUB_L(nasid, IIO_ICRB_E(i));
IOERROR_SETVALUE(&ioerror, errortype, icrbb.b_ecode);
/* Check if this error is due to BTE operation,
* and handle it separately.
*/
if (icrbd.d_bteop ||
((icrbb.b_initiator == IIO_ICRB_INIT_BTE0 ||
icrbb.b_initiator == IIO_ICRB_INIT_BTE1) &&
(icrbb.b_imsgtype == IIO_ICRB_IMSGT_BTE ||
icrbb.b_imsgtype == IIO_ICRB_IMSGT_SN1NET))) {
int bte_num;
if (icrbd.d_bteop)
bte_num = icrbc.c_btenum;
else /* b_initiator bit 2 gives BTE number */
bte_num = (icrbb.b_initiator & 0x4) >> 2;
hubiio_crb_free(hubdev_info, i);
bte_crb_error_handler(nasid_to_cnodeid(nasid), bte_num,
i, &ioerror, icrbd.d_bteop);
num_errors++;
continue;
}
}
}
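/*
 * Illustrative aside: the "b_initiator bit 2" trick above works because
 * the two BTE initiator encodings (IIO_ICRB_INIT_BTE0/BTE1) are assumed
 * to differ only in bit 2, so the BTE number falls out of a mask and
 * shift.  A minimal sketch:
 */
static inline int initiator_to_bte_num(unsigned int b_initiator)
{
	return (b_initiator & 0x4) >> 2;	/* bit 2 selects BTE0 vs BTE1 */
}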
/*
* Function : hub_error_init
* Purpose : initialize the error handling requirements for a given hub.
* Parameters : hubdev_info, the hub device info structure.
* Assumptions : Called only once per hub, either by a local cpu or by a
* remote cpu when this hub is headless (cpuless).
* Returns : None
*/
void hub_error_init(struct hubdev_info *hubdev_info)
{
if (request_irq(SGI_II_ERROR, hub_eint_handler, IRQF_SHARED,
"SN_hub_error", hubdev_info)) {
printk(KERN_ERR "hub_error_init: Failed to request_irq for 0x%p\n",
hubdev_info);
return;
}
irq_set_handler(SGI_II_ERROR, handle_level_irq);
sn_set_err_irq_affinity(SGI_II_ERROR);
}
/*
* Function : ice_error_init
* Purpose : initialize the error handling requirements for a given tio.
* Parameters : hubdev_info, the hub device info structure.
* Assumptions : Called only once per tio.
* Returns : None
*/
void ice_error_init(struct hubdev_info *hubdev_info)
{
if (request_irq
(SGI_TIO_ERROR, (void *)hub_eint_handler, IRQF_SHARED, "SN_TIO_error",
(void *)hubdev_info)) {
printk("ice_error_init: request_irq() error hubdev_info 0x%p\n",
hubdev_info);
return;
}
irq_set_handler(SGI_TIO_ERROR, handle_level_irq);
sn_set_err_irq_affinity(SGI_TIO_ERROR);
}


@@ -1,30 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2001-2004 Silicon Graphics, Inc. All rights reserved.
*/
#include <asm/sn/leds.h>
void snidle(int state)
{
if (state) {
if (pda->idle_flag == 0) {
/*
* Turn the activity LED off.
*/
set_led_bits(0, LED_CPU_ACTIVITY);
}
pda->idle_flag = 1;
} else {
/*
* Turn the activity LED on.
*/
set_led_bits(LED_CPU_ACTIVITY, LED_CPU_ACTIVITY);
pda->idle_flag = 0;
}
}


@@ -1,513 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2006 Silicon Graphics, Inc. All rights reserved.
*/
#include <asm/sn/types.h>
#include <asm/sn/addrs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/sn_sal.h>
#include "xtalk/hubdev.h"
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/export.h>
/*
* The code in this file will only be executed when running with
* a PROM that has ACPI IO support. (i.e., SN_ACPI_BASE_SUPPORT() == 1)
*/
/*
* This value must match the UUID the PROM uses
* (io/acpi/defblk.c) when building a vendor descriptor.
*/
struct acpi_vendor_uuid sn_uuid = {
.subtype = 0,
.data = { 0x2c, 0xc6, 0xa6, 0xfe, 0x9c, 0x44, 0xda, 0x11,
0xa2, 0x7c, 0x08, 0x00, 0x69, 0x13, 0xea, 0x51 },
};
struct sn_pcidev_match {
u8 bus;
unsigned int devfn;
acpi_handle handle;
};
/*
* Perform the early IO init in PROM.
*/
static long
sal_ioif_init(u64 *result)
{
struct ia64_sal_retval isrv = {0,0,0,0};
SAL_CALL_NOLOCK(isrv,
SN_SAL_IOIF_INIT, 0, 0, 0, 0, 0, 0, 0);
*result = isrv.v0;
return isrv.status;
}
/*
* sn_acpi_hubdev_init() - This function is called by acpi_ns_get_device_callback()
* for all SGIHUB and SGITIO acpi devices defined in the
* DSDT. It obtains the hubdev_info pointer from the
* ACPI vendor resource, which the PROM set up, and sets up the
* hubdev_info in the pda.
*/
static acpi_status __init
sn_acpi_hubdev_init(acpi_handle handle, u32 depth, void *context, void **ret)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
u64 addr;
struct hubdev_info *hubdev;
struct hubdev_info *hubdev_ptr;
int i;
u64 nasid;
struct acpi_resource *resource;
acpi_status status;
struct acpi_resource_vendor_typed *vendor;
extern void sn_common_hubdev_init(struct hubdev_info *);
status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS,
&sn_uuid, &buffer);
if (ACPI_FAILURE(status)) {
acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer);
printk(KERN_ERR
"sn_acpi_hubdev_init: acpi_get_vendor_resource() "
"(0x%x) failed for: %s\n", status,
(char *)name_buffer.pointer);
kfree(name_buffer.pointer);
return AE_OK; /* Continue walking namespace */
}
resource = buffer.pointer;
vendor = &resource->data.vendor_typed;
if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) !=
sizeof(struct hubdev_info *)) {
acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer);
printk(KERN_ERR
"sn_acpi_hubdev_init: Invalid vendor data length: "
"%d for: %s\n",
vendor->byte_length, (char *)name_buffer.pointer);
kfree(name_buffer.pointer);
goto exit;
}
memcpy(&addr, vendor->byte_data, sizeof(struct hubdev_info *));
hubdev_ptr = __va((struct hubdev_info *) addr);
nasid = hubdev_ptr->hdi_nasid;
i = nasid_to_cnodeid(nasid);
hubdev = (struct hubdev_info *)(NODEPDA(i)->pdinfo);
*hubdev = *hubdev_ptr;
sn_common_hubdev_init(hubdev);
exit:
kfree(buffer.pointer);
return AE_OK; /* Continue walking namespace */
}
/*
* sn_get_bussoft_ptr() - The pcibus_bussoft pointer is found in
* the ACPI Vendor resource for this bus.
*/
static struct pcibus_bussoft *
sn_get_bussoft_ptr(struct pci_bus *bus)
{
u64 addr;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_handle handle;
struct pcibus_bussoft *prom_bussoft_ptr;
struct acpi_resource *resource;
acpi_status status;
struct acpi_resource_vendor_typed *vendor;
handle = acpi_device_handle(PCI_CONTROLLER(bus)->companion);
status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS,
&sn_uuid, &buffer);
if (ACPI_FAILURE(status)) {
acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer);
printk(KERN_ERR "%s: "
"acpi_get_vendor_resource() failed (0x%x) for: %s\n",
__func__, status, (char *)name_buffer.pointer);
kfree(name_buffer.pointer);
return NULL;
}
resource = buffer.pointer;
vendor = &resource->data.vendor_typed;
if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) !=
sizeof(struct pcibus_bussoft *)) {
printk(KERN_ERR
"%s: Invalid vendor data length %d\n",
__func__, vendor->byte_length);
kfree(buffer.pointer);
return NULL;
}
memcpy(&addr, vendor->byte_data, sizeof(struct pcibus_bussoft *));
prom_bussoft_ptr = __va((struct pcibus_bussoft *) addr);
kfree(buffer.pointer);
return prom_bussoft_ptr;
}
/*
* sn_extract_device_info - Extract the pcidev_info and the sn_irq_info
* pointers from the vendor resource using the
* provided acpi handle, and copy the structures
* into the argument buffers.
*/
static int
sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info,
struct sn_irq_info **sn_irq_info)
{
u64 addr;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct sn_irq_info *irq_info, *irq_info_prom;
struct pcidev_info *pcidev_ptr, *pcidev_prom_ptr;
struct acpi_resource *resource;
int ret = 0;
acpi_status status;
struct acpi_resource_vendor_typed *vendor;
/*
* The pointer to this device's pcidev_info structure in
* the PROM is in the vendor resource.
*/
status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS,
&sn_uuid, &buffer);
if (ACPI_FAILURE(status)) {
acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer);
printk(KERN_ERR
"%s: acpi_get_vendor_resource() failed (0x%x) for: %s\n",
__func__, status, (char *)name_buffer.pointer);
kfree(name_buffer.pointer);
return 1;
}
resource = buffer.pointer;
vendor = &resource->data.vendor_typed;
if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) !=
sizeof(struct pci_devdev_info *)) {
acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer);
printk(KERN_ERR
"%s: Invalid vendor data length: %d for: %s\n",
__func__, vendor->byte_length,
(char *)name_buffer.pointer);
kfree(name_buffer.pointer);
ret = 1;
goto exit;
}
pcidev_ptr = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
if (!pcidev_ptr)
panic("%s: Unable to alloc memory for pcidev_info", __func__);
memcpy(&addr, vendor->byte_data, sizeof(struct pcidev_info *));
pcidev_prom_ptr = __va(addr);
memcpy(pcidev_ptr, pcidev_prom_ptr, sizeof(struct pcidev_info));
/* Get the IRQ info */
irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
if (!irq_info)
panic("%s: Unable to alloc memory for sn_irq_info", __func__);
if (pcidev_ptr->pdi_sn_irq_info) {
irq_info_prom = __va(pcidev_ptr->pdi_sn_irq_info);
memcpy(irq_info, irq_info_prom, sizeof(struct sn_irq_info));
}
*pcidev_info = pcidev_ptr;
*sn_irq_info = irq_info;
exit:
kfree(buffer.pointer);
return ret;
}
static unsigned int
get_host_devfn(acpi_handle device_handle, acpi_handle rootbus_handle)
{
unsigned long long adr;
acpi_handle child;
unsigned int devfn;
int function;
acpi_handle parent;
int slot;
acpi_status status;
struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_get_name(device_handle, ACPI_FULL_PATHNAME, &name_buffer);
/*
* Do an upward search to find the root bus device, and
* obtain the host devfn from the previous child device.
*/
child = device_handle;
while (child) {
status = acpi_get_parent(child, &parent);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR "%s: acpi_get_parent() failed "
"(0x%x) for: %s\n", __func__, status,
(char *)name_buffer.pointer);
panic("%s: Unable to find host devfn\n", __func__);
}
if (parent == rootbus_handle)
break;
child = parent;
}
if (!child) {
printk(KERN_ERR "%s: Unable to find root bus for: %s\n",
__func__, (char *)name_buffer.pointer);
BUG();
}
status = acpi_evaluate_integer(child, METHOD_NAME__ADR, NULL, &adr);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR "%s: Unable to get _ADR (0x%x) for: %s\n",
__func__, status, (char *)name_buffer.pointer);
panic("%s: Unable to find host devfn\n", __func__);
}
kfree(name_buffer.pointer);
slot = (adr >> 16) & 0xffff;
function = adr & 0xffff;
devfn = PCI_DEVFN(slot, function);
return devfn;
}
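/*
 * Illustrative aside: ACPI _ADR encodes a PCI address as
 * (device << 16) | function, while Linux devfn packs the same pair as
 * (device << 3) | function.  Worked example (value hypothetical):
 * adr = 0x00030001 -> slot 3, function 1 -> devfn = PCI_DEVFN(3, 1) = 25.
 */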
/*
* find_matching_device - Callback routine to find the ACPI device
* that matches up with our pci_dev device.
* Matching is done on bus number and devfn.
* To find the bus number for a particular
* ACPI device, we must look at the _BBN method
* of its parent.
*/
static acpi_status
find_matching_device(acpi_handle handle, u32 lvl, void *context, void **rv)
{
unsigned long long bbn = -1;
unsigned long long adr;
acpi_handle parent = NULL;
acpi_status status;
unsigned int devfn;
int function;
int slot;
struct sn_pcidev_match *info = context;
struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
&adr);
if (ACPI_SUCCESS(status)) {
status = acpi_get_parent(handle, &parent);
if (ACPI_FAILURE(status)) {
acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer);
printk(KERN_ERR
"%s: acpi_get_parent() failed (0x%x) for: %s\n",
__func__, status, (char *)name_buffer.pointer);
kfree(name_buffer.pointer);
return AE_OK;
}
status = acpi_evaluate_integer(parent, METHOD_NAME__BBN,
NULL, &bbn);
if (ACPI_FAILURE(status)) {
acpi_get_name(handle, ACPI_FULL_PATHNAME, &name_buffer);
printk(KERN_ERR
"%s: Failed to find _BBN in parent of: %s\n",
__func__, (char *)name_buffer.pointer);
kfree(name_buffer.pointer);
return AE_OK;
}
slot = (adr >> 16) & 0xffff;
function = adr & 0xffff;
devfn = PCI_DEVFN(slot, function);
if ((info->devfn == devfn) && (info->bus == bbn)) {
/* We have a match! */
info->handle = handle;
return 1;
}
}
return AE_OK;
}
/*
* sn_acpi_get_pcidev_info - Search ACPI namespace for the acpi
* device matching the specified pci_dev,
* and return the pcidev info and irq info.
*/
int
sn_acpi_get_pcidev_info(struct pci_dev *dev, struct pcidev_info **pcidev_info,
struct sn_irq_info **sn_irq_info)
{
unsigned int host_devfn;
struct sn_pcidev_match pcidev_match;
acpi_handle rootbus_handle;
unsigned long long segment;
acpi_status status;
struct acpi_buffer name_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
rootbus_handle = acpi_device_handle(PCI_CONTROLLER(dev)->companion);
status = acpi_evaluate_integer(rootbus_handle, METHOD_NAME__SEG, NULL,
&segment);
if (ACPI_SUCCESS(status)) {
if (segment != pci_domain_nr(dev)) {
acpi_get_name(rootbus_handle, ACPI_FULL_PATHNAME,
&name_buffer);
printk(KERN_ERR
"%s: Segment number mismatch, 0x%llx vs 0x%x for: %s\n",
__func__, segment, pci_domain_nr(dev),
(char *)name_buffer.pointer);
kfree(name_buffer.pointer);
return 1;
}
} else {
acpi_get_name(rootbus_handle, ACPI_FULL_PATHNAME, &name_buffer);
printk(KERN_ERR "%s: Unable to get __SEG from: %s\n",
__func__, (char *)name_buffer.pointer);
kfree(name_buffer.pointer);
return 1;
}
/*
* We want to search all devices in this segment/domain
* of the ACPI namespace for the matching ACPI device,
* which holds the pcidev_info pointer in its vendor resource.
*/
pcidev_match.bus = dev->bus->number;
pcidev_match.devfn = dev->devfn;
pcidev_match.handle = NULL;
acpi_walk_namespace(ACPI_TYPE_DEVICE, rootbus_handle, ACPI_UINT32_MAX,
find_matching_device, NULL, &pcidev_match, NULL);
if (!pcidev_match.handle) {
printk(KERN_ERR
"%s: Could not find matching ACPI device for %s.\n",
__func__, pci_name(dev));
return 1;
}
if (sn_extract_device_info(pcidev_match.handle, pcidev_info, sn_irq_info))
return 1;
/* Build up the pcidev_info.pdi_slot_host_handle */
host_devfn = get_host_devfn(pcidev_match.handle, rootbus_handle);
(*pcidev_info)->pdi_slot_host_handle =
((unsigned long) pci_domain_nr(dev) << 40) |
/* bus == 0 */
host_devfn;
return 0;
}
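/*
 * Illustrative aside: pdi_slot_host_handle is a packed u64 -- the
 * domain in bits [63:40], the host bus in bits [39:32] (zero here), and
 * the host devfn in bits [31:0].  sn_pci_fixup_slot() later unpacks it
 * with the matching shifts.  A minimal sketch of the pair:
 *
 *	handle = ((u64)domain << 40) | ((u64)bus << 32) | devfn;
 *	bus_no = (handle >> 32) & 0xff;
 *	devfn  = handle & 0xffffffff;
 */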
/*
* sn_acpi_slot_fixup - Obtain the pcidev_info and sn_irq_info.
* Perform any SN specific slot fixup.
* At present there does not appear to be
* any generic way to handle a ROM image
* that has been shadowed by the PROM, so
* we pass a pointer to it within the
* pcidev_info structure.
*/
void
sn_acpi_slot_fixup(struct pci_dev *dev)
{
struct pcidev_info *pcidev_info = NULL;
struct sn_irq_info *sn_irq_info = NULL;
struct resource *res;
size_t size;
if (sn_acpi_get_pcidev_info(dev, &pcidev_info, &sn_irq_info)) {
panic("%s: Failure obtaining pcidev_info for %s\n",
__func__, pci_name(dev));
}
if (pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE]) {
/*
* A valid ROM image exists and has been shadowed by the
* PROM. Setup the pci_dev ROM resource with the address
* of the shadowed copy, and the actual length of the ROM image.
*/
size = pci_resource_len(dev, PCI_ROM_RESOURCE);
res = &dev->resource[PCI_ROM_RESOURCE];
pci_disable_rom(dev);
if (res->parent)
release_resource(res);
res->start = pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE];
res->end = res->start + size - 1;
res->flags = IORESOURCE_MEM | IORESOURCE_ROM_SHADOW |
IORESOURCE_PCI_FIXED;
}
sn_pci_fixup_slot(dev, pcidev_info, sn_irq_info);
}
EXPORT_SYMBOL(sn_acpi_slot_fixup);
/*
* sn_acpi_bus_fixup - Perform SN specific setup of software structs
* (pcibus_bussoft, pcidev_info) and hardware
* registers, for the specified bus and devices under it.
*/
void
sn_acpi_bus_fixup(struct pci_bus *bus)
{
struct pci_dev *pci_dev = NULL;
struct pcibus_bussoft *prom_bussoft_ptr;
if (!bus->parent) { /* If root bus */
prom_bussoft_ptr = sn_get_bussoft_ptr(bus);
if (prom_bussoft_ptr == NULL) {
printk(KERN_ERR
"%s: 0x%04x:0x%02x Unable to "
"obtain prom_bussoft_ptr\n",
__func__, pci_domain_nr(bus), bus->number);
return;
}
sn_common_bus_fixup(bus, prom_bussoft_ptr);
}
list_for_each_entry(pci_dev, &bus->devices, bus_list) {
sn_acpi_slot_fixup(pci_dev);
}
}
/*
* sn_io_acpi_init - PROM has ACPI support for IO, defining at a minimum the
* nodes and root buses in the DSDT. As a result, bus scanning
* will be initiated by the Linux ACPI code.
*/
void __init
sn_io_acpi_init(void)
{
u64 result;
long status;
/* SN Altix does not follow the IOSAPIC IRQ routing model */
acpi_irq_model = ACPI_IRQ_MODEL_PLATFORM;
/* Setup hubdev_info for all SGIHUB/SGITIO devices */
acpi_get_devices("SGIHUB", sn_acpi_hubdev_init, NULL, NULL);
acpi_get_devices("SGITIO", sn_acpi_hubdev_init, NULL, NULL);
status = sal_ioif_init(&result);
if (status || result)
panic("sal_ioif_init failed: [%lx] %s\n",
status, ia64_sal_strerror(status));
}


@@ -1,561 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2006 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <asm/sn/types.h>
#include <asm/sn/addrs.h>
#include <asm/sn/sn_feature_sets.h>
#include <asm/sn/geo.h>
#include <asm/sn/io.h>
#include <asm/sn/l1.h>
#include <asm/sn/module.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/simulator.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/tioca_provider.h>
#include <asm/sn/tioce_provider.h>
#include "xtalk/hubdev.h"
#include "xtalk/xwidgetdev.h"
#include <linux/acpi.h>
#include <asm/sn/sn2/sn_hwperf.h>
#include <asm/sn/acpi.h>
extern void sn_init_cpei_timer(void);
extern void register_sn_procfs(void);
extern void sn_io_acpi_init(void);
extern void sn_io_init(void);
static struct list_head sn_sysdata_list;
/* sysdata list struct */
struct sysdata_el {
struct list_head entry;
void *sysdata;
};
int sn_ioif_inited; /* SN I/O infrastructure initialized? */
int sn_acpi_rev; /* SN ACPI revision */
EXPORT_SYMBOL_GPL(sn_acpi_rev);
struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */
/*
* Hooks and struct for unsupported pci providers
*/
static dma_addr_t
sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size, int type)
{
return 0;
}
static void
sn_default_pci_unmap(struct pci_dev *pdev, dma_addr_t addr, int direction)
{
return;
}
static void *
sn_default_pci_bus_fixup(struct pcibus_bussoft *soft, struct pci_controller *controller)
{
return NULL;
}
static struct sn_pcibus_provider sn_pci_default_provider = {
.dma_map = sn_default_pci_map,
.dma_map_consistent = sn_default_pci_map,
.dma_unmap = sn_default_pci_unmap,
.bus_fixup = sn_default_pci_bus_fixup,
};
/*
* Retrieve the DMA Flush List given nasid, widget, and device.
* This list is needed to implement the WAR - Flush DMA data on PIO Reads.
*/
static inline u64
sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num,
u64 address)
{
struct ia64_sal_retval ret_stuff;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
SAL_CALL_NOLOCK(ret_stuff,
(u64) SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST,
(u64) nasid, (u64) widget_num,
(u64) device_num, (u64) address, 0, 0, 0);
return ret_stuff.status;
}
/*
* sn_pcidev_info_get() - Retrieve the pcidev_info struct for the specified
* device.
*/
inline struct pcidev_info *
sn_pcidev_info_get(struct pci_dev *dev)
{
struct pcidev_info *pcidev;
list_for_each_entry(pcidev,
&(SN_PLATFORM_DATA(dev)->pcidev_info), pdi_list) {
if (pcidev->pdi_linux_pcidev == dev)
return pcidev;
}
return NULL;
}
/* Older PROM flush WAR
*
* 01/16/06 -- This war will be in place until a new official PROM is released.
* Additionally note that the struct sn_flush_device_war also has to be
* removed from arch/ia64/sn/include/xtalk/hubdev.h
*/
static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device,
struct sn_flush_device_common *common)
{
struct sn_flush_device_war *war_list;
struct sn_flush_device_war *dev_entry;
struct ia64_sal_retval isrv = {0,0,0,0};
printk_once(KERN_WARNING
"PROM version < 4.50 -- implementing old PROM flush WAR\n");
war_list = kcalloc(DEV_PER_WIDGET, sizeof(*war_list), GFP_KERNEL);
BUG_ON(!war_list);
SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,
nasid, widget, __pa(war_list), 0, 0, 0 ,0);
if (isrv.status)
panic("sn_device_fixup_war failed: %s\n",
ia64_sal_strerror(isrv.status));
dev_entry = war_list + device;
memcpy(common,dev_entry, sizeof(*common));
kfree(war_list);
return isrv.status;
}
/*
* sn_common_hubdev_init() - This routine is called to initialize the HUB data
* structure for each node in the system.
*/
void __init
sn_common_hubdev_init(struct hubdev_info *hubdev)
{
struct sn_flush_device_kernel *sn_flush_device_kernel;
struct sn_flush_device_kernel *dev_entry;
s64 status;
int widget, device, size;
/* Attach the error interrupt handlers */
if (hubdev->hdi_nasid & 1) /* If TIO */
ice_error_init(hubdev);
else
hub_error_init(hubdev);
for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++)
hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev;
if (!hubdev->hdi_flush_nasid_list.widget_p)
return;
size = (HUB_WIDGET_ID_MAX + 1) *
sizeof(struct sn_flush_device_kernel *);
hubdev->hdi_flush_nasid_list.widget_p =
kzalloc(size, GFP_KERNEL);
BUG_ON(!hubdev->hdi_flush_nasid_list.widget_p);
for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) {
size = DEV_PER_WIDGET *
sizeof(struct sn_flush_device_kernel);
sn_flush_device_kernel = kzalloc(size, GFP_KERNEL);
BUG_ON(!sn_flush_device_kernel);
dev_entry = sn_flush_device_kernel;
for (device = 0; device < DEV_PER_WIDGET;
device++, dev_entry++) {
size = sizeof(struct sn_flush_device_common);
dev_entry->common = kzalloc(size, GFP_KERNEL);
BUG_ON(!dev_entry->common);
if (sn_prom_feature_available(PRF_DEVICE_FLUSH_LIST))
status = sal_get_device_dmaflush_list(
hubdev->hdi_nasid, widget, device,
(u64)(dev_entry->common));
else
status = sn_device_fixup_war(hubdev->hdi_nasid,
widget, device,
dev_entry->common);
if (status != SALRET_OK)
panic("SAL call failed: %s\n",
ia64_sal_strerror(status));
spin_lock_init(&dev_entry->sfdl_flush_lock);
}
if (sn_flush_device_kernel)
hubdev->hdi_flush_nasid_list.widget_p[widget] =
sn_flush_device_kernel;
}
}
void sn_pci_unfixup_slot(struct pci_dev *dev)
{
struct pci_dev *host_pci_dev = SN_PCIDEV_INFO(dev)->host_pci_dev;
sn_irq_unfixup(dev);
pci_dev_put(host_pci_dev);
pci_dev_put(dev);
}
/*
* sn_pci_fixup_slot()
*/
void sn_pci_fixup_slot(struct pci_dev *dev, struct pcidev_info *pcidev_info,
struct sn_irq_info *sn_irq_info)
{
int segment = pci_domain_nr(dev->bus);
struct pcibus_bussoft *bs;
struct pci_dev *host_pci_dev;
unsigned int bus_no, devfn;
pci_dev_get(dev); /* for the sysdata pointer */
/* Add pcidev_info to list in pci_controller.platform_data */
list_add_tail(&pcidev_info->pdi_list,
&(SN_PLATFORM_DATA(dev->bus)->pcidev_info));
/*
* Using the PROMs values for the PCI host bus, get the Linux
* PCI host_pci_dev struct and set up host bus linkages
*/
bus_no = (pcidev_info->pdi_slot_host_handle >> 32) & 0xff;
devfn = pcidev_info->pdi_slot_host_handle & 0xffffffff;
host_pci_dev = pci_get_domain_bus_and_slot(segment, bus_no, devfn);
pcidev_info->host_pci_dev = host_pci_dev;
pcidev_info->pdi_linux_pcidev = dev;
pcidev_info->pdi_host_pcidev_info = SN_PCIDEV_INFO(host_pci_dev);
bs = SN_PCIBUS_BUSSOFT(dev->bus);
pcidev_info->pdi_pcibus_info = bs;
if (bs && bs->bs_asic_type < PCIIO_ASIC_MAX_TYPES) {
SN_PCIDEV_BUSPROVIDER(dev) = sn_pci_provider[bs->bs_asic_type];
} else {
SN_PCIDEV_BUSPROVIDER(dev) = &sn_pci_default_provider;
}
/* Only set up IRQ stuff if this device has a host bus context */
if (bs && sn_irq_info->irq_irq) {
pcidev_info->pdi_sn_irq_info = sn_irq_info;
dev->irq = pcidev_info->pdi_sn_irq_info->irq_irq;
sn_irq_fixup(dev, sn_irq_info);
} else {
pcidev_info->pdi_sn_irq_info = NULL;
kfree(sn_irq_info);
}
}
/*
* sn_common_bus_fixup - Perform platform specific bus fixup.
* Execute the ASIC specific fixup routine
* for this bus.
*/
void
sn_common_bus_fixup(struct pci_bus *bus,
struct pcibus_bussoft *prom_bussoft_ptr)
{
int cnode;
struct pci_controller *controller;
struct hubdev_info *hubdev_info;
int nasid;
void *provider_soft;
struct sn_pcibus_provider *provider;
struct sn_platform_data *sn_platform_data;
controller = PCI_CONTROLLER(bus);
/*
* Per-provider fixup. Copies the bus soft structure from prom
* to local area and links SN_PCIBUS_BUSSOFT().
*/
if (prom_bussoft_ptr->bs_asic_type >= PCIIO_ASIC_MAX_TYPES) {
printk(KERN_WARNING "sn_common_bus_fixup: Unsupported asic type, %d",
prom_bussoft_ptr->bs_asic_type);
return;
}
if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB)
return; /* no further fixup necessary */
provider = sn_pci_provider[prom_bussoft_ptr->bs_asic_type];
if (provider == NULL)
panic("sn_common_bus_fixup: No provider registered for this asic type, %d",
prom_bussoft_ptr->bs_asic_type);
if (provider->bus_fixup)
provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr,
controller);
else
provider_soft = NULL;
/*
* Generic bus fixup goes here. Don't reference prom_bussoft_ptr
* after this point.
*/
controller->platform_data = kzalloc(sizeof(struct sn_platform_data),
GFP_KERNEL);
BUG_ON(controller->platform_data == NULL);
sn_platform_data =
(struct sn_platform_data *) controller->platform_data;
sn_platform_data->provider_soft = provider_soft;
INIT_LIST_HEAD(&((struct sn_platform_data *)
controller->platform_data)->pcidev_info);
nasid = NASID_GET(SN_PCIBUS_BUSSOFT(bus)->bs_base);
cnode = nasid_to_cnodeid(nasid);
hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
SN_PCIBUS_BUSSOFT(bus)->bs_xwidget_info =
&(hubdev_info->hdi_xwidget_info[SN_PCIBUS_BUSSOFT(bus)->bs_xid]);
/*
* If the node information we obtained during the fixup phase is
* invalid then set controller->node to -1 (undetermined)
*/
if (controller->node >= num_online_nodes()) {
struct pcibus_bussoft *b = SN_PCIBUS_BUSSOFT(bus);
printk(KERN_WARNING "Device ASIC=%u XID=%u PBUSNUM=%u "
"L_IO=%llx L_MEM=%llx BASE=%llx\n",
b->bs_asic_type, b->bs_xid, b->bs_persist_busnum,
b->bs_legacy_io, b->bs_legacy_mem, b->bs_base);
printk(KERN_WARNING "on node %d but only %d nodes online."
"Association set to undetermined.\n",
controller->node, num_online_nodes());
controller->node = -1;
}
}
void sn_bus_store_sysdata(struct pci_dev *dev)
{
struct sysdata_el *element;
element = kzalloc(sizeof(struct sysdata_el), GFP_KERNEL);
if (!element) {
dev_dbg(&dev->dev, "%s: out of memory!\n", __func__);
return;
}
element->sysdata = SN_PCIDEV_INFO(dev);
list_add(&element->entry, &sn_sysdata_list);
}
void sn_bus_free_sysdata(void)
{
struct sysdata_el *element;
struct list_head *list, *safe;
list_for_each_safe(list, safe, &sn_sysdata_list) {
element = list_entry(list, struct sysdata_el, entry);
list_del(&element->entry);
list_del(&(((struct pcidev_info *)
(element->sysdata))->pdi_list));
kfree(element->sysdata);
kfree(element);
}
return;
}
/*
* hubdev_init_node() - Creates the HUB data structure and links it to its
* own NODE specific data area.
*/
void __init hubdev_init_node(nodepda_t * npda, cnodeid_t node)
{
struct hubdev_info *hubdev_info;
int size;
size = sizeof(struct hubdev_info);
if (node >= num_online_nodes()) /* Headless/memless IO nodes */
node = 0;
hubdev_info = (struct hubdev_info *)memblock_alloc_node(size,
SMP_CACHE_BYTES,
node);
if (!hubdev_info)
panic("%s: Failed to allocate %d bytes align=0x%x nid=%d\n",
__func__, size, SMP_CACHE_BYTES, node);
npda->pdinfo = (void *)hubdev_info;
}
geoid_t
cnodeid_get_geoid(cnodeid_t cnode)
{
struct hubdev_info *hubdev;
hubdev = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
return hubdev->hdi_geoid;
}
void sn_generate_path(struct pci_bus *pci_bus, char *address)
{
nasid_t nasid;
cnodeid_t cnode;
geoid_t geoid;
moduleid_t moduleid;
u16 bricktype;
nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base);
cnode = nasid_to_cnodeid(nasid);
geoid = cnodeid_get_geoid(cnode);
moduleid = geo_module(geoid);
sprintf(address, "module_%c%c%c%c%.2d",
'0'+RACK_GET_CLASS(MODULE_GET_RACK(moduleid)),
'0'+RACK_GET_GROUP(MODULE_GET_RACK(moduleid)),
'0'+RACK_GET_NUM(MODULE_GET_RACK(moduleid)),
MODULE_GET_BTCHAR(moduleid), MODULE_GET_BPOS(moduleid));
/* Tollhouse requires slot id to be displayed */
bricktype = MODULE_GET_BTYPE(moduleid);
if ((bricktype == L1_BRICKTYPE_191010) ||
(bricktype == L1_BRICKTYPE_1932))
sprintf(address + strlen(address), "^%d",
geo_slot(geoid));
}
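/*
 * Illustrative aside (values hypothetical): for a geoid in rack 023
 * with brick type 'c' at position 4, the sprintf() above yields
 * "module_023c04"; for the two Tollhouse brick types a "^<slot>"
 * suffix is appended, e.g. "module_023c04^6".
 */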
void sn_pci_fixup_bus(struct pci_bus *bus)
{
if (SN_ACPI_BASE_SUPPORT())
sn_acpi_bus_fixup(bus);
else
sn_bus_fixup(bus);
}
/*
* sn_io_early_init - Perform early IO (and some non-IO) initialization.
* In particular, setup the sn_pci_provider[] array.
* This needs to be done prior to any bus scanning
* (acpi_scan_init()) in the ACPI case, as the SN
* bus fixup code will reference the array.
*/
static int __init
sn_io_early_init(void)
{
int i;
if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
return 0;
/* we set the acpi revision to that of the DSDT table OEM rev. */
{
struct acpi_table_header *header = NULL;
acpi_get_table(ACPI_SIG_DSDT, 1, &header);
BUG_ON(header == NULL);
sn_acpi_rev = header->oem_revision;
}
/*
* prime sn_pci_provider[]. Individual provider init routines will
* override their respective default entries.
*/
for (i = 0; i < PCIIO_ASIC_MAX_TYPES; i++)
sn_pci_provider[i] = &sn_pci_default_provider;
pcibr_init_provider();
tioca_init_provider();
tioce_init_provider();
sn_irq_lh_init();
INIT_LIST_HEAD(&sn_sysdata_list);
sn_init_cpei_timer();
#ifdef CONFIG_PROC_FS
register_sn_procfs();
#endif
{
struct acpi_table_header *header;
(void)acpi_get_table(ACPI_SIG_DSDT, 1, &header);
printk(KERN_INFO "ACPI DSDT OEM Rev 0x%x\n",
header->oem_revision);
}
if (SN_ACPI_BASE_SUPPORT())
sn_io_acpi_init();
else
sn_io_init();
return 0;
}
arch_initcall(sn_io_early_init);
/*
* sn_io_late_init() - Perform any final platform specific IO initialization.
*/
int __init
sn_io_late_init(void)
{
struct pci_bus *bus;
struct pcibus_bussoft *bussoft;
cnodeid_t cnode;
nasid_t nasid;
cnodeid_t near_cnode;
if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
return 0;
/*
* Setup closest node in pci_controller->node for
* PIC, TIOCP, TIOCE (TIOCA does it during bus fixup using
* info from the PROM).
*/
bus = NULL;
while ((bus = pci_find_next_bus(bus)) != NULL) {
bussoft = SN_PCIBUS_BUSSOFT(bus);
nasid = NASID_GET(bussoft->bs_base);
cnode = nasid_to_cnodeid(nasid);
if ((bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP) ||
(bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCE) ||
(bussoft->bs_asic_type == PCIIO_ASIC_TYPE_PIC)) {
/* PCI Bridge: find nearest node with CPUs */
int e = sn_hwperf_get_nearest_node(cnode, NULL,
&near_cnode);
if (e < 0) {
near_cnode = (cnodeid_t)-1; /* use any node */
printk(KERN_WARNING "sn_io_late_init: failed "
"to find near node with CPUs for "
"node %d, err=%d\n", cnode, e);
}
PCI_CONTROLLER(bus)->node = near_cnode;
}
}
sn_ioif_inited = 1; /* SN I/O infrastructure now initialized */
return 0;
}
fs_initcall(sn_io_late_init);
EXPORT_SYMBOL(sn_pci_unfixup_slot);
EXPORT_SYMBOL(sn_bus_store_sysdata);
EXPORT_SYMBOL(sn_bus_free_sysdata);
EXPORT_SYMBOL(sn_generate_path);


@@ -1,308 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2006 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/slab.h>
#include <linux/export.h>
#include <asm/sn/types.h>
#include <asm/sn/addrs.h>
#include <asm/sn/io.h>
#include <asm/sn/module.h>
#include <asm/sn/intr.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/sn_sal.h>
#include "xtalk/hubdev.h"
/*
* The code in this file will only be executed when running with
* a PROM that does _not_ have base ACPI IO support.
* (i.e., SN_ACPI_BASE_SUPPORT() == 0)
*/
static int max_segment_number; /* Default highest segment number */
static int max_pcibus_number = 255; /* Default highest pci bus number */
/*
* Retrieve the hub device info structure for the given nasid.
*/
static inline u64 sal_get_hubdev_info(u64 handle, u64 address)
{
struct ia64_sal_retval ret_stuff;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
SAL_CALL_NOLOCK(ret_stuff,
(u64) SN_SAL_IOIF_GET_HUBDEV_INFO,
(u64) handle, (u64) address, 0, 0, 0, 0, 0);
return ret_stuff.v0;
}
/*
* Retrieve the pci bus information given the bus number.
*/
static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
{
struct ia64_sal_retval ret_stuff;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
SAL_CALL_NOLOCK(ret_stuff,
(u64) SN_SAL_IOIF_GET_PCIBUS_INFO,
(u64) segment, (u64) busnum, (u64) address, 0, 0, 0, 0);
return ret_stuff.v0;
}
/*
* Retrieve the pci device information given the bus and device|function number.
*/
static inline u64
sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
u64 sn_irq_info)
{
struct ia64_sal_retval ret_stuff;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
SAL_CALL_NOLOCK(ret_stuff,
(u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
(u64) segment, (u64) bus_number, (u64) devfn,
(u64) pci_dev,
sn_irq_info, 0, 0);
return ret_stuff.v0;
}
/*
* sn_fixup_ionodes() - This routine initializes the HUB data structure for
* each node in the system. This function is only
* executed when running with a non-ACPI capable PROM.
*/
static void __init sn_fixup_ionodes(void)
{
struct hubdev_info *hubdev;
u64 status;
u64 nasid;
int i;
extern void sn_common_hubdev_init(struct hubdev_info *);
/*
* Get SGI Specific HUB chipset information.
* Inform Prom that this kernel can support domain bus numbering.
*/
for (i = 0; i < num_cnodes; i++) {
hubdev = (struct hubdev_info *)(NODEPDA(i)->pdinfo);
nasid = cnodeid_to_nasid(i);
hubdev->max_segment_number = 0xffffffff;
hubdev->max_pcibus_number = 0xff;
status = sal_get_hubdev_info(nasid, (u64) __pa(hubdev));
if (status)
continue;
/* Save the largest Domain and pcibus numbers found. */
if (hubdev->max_segment_number) {
/*
* Dealing with a Prom that supports segments.
*/
max_segment_number = hubdev->max_segment_number;
max_pcibus_number = hubdev->max_pcibus_number;
}
sn_common_hubdev_init(hubdev);
}
}
/*
* sn_legacy_pci_window_fixup - Setup PCI resources for
* legacy IO and MEM space. This needs to
* be done here, as the PROM does not have
* ACPI support defining the root buses
* and their resources (_CRS).
*/
static void
sn_legacy_pci_window_fixup(struct resource *res,
u64 legacy_io, u64 legacy_mem)
{
res[0].name = "legacy_io";
res[0].flags = IORESOURCE_IO;
res[0].start = legacy_io;
res[0].end = res[0].start + 0xffff;
res[0].parent = &ioport_resource;
res[1].name = "legacy_mem";
res[1].flags = IORESOURCE_MEM;
res[1].start = legacy_mem;
res[1].end = res[1].start + (1024 * 1024) - 1;
res[1].parent = &iomem_resource;
}
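/*
 * Illustrative aside: these are the classic PC legacy windows relocated
 * to bridge-specific bases -- 64KB of IO space (legacy_io through
 * legacy_io + 0xffff) and 1MB of MEM space (legacy_mem through
 * legacy_mem + 0xfffff).
 */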
/*
* sn_io_slot_fixup() - We are not running with an ACPI capable PROM,
* and need to convert the pci_dev->resource
* 'start' and 'end' addresses to mapped addresses,
* and setup the pci_controller->window array entries.
*/
void
sn_io_slot_fixup(struct pci_dev *dev)
{
int idx;
struct resource *res;
unsigned long size;
struct pcidev_info *pcidev_info;
struct sn_irq_info *sn_irq_info;
int status;
pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
if (!pcidev_info)
panic("%s: Unable to alloc memory for pcidev_info", __func__);
sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
if (!sn_irq_info)
panic("%s: Unable to alloc memory for sn_irq_info", __func__);
/* Call to retrieve pci device information needed by kernel. */
status = sal_get_pcidev_info((u64) pci_domain_nr(dev),
(u64) dev->bus->number,
dev->devfn,
(u64) __pa(pcidev_info),
(u64) __pa(sn_irq_info));
BUG_ON(status); /* Cannot get platform pci device information */
/* Copy over PIO Mapped Addresses */
for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
if (!pcidev_info->pdi_pio_mapped_addr[idx])
continue;
res = &dev->resource[idx];
size = res->end - res->start;
if (size == 0)
continue;
res->start = pcidev_info->pdi_pio_mapped_addr[idx];
res->end = res->start + size;
/*
* if it's already in the device structure, remove it before
* inserting
*/
if (res->parent && res->parent->child)
release_resource(res);
if (res->flags & IORESOURCE_IO)
insert_resource(&ioport_resource, res);
else
insert_resource(&iomem_resource, res);
/*
* If ROM, mark as shadowed in PROM.
*/
if (idx == PCI_ROM_RESOURCE) {
pci_disable_rom(dev);
res->flags = IORESOURCE_MEM | IORESOURCE_ROM_SHADOW |
IORESOURCE_PCI_FIXED;
}
}
sn_pci_fixup_slot(dev, pcidev_info, sn_irq_info);
}
EXPORT_SYMBOL(sn_io_slot_fixup);
/*
* sn_pci_controller_fixup() - This routine sets up a bus's resources
* consistent with the Linux PCI abstraction layer.
*/
static void __init
sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
{
s64 status = 0;
struct pci_controller *controller;
struct pcibus_bussoft *prom_bussoft_ptr;
struct resource *res;
LIST_HEAD(resources);
status = sal_get_pcibus_info((u64) segment, (u64) busnum,
(u64) ia64_tpa(&prom_bussoft_ptr));
if (status > 0)
return; /*bus # does not exist */
prom_bussoft_ptr = __va(prom_bussoft_ptr);
controller = kzalloc(sizeof(*controller), GFP_KERNEL);
BUG_ON(!controller);
controller->segment = segment;
res = kcalloc(2, sizeof(struct resource), GFP_KERNEL);
BUG_ON(!res);
/*
* Temporarily save the prom_bussoft_ptr for use by sn_bus_fixup().
* (platform_data will be overwritten later in sn_common_bus_fixup())
*/
controller->platform_data = prom_bussoft_ptr;
sn_legacy_pci_window_fixup(res,
prom_bussoft_ptr->bs_legacy_io,
prom_bussoft_ptr->bs_legacy_mem);
pci_add_resource_offset(&resources, &res[0],
prom_bussoft_ptr->bs_legacy_io);
pci_add_resource_offset(&resources, &res[1],
prom_bussoft_ptr->bs_legacy_mem);
bus = pci_scan_root_bus(NULL, busnum, &pci_root_ops, controller,
&resources);
if (bus == NULL) {
kfree(res);
kfree(controller);
return;
}
pci_bus_add_devices(bus);
}
/*
* sn_bus_fixup
*/
void
sn_bus_fixup(struct pci_bus *bus)
{
struct pci_dev *pci_dev = NULL;
struct pcibus_bussoft *prom_bussoft_ptr;
if (!bus->parent) { /* If root bus */
prom_bussoft_ptr = PCI_CONTROLLER(bus)->platform_data;
if (prom_bussoft_ptr == NULL) {
printk(KERN_ERR
"sn_bus_fixup: 0x%04x:0x%02x Unable to "
"obtain prom_bussoft_ptr\n",
pci_domain_nr(bus), bus->number);
return;
}
sn_common_bus_fixup(bus, prom_bussoft_ptr);
}
list_for_each_entry(pci_dev, &bus->devices, bus_list) {
sn_io_slot_fixup(pci_dev);
}
}
/*
* sn_io_init - PROM does not have ACPI support to define nodes or root buses,
* so we need to do things the hard way, including initiating the
* bus scanning ourselves.
*/
void __init sn_io_init(void)
{
int i, j;
sn_fixup_ionodes();
/* busses are not known yet ... */
for (i = 0; i <= max_segment_number; i++)
for (j = 0; j <= max_pcibus_number; j++)
sn_pci_controller_fixup(i, j, NULL);
}


@@ -1,82 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000-2003, 2006 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/module.h>
#include <linux/acpi.h>
#include <asm/io.h>
#include <asm/delay.h>
#include <asm/vga.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/simulator.h>
#include <asm/sn/pda.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/shub_mmr.h>
#include <asm/sn/acpi.h>
#define IS_LEGACY_VGA_IOPORT(p) \
(((p) >= 0x3b0 && (p) <= 0x3bb) || ((p) >= 0x3c0 && (p) <= 0x3df))
/**
* sn_io_addr - convert an in/out port to an i/o address
* @port: port to convert
*
* Legacy in/out instructions are converted to ld/st instructions
* on IA64. This routine will convert a port number into a valid
* SN i/o address. Used by sn_in*() and sn_out*().
*/
void *sn_io_addr(unsigned long port)
{
if (!IS_RUNNING_ON_SIMULATOR()) {
if (IS_LEGACY_VGA_IOPORT(port))
return (__ia64_mk_io_addr(port));
/* On sn2, legacy I/O ports don't point at anything */
if (port < (64 * 1024))
return NULL;
if (SN_ACPI_BASE_SUPPORT())
return (__ia64_mk_io_addr(port));
else
return ((void *)(port | __IA64_UNCACHED_OFFSET));
} else {
/* but the simulator uses them... */
unsigned long addr;
/*
* word align port, but need more than 10 bits
* for accessing registers in bedrock local block
* (so we don't do port&0xfff)
*/
addr = (is_shub2() ? 0xc00000028c000000UL : 0xc0000087cc000000UL) | ((port >> 2) << 12);
if ((port >= 0x1f0 && port <= 0x1f7) || port == 0x3f6 || port == 0x3f7)
addr |= port;
return (void *)addr;
}
}
EXPORT_SYMBOL(sn_io_addr);
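/*
 * Illustrative aside (worked example): on the simulator, IDE port 0x1f0
 * maps to base | ((0x1f0 >> 2) << 12) = base | 0x7c000, and because it
 * is in the 0x1f0-0x1f7 range the raw port is OR'd back in, giving
 * base | 0x7c000 | 0x1f0 (the base depends on is_shub2() as above).
 */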
/**
* __sn_mmiowb - I/O space memory barrier
*
* See arch/ia64/include/asm/io.h and Documentation/driver-api/device-io.rst
* for details.
*
* On SN2, we wait for the PIO_WRITE_STATUS SHub register to clear.
* See PV 871084 for details about the WAR about zero value.
*
*/
void __sn_mmiowb(void)
{
volatile unsigned long *adr = pda->pio_write_status_addr;
unsigned long val = pda->pio_write_status_val;
while ((*adr & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != val)
cpu_relax();
}
EXPORT_SYMBOL(__sn_mmiowb);


@@ -1,489 +0,0 @@
/*
* Platform dependent support for SGI SN
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2000-2008 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include <asm/sn/intr.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/shub_mmr.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/sn_feature_sets.h>
static void register_intr_pda(struct sn_irq_info *sn_irq_info);
static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
extern int sn_ioif_inited;
struct list_head **sn_irq_lh;
static DEFINE_SPINLOCK(sn_irq_info_lock); /* non-IRQ lock */
u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
struct sn_irq_info *sn_irq_info,
int req_irq, nasid_t req_nasid,
int req_slice)
{
struct ia64_sal_retval ret_stuff;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
(u64) SAL_INTR_ALLOC, (u64) local_nasid,
(u64) local_widget, __pa(sn_irq_info), (u64) req_irq,
(u64) req_nasid, (u64) req_slice);
return ret_stuff.status;
}
void sn_intr_free(nasid_t local_nasid, int local_widget,
struct sn_irq_info *sn_irq_info)
{
struct ia64_sal_retval ret_stuff;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
(u64) SAL_INTR_FREE, (u64) local_nasid,
(u64) local_widget, (u64) sn_irq_info->irq_irq,
(u64) sn_irq_info->irq_cookie, 0, 0);
}
u64 sn_intr_redirect(nasid_t local_nasid, int local_widget,
struct sn_irq_info *sn_irq_info,
nasid_t req_nasid, int req_slice)
{
struct ia64_sal_retval ret_stuff;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
(u64) SAL_INTR_REDIRECT, (u64) local_nasid,
(u64) local_widget, __pa(sn_irq_info),
(u64) req_nasid, (u64) req_slice, 0);
return ret_stuff.status;
}
static unsigned int sn_startup_irq(struct irq_data *data)
{
return 0;
}
static void sn_shutdown_irq(struct irq_data *data)
{
}
extern void ia64_mca_register_cpev(int);
static void sn_disable_irq(struct irq_data *data)
{
if (data->irq == local_vector_to_irq(IA64_CPE_VECTOR))
ia64_mca_register_cpev(0);
}
static void sn_enable_irq(struct irq_data *data)
{
if (data->irq == local_vector_to_irq(IA64_CPE_VECTOR))
ia64_mca_register_cpev(data->irq);
}
static void sn_ack_irq(struct irq_data *data)
{
u64 event_occurred, mask;
unsigned int irq = data->irq & 0xff;
event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED));
mask = event_occurred & SH_ALL_INT_MASK;
HUB_S((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), mask);
__set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);
irq_move_irq(data);
}
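/*
 * Illustrative aside (assuming the usual SHub alias-register semantics):
 * SH_EVENT_OCCURRED_ALIAS is a write-one-to-clear view of
 * SH_EVENT_OCCURRED, so storing back the observed mask acks exactly the
 * events that were read, without racing against newly arriving ones.
 */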
struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
nasid_t nasid, int slice)
{
int vector;
int cpuid;
#ifdef CONFIG_SMP
int cpuphys;
#endif
int64_t bridge;
int local_widget, status;
nasid_t local_nasid;
struct sn_irq_info *new_irq_info;
struct sn_pcibus_provider *pci_provider;
bridge = (u64) sn_irq_info->irq_bridge;
if (!bridge) {
return NULL; /* irq is not a device interrupt */
}
local_nasid = NASID_GET(bridge);
if (local_nasid & 1)
local_widget = TIO_SWIN_WIDGETNUM(bridge);
else
local_widget = SWIN_WIDGETNUM(bridge);
vector = sn_irq_info->irq_irq;
/* Make use of SAL_INTR_REDIRECT if PROM supports it */
status = sn_intr_redirect(local_nasid, local_widget, sn_irq_info, nasid, slice);
if (!status) {
new_irq_info = sn_irq_info;
goto finish_up;
}
/*
* PROM does not support SAL_INTR_REDIRECT, or it failed.
* Revert to old method.
*/
new_irq_info = kmemdup(sn_irq_info, sizeof(struct sn_irq_info),
GFP_ATOMIC);
if (new_irq_info == NULL)
return NULL;
/* Free the old PROM new_irq_info structure */
sn_intr_free(local_nasid, local_widget, new_irq_info);
unregister_intr_pda(new_irq_info);
/* allocate a new PROM new_irq_info struct */
status = sn_intr_alloc(local_nasid, local_widget,
new_irq_info, vector,
nasid, slice);
/* SAL call failed */
if (status) {
kfree(new_irq_info);
return NULL;
}
register_intr_pda(new_irq_info);
spin_lock(&sn_irq_info_lock);
list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
spin_unlock(&sn_irq_info_lock);
kfree_rcu(sn_irq_info, rcu);
finish_up:
/* Update kernels new_irq_info with new target info */
cpuid = nasid_slice_to_cpuid(new_irq_info->irq_nasid,
new_irq_info->irq_slice);
new_irq_info->irq_cpuid = cpuid;
pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];
/*
* If this represents a line interrupt, target it. If it's
* an msi (irq_int_bit < 0), it's already targeted.
*/
if (new_irq_info->irq_int_bit >= 0 &&
pci_provider && pci_provider->target_interrupt)
(pci_provider->target_interrupt)(new_irq_info);
#ifdef CONFIG_SMP
cpuphys = cpu_physical_id(cpuid);
set_irq_affinity_info((vector & 0xff), cpuphys, 0);
#endif
return new_irq_info;
}
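/*
 * Illustrative aside: the list_replace_rcu()/kfree_rcu() pairing above
 * lets lockless readers traversing sn_irq_lh[] under rcu_read_lock()
 * keep using the old sn_irq_info until a grace period elapses; writers
 * serialize on sn_irq_info_lock.
 */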
static int sn_set_affinity_irq(struct irq_data *data,
const struct cpumask *mask, bool force)
{
struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
unsigned int irq = data->irq;
nasid_t nasid;
int slice;
nasid = cpuid_to_nasid(cpumask_first_and(mask, cpu_online_mask));
slice = cpuid_to_slice(cpumask_first_and(mask, cpu_online_mask));
list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
sn_irq_lh[irq], list)
(void)sn_retarget_vector(sn_irq_info, nasid, slice);
return 0;
}
#ifdef CONFIG_SMP
void sn_set_err_irq_affinity(unsigned int irq)
{
/*
* On systems which support CPU disabling (SHub2), all error interrupts
* are targeted at the boot CPU.
*/
if (is_shub2() && sn_prom_feature_available(PRF_CPU_DISABLE_SUPPORT))
set_irq_affinity_info(irq, cpu_physical_id(0), 0);
}
#else
void sn_set_err_irq_affinity(unsigned int irq) { }
#endif
static void
sn_mask_irq(struct irq_data *data)
{
}
static void
sn_unmask_irq(struct irq_data *data)
{
}
struct irq_chip irq_type_sn = {
.name = "SN hub",
.irq_startup = sn_startup_irq,
.irq_shutdown = sn_shutdown_irq,
.irq_enable = sn_enable_irq,
.irq_disable = sn_disable_irq,
.irq_ack = sn_ack_irq,
.irq_mask = sn_mask_irq,
.irq_unmask = sn_unmask_irq,
.irq_set_affinity = sn_set_affinity_irq
};
ia64_vector sn_irq_to_vector(int irq)
{
if (irq >= IA64_NUM_VECTORS)
return 0;
return (ia64_vector)irq;
}
unsigned int sn_local_vector_to_irq(u8 vector)
{
return (CPU_VECTOR_TO_IRQ(smp_processor_id(), vector));
}
void sn_irq_init(void)
{
int i;
ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR;
ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR;
for (i = 0; i < NR_IRQS; i++) {
if (irq_get_chip(i) == &no_irq_chip)
irq_set_chip(i, &irq_type_sn);
}
}
static void register_intr_pda(struct sn_irq_info *sn_irq_info)
{
int irq = sn_irq_info->irq_irq;
int cpu = sn_irq_info->irq_cpuid;
if (pdacpu(cpu)->sn_last_irq < irq) {
pdacpu(cpu)->sn_last_irq = irq;
}
if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq)
pdacpu(cpu)->sn_first_irq = irq;
}
static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
{
int irq = sn_irq_info->irq_irq;
int cpu = sn_irq_info->irq_cpuid;
struct sn_irq_info *tmp_irq_info;
int i, foundmatch;
rcu_read_lock();
if (pdacpu(cpu)->sn_last_irq == irq) {
foundmatch = 0;
for (i = pdacpu(cpu)->sn_last_irq - 1;
i && !foundmatch; i--) {
list_for_each_entry_rcu(tmp_irq_info,
sn_irq_lh[i],
list) {
if (tmp_irq_info->irq_cpuid == cpu) {
foundmatch = 1;
break;
}
}
}
pdacpu(cpu)->sn_last_irq = i;
}
if (pdacpu(cpu)->sn_first_irq == irq) {
foundmatch = 0;
for (i = pdacpu(cpu)->sn_first_irq + 1;
i < NR_IRQS && !foundmatch; i++) {
list_for_each_entry_rcu(tmp_irq_info,
sn_irq_lh[i],
list) {
if (tmp_irq_info->irq_cpuid == cpu) {
foundmatch = 1;
break;
}
}
}
pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i);
}
rcu_read_unlock();
}
void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
{
nasid_t nasid = sn_irq_info->irq_nasid;
int slice = sn_irq_info->irq_slice;
int cpu = nasid_slice_to_cpuid(nasid, slice);
#ifdef CONFIG_SMP
int cpuphys;
#endif
pci_dev_get(pci_dev);
sn_irq_info->irq_cpuid = cpu;
sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev);
/* link it into the sn_irq[irq] list */
spin_lock(&sn_irq_info_lock);
list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
reserve_irq_vector(sn_irq_info->irq_irq);
if (sn_irq_info->irq_int_bit != -1)
irq_set_handler(sn_irq_info->irq_irq, handle_level_irq);
spin_unlock(&sn_irq_info_lock);
register_intr_pda(sn_irq_info);
#ifdef CONFIG_SMP
cpuphys = cpu_physical_id(cpu);
set_irq_affinity_info(sn_irq_info->irq_irq, cpuphys, 0);
/*
* Affinity was set by the PROM, prevent it from
* being reset by the request_irq() path.
*/
irqd_mark_affinity_was_set(irq_get_irq_data(sn_irq_info->irq_irq));
#endif
}
void sn_irq_unfixup(struct pci_dev *pci_dev)
{
struct sn_irq_info *sn_irq_info;
/* Only cleanup IRQ stuff if this device has a host bus context */
if (!SN_PCIDEV_BUSSOFT(pci_dev))
return;
sn_irq_info = SN_PCIDEV_INFO(pci_dev)->pdi_sn_irq_info;
if (!sn_irq_info)
return;
if (!sn_irq_info->irq_irq) {
kfree(sn_irq_info);
return;
}
unregister_intr_pda(sn_irq_info);
spin_lock(&sn_irq_info_lock);
list_del_rcu(&sn_irq_info->list);
spin_unlock(&sn_irq_info_lock);
if (list_empty(sn_irq_lh[sn_irq_info->irq_irq]))
free_irq_vector(sn_irq_info->irq_irq);
kfree_rcu(sn_irq_info, rcu);
pci_dev_put(pci_dev);
}
static inline void
sn_call_force_intr_provider(struct sn_irq_info *sn_irq_info)
{
struct sn_pcibus_provider *pci_provider;
pci_provider = sn_pci_provider[sn_irq_info->irq_bridge_type];
/* Don't force an interrupt if the irq has been disabled */
if (!irqd_irq_disabled(irq_get_irq_data(sn_irq_info->irq_irq)) &&
pci_provider && pci_provider->force_interrupt)
(*pci_provider->force_interrupt)(sn_irq_info);
}
/*
* Check for lost interrupts. If the PIC int_status reg. says that
* an interrupt has been sent, but not handled, and the interrupt
* is not pending in either the cpu irr regs or in the soft irr regs,
* and the interrupt is not in service, then the interrupt may have
* been lost. Force an interrupt on that pin. It is possible that
* the interrupt is in flight, so we may generate a spurious interrupt,
* but we should never miss a real lost interrupt.
*/
static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
{
u64 regval;
struct pcidev_info *pcidev_info;
struct pcibus_info *pcibus_info;
/*
* Bridge types attached to TIO (anything but PIC) do not need this WAR
* since they do not target Shub II interrupt registers. If that
* ever changes, this check will need to be updated accordingly.
*/
if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_PIC)
return;
pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
if (!pcidev_info)
return;
pcibus_info =
(struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
pdi_pcibus_info;
regval = pcireg_intr_status_get(pcibus_info);
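/*
 * Force the interrupt only if the vector is neither pending in the
 * CPU IRR nor marked in-service in the per-cpu soft irr, and the
 * bridge reports the pin asserted both now and at the previous poll.
 */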
if (!ia64_get_irr(irq_to_vector(irq))) {
if (!test_bit(irq, pda->sn_in_service_ivecs)) {
regval &= 0xff;
if (sn_irq_info->irq_int_bit & regval &
sn_irq_info->irq_last_intr) {
regval &= ~(sn_irq_info->irq_int_bit & regval);
sn_call_force_intr_provider(sn_irq_info);
}
}
}
sn_irq_info->irq_last_intr = regval;
}
void sn_lb_int_war_check(void)
{
struct sn_irq_info *sn_irq_info;
int i;
if (!sn_ioif_inited || pda->sn_first_irq == 0)
return;
rcu_read_lock();
for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) {
list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[i], list) {
sn_check_intr(i, sn_irq_info);
}
}
rcu_read_unlock();
}
void __init sn_irq_lh_init(void)
{
int i;
sn_irq_lh = kmalloc_array(NR_IRQS, sizeof(struct list_head *),
GFP_KERNEL);
if (!sn_irq_lh)
panic("SN PCI INIT: Failed to allocate memory for PCI init\n");
for (i = 0; i < NR_IRQS; i++) {
sn_irq_lh[i] = kmalloc(sizeof(struct list_head), GFP_KERNEL);
if (!sn_irq_lh[i])
panic("SN PCI INIT: Failed IRQ memory allocation\n");
INIT_LIST_HEAD(sn_irq_lh[i]);
}
}

View file

@ -1,107 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <asm/sn/types.h>
#include <asm/sn/module.h>
#include <asm/sn/l1.h>
char brick_types[MAX_BRICK_TYPES + 1] = "cri.xdpn%#=vo^kjbf890123456789...";
/*
* Format a module id for printing.
*
* There are three possible formats:
*
* MODULE_FORMAT_BRIEF is the brief 6-character format, including
* the actual brick-type as recorded in the
* moduleid_t, eg. 002c15 for a C-brick, or
* 101#17 for a PX-brick.
*
* MODULE_FORMAT_LONG is the hwgraph format, eg. rack/002/bay/15
* or rack/101/bay/17 (note that the brick
* type does not appear in this format).
*
* MODULE_FORMAT_LCD is like MODULE_FORMAT_BRIEF, except that it
* ensures that the module id provided appears
* exactly as it would on the LCD display of
* the corresponding brick, eg. still 002c15
* for a C-brick, but 101p17 for a PX-brick.
*
* maule (9/13/04): Removed top-level check for (fmt == MODULE_FORMAT_LCD)
* making MODULE_FORMAT_LCD equivalent to MODULE_FORMAT_BRIEF. It was
* decided that all callers should assume the returned string is what
* is displayed on the brick L1 LCD.
*/
void
format_module_id(char *buffer, moduleid_t m, int fmt)
{
int rack, position;
unsigned char brickchar;
rack = MODULE_GET_RACK(m);
brickchar = MODULE_GET_BTCHAR(m);
/* Be sure we use the same brick type character as displayed
* on the brick's LCD
*/
switch (brickchar)
{
case L1_BRICKTYPE_GA:
case L1_BRICKTYPE_OPUS_TIO:
brickchar = L1_BRICKTYPE_C;
break;
case L1_BRICKTYPE_PX:
case L1_BRICKTYPE_PE:
case L1_BRICKTYPE_PA:
case L1_BRICKTYPE_SA: /* we can move this to the "I's" later
* if that makes more sense
*/
brickchar = L1_BRICKTYPE_P;
break;
case L1_BRICKTYPE_IX:
case L1_BRICKTYPE_IA:
brickchar = L1_BRICKTYPE_I;
break;
}
position = MODULE_GET_BPOS(m);
if ((fmt == MODULE_FORMAT_BRIEF) || (fmt == MODULE_FORMAT_LCD)) {
/* Brief module number format, eg. 002c15 */
/* Decompress the rack number */
*buffer++ = '0' + RACK_GET_CLASS(rack);
*buffer++ = '0' + RACK_GET_GROUP(rack);
*buffer++ = '0' + RACK_GET_NUM(rack);
/* Add the brick type */
*buffer++ = brickchar;
}
else if (fmt == MODULE_FORMAT_LONG) {
/* Fuller hwgraph format, eg. rack/002/bay/15 */
strcpy(buffer, "rack" "/"); buffer += strlen(buffer);
*buffer++ = '0' + RACK_GET_CLASS(rack);
*buffer++ = '0' + RACK_GET_GROUP(rack);
*buffer++ = '0' + RACK_GET_NUM(rack);
strcpy(buffer, "/" "bay" "/"); buffer += strlen(buffer);
}
/* Add the bay position, using at least two digits */
if (position < 10)
*buffer++ = '0';
sprintf(buffer, "%d", position);
}
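/*
 * Usage sketch (illustrative, not from the original file): the same
 * moduleid_t prints three ways. The example output values assume the
 * encoding described in the comment above.
 */
static void __maybe_unused format_module_id_demo(moduleid_t m)
{
	char buf[32];

	format_module_id(buf, m, MODULE_FORMAT_BRIEF);	/* e.g. "002c15" */
	printk(KERN_DEBUG "brief: %s\n", buf);
	format_module_id(buf, m, MODULE_FORMAT_LCD);	/* same as brief now */
	printk(KERN_DEBUG "lcd:   %s\n", buf);
	format_module_id(buf, m, MODULE_FORMAT_LONG);	/* "rack/002/bay/15" */
	printk(KERN_DEBUG "long:  %s\n", buf);
}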

View file

@ -1,11 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
*/
#define MACHVEC_PLATFORM_NAME sn2
#define MACHVEC_PLATFORM_HEADER <asm/machvec_sn2.h>
#include <asm/machvec_init.h>

View file

@ -1,144 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2000-2006 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <asm/mca.h>
#include <asm/sal.h>
#include <asm/sn/sn_sal.h>
/*
* Interval for calling SAL to poll for errors that do NOT cause error
* interrupts. SAL will raise a CPEI if any errors are present that
* need to be logged.
*/
#define CPEI_INTERVAL (5*HZ)
struct timer_list sn_cpei_timer;
void sn_init_cpei_timer(void);
/* Printing oemdata from mca uses data that is not passed through SAL; it is
* global, so only one user is allowed at a time.
*/
static DEFINE_MUTEX(sn_oemdata_mutex);
static u8 **sn_oemdata;
static u64 *sn_oemdata_size, sn_oemdata_bufsize;
/*
* print_hook
*
* This function is the callback routine that SAL calls to log error
* info for platform errors. buf is appended to sn_oemdata, resizing as
* required.
* Note: this is a SAL to OS callback, running under the same rules as the SAL
* code. SAL calls are run with preempt disabled so this routine must not
* sleep. vmalloc can sleep, so print_hook cannot resize the output buffer
* itself; instead it must set the required size and return, letting the caller
* resize the buffer and redrive the SAL call.
*/
static int print_hook(const char *fmt, ...)
{
char buf[400];
int len;
va_list args;
va_start(args, fmt);
vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
len = strlen(buf);
if (*sn_oemdata_size + len <= sn_oemdata_bufsize)
memcpy(*sn_oemdata + *sn_oemdata_size, buf, len);
*sn_oemdata_size += len;
return 0;
}
static void sn_cpei_handler(int irq, void *devid, struct pt_regs *regs)
{
/*
* this function's sole purpose is to call SAL when we receive
* a CE interrupt from SHUB or when the timer routine decides
* we need to call SAL to check for CEs.
*/
/* CALL SAL_LOG_CE */
ia64_sn_plat_cpei_handler();
}
static void sn_cpei_timer_handler(struct timer_list *unused)
{
sn_cpei_handler(-1, NULL, NULL);
mod_timer(&sn_cpei_timer, jiffies + CPEI_INTERVAL);
}
void sn_init_cpei_timer(void)
{
timer_setup(&sn_cpei_timer, sn_cpei_timer_handler, 0);
sn_cpei_timer.expires = jiffies + CPEI_INTERVAL;
add_timer(&sn_cpei_timer);
}
static int
sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata,
u64 * oemdata_size)
{
mutex_lock(&sn_oemdata_mutex);
sn_oemdata = oemdata;
sn_oemdata_size = oemdata_size;
sn_oemdata_bufsize = 0;
*sn_oemdata_size = PAGE_SIZE; /* first guess at how much data will be generated */
while (*sn_oemdata_size > sn_oemdata_bufsize) {
u8 *newbuf = vmalloc(*sn_oemdata_size);
if (!newbuf) {
mutex_unlock(&sn_oemdata_mutex);
printk(KERN_ERR "%s: unable to extend sn_oemdata\n",
__func__);
return 1;
}
vfree(*sn_oemdata);
*sn_oemdata = newbuf;
sn_oemdata_bufsize = *sn_oemdata_size;
*sn_oemdata_size = 0;
ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header);
}
mutex_unlock(&sn_oemdata_mutex);
return 0;
}
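/*
 * Illustrative sketch of the measure-then-fill pattern used above: run
 * the producer once to learn the required size, grow the buffer, and
 * redrive until the output fits. 'render_report()' is a hypothetical
 * producer that, like print_hook(), appends while the data fits but
 * always accounts the full size.
 */
extern void render_report(void);	/* hypothetical */

static u8 *report_buf;
static u64 report_size, report_bufsize;

static int build_report(void)
{
	report_size = PAGE_SIZE;	/* first guess */
	while (report_size > report_bufsize) {
		u8 *newbuf = vmalloc(report_size);

		if (!newbuf)
			return -ENOMEM;
		vfree(report_buf);		/* drop the too-small buffer */
		report_buf = newbuf;
		report_bufsize = report_size;
		report_size = 0;		/* producer re-accumulates */
		render_report();
	}
	return 0;
}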
/* Callback when userspace salinfo wants to decode oem data via the platform
* kernel and/or prom.
*/
int sn_salinfo_platform_oemdata(const u8 *sect_header, u8 **oemdata, u64 *oemdata_size)
{
efi_guid_t guid = *(efi_guid_t *)sect_header;
int valid = 0;
*oemdata_size = 0;
vfree(*oemdata);
*oemdata = NULL;
if (efi_guidcmp(guid, SAL_PLAT_SPECIFIC_ERR_SECT_GUID) == 0) {
sal_log_plat_specific_err_info_t *psei = (sal_log_plat_specific_err_info_t *)sect_header;
valid = psei->valid.oem_data;
} else if (efi_guidcmp(guid, SAL_PLAT_MEM_DEV_ERR_SECT_GUID) == 0) {
sal_log_mem_dev_err_info_t *mdei = (sal_log_mem_dev_err_info_t *)sect_header;
valid = mdei->valid.oem_data;
}
if (valid)
return sn_platform_plat_specific_err_print(sect_header, oemdata, oemdata_size);
else
return 0;
}
static int __init sn_salinfo_init(void)
{
if (ia64_platform_is("sn2"))
salinfo_platform_oemdata = &sn_salinfo_platform_oemdata;
return 0;
}
device_initcall(sn_salinfo_init);

View file

@ -1,238 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2006 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/types.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/cpumask.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <asm/sn/addrs.h>
#include <asm/sn/intr.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/nodepda.h>
struct sn_msi_info {
u64 pci_addr;
struct sn_irq_info *sn_irq_info;
};
static struct sn_msi_info sn_msi_info[NR_IRQS];
static struct irq_chip sn_msi_chip;
void sn_teardown_msi_irq(unsigned int irq)
{
nasid_t nasid;
int widget;
struct pci_dev *pdev;
struct pcidev_info *sn_pdev;
struct sn_irq_info *sn_irq_info;
struct pcibus_bussoft *bussoft;
struct sn_pcibus_provider *provider;
sn_irq_info = sn_msi_info[irq].sn_irq_info;
if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0)
return;
sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
pdev = sn_pdev->pdi_linux_pcidev;
provider = SN_PCIDEV_BUSPROVIDER(pdev);
(*provider->dma_unmap)(pdev,
sn_msi_info[irq].pci_addr,
PCI_DMA_FROMDEVICE);
sn_msi_info[irq].pci_addr = 0;
bussoft = SN_PCIDEV_BUSSOFT(pdev);
nasid = NASID_GET(bussoft->bs_base);
widget = (nasid & 1) ?
TIO_SWIN_WIDGETNUM(bussoft->bs_base) :
SWIN_WIDGETNUM(bussoft->bs_base);
sn_intr_free(nasid, widget, sn_irq_info);
sn_msi_info[irq].sn_irq_info = NULL;
destroy_irq(irq);
}
int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry)
{
struct msi_msg msg;
int widget;
int status;
nasid_t nasid;
u64 bus_addr;
struct sn_irq_info *sn_irq_info;
struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(pdev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
int irq;
if (!entry->msi_attrib.is_64)
return -EINVAL;
if (bussoft == NULL)
return -EINVAL;
if (provider == NULL || provider->dma_map_consistent == NULL)
return -EINVAL;
irq = create_irq();
if (irq < 0)
return irq;
/*
* Set up the vector plumbing. Let the prom (via sn_intr_alloc)
* decide which cpu to direct this msi at by default.
*/
nasid = NASID_GET(bussoft->bs_base);
widget = (nasid & 1) ?
TIO_SWIN_WIDGETNUM(bussoft->bs_base) :
SWIN_WIDGETNUM(bussoft->bs_base);
sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
if (! sn_irq_info) {
destroy_irq(irq);
return -ENOMEM;
}
status = sn_intr_alloc(nasid, widget, sn_irq_info, irq, -1, -1);
if (status) {
kfree(sn_irq_info);
destroy_irq(irq);
return -ENOMEM;
}
sn_irq_info->irq_int_bit = -1; /* mark this as an MSI irq */
sn_irq_fixup(pdev, sn_irq_info);
/* Prom probably should fill these in, but doesn't ... */
sn_irq_info->irq_bridge_type = bussoft->bs_asic_type;
sn_irq_info->irq_bridge = (void *)bussoft->bs_base;
/*
* Map the xio address into bus space
*/
bus_addr = (*provider->dma_map_consistent)(pdev,
sn_irq_info->irq_xtalkaddr,
sizeof(sn_irq_info->irq_xtalkaddr),
SN_DMA_MSI|SN_DMA_ADDR_XIO);
if (! bus_addr) {
sn_intr_free(nasid, widget, sn_irq_info);
kfree(sn_irq_info);
destroy_irq(irq);
return -ENOMEM;
}
sn_msi_info[irq].sn_irq_info = sn_irq_info;
sn_msi_info[irq].pci_addr = bus_addr;
msg.address_hi = (u32)(bus_addr >> 32);
msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);
/*
* In the SN platform, bit 16 is a "send vector" bit which
* must be present in order to move the vector through the system.
*/
msg.data = 0x100 + irq;
irq_set_msi_desc(irq, entry);
pci_write_msi_msg(irq, &msg);
irq_set_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq);
return 0;
}
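/*
 * Aside (illustrative, not from the original file): the error paths in
 * sn_setup_msi_irq() above repeat their cleanup at each failure point.
 * The equivalent and more common kernel structure is a goto unwind
 * ladder; 'hw_alloc_vector()' below is a hypothetical stand-in for the
 * sn_intr_alloc()/dma_map steps.
 */
extern int hw_alloc_vector(void *info);	/* hypothetical */

static int setup_with_unwind(void)
{
	void *info;
	int irq, err;

	irq = create_irq();
	if (irq < 0)
		return irq;

	info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto out_irq;
	}

	err = hw_alloc_vector(info);
	if (err)
		goto out_info;

	return 0;

out_info:
	kfree(info);
out_irq:
	destroy_irq(irq);
	return err;
}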
#ifdef CONFIG_SMP
static int sn_set_msi_irq_affinity(struct irq_data *data,
const struct cpumask *cpu_mask, bool force)
{
struct msi_msg msg;
int slice;
nasid_t nasid;
u64 bus_addr;
struct pci_dev *pdev;
struct pcidev_info *sn_pdev;
struct sn_irq_info *sn_irq_info;
struct sn_irq_info *new_irq_info;
struct sn_pcibus_provider *provider;
unsigned int cpu, irq = data->irq;
cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
sn_irq_info = sn_msi_info[irq].sn_irq_info;
if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0)
return -1;
/*
* Release XIO resources for the old MSI PCI address
*/
__get_cached_msi_msg(irq_data_get_msi_desc(data), &msg);
sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
pdev = sn_pdev->pdi_linux_pcidev;
provider = SN_PCIDEV_BUSPROVIDER(pdev);
bus_addr = (u64)(msg.address_hi) << 32 | (u64)(msg.address_lo);
(*provider->dma_unmap)(pdev, bus_addr, PCI_DMA_FROMDEVICE);
sn_msi_info[irq].pci_addr = 0;
nasid = cpuid_to_nasid(cpu);
slice = cpuid_to_slice(cpu);
new_irq_info = sn_retarget_vector(sn_irq_info, nasid, slice);
sn_msi_info[irq].sn_irq_info = new_irq_info;
if (new_irq_info == NULL)
return -1;
/*
* Map the xio address into bus space
*/
bus_addr = (*provider->dma_map_consistent)(pdev,
new_irq_info->irq_xtalkaddr,
sizeof(new_irq_info->irq_xtalkaddr),
SN_DMA_MSI|SN_DMA_ADDR_XIO);
sn_msi_info[irq].pci_addr = bus_addr;
msg.address_hi = (u32)(bus_addr >> 32);
msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);
pci_write_msi_msg(irq, &msg);
cpumask_copy(irq_data_get_affinity_mask(data), cpu_mask);
return 0;
}
#endif /* CONFIG_SMP */
static void sn_ack_msi_irq(struct irq_data *data)
{
irq_move_irq(data);
ia64_eoi();
}
static int sn_msi_retrigger_irq(struct irq_data *data)
{
unsigned int vector = data->irq;
ia64_resend_irq(vector);
return 1;
}
static struct irq_chip sn_msi_chip = {
.name = "PCI-MSI",
.irq_mask = pci_msi_mask_irq,
.irq_unmask = pci_msi_unmask_irq,
.irq_ack = sn_ack_msi_irq,
#ifdef CONFIG_SMP
.irq_set_affinity = sn_set_msi_irq_affinity,
#endif
.irq_retrigger = sn_msi_retrigger_irq,
};

View file

@ -1,71 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
*
* This file contains macros used to access MMR registers via
* uncached physical addresses.
* pio_phys_read_mmr - read an MMR
* pio_phys_write_mmr - write an MMR
* pio_atomic_phys_write_mmrs - atomically write 1 or 2 MMRs with psr.ic=0
* Second MMR will be skipped if address is NULL
*
* Addresses passed to these routines should be uncached physical addresses
* i.e., 0x80000....
*/
#include <asm/asmmacro.h>
#include <asm/page.h>
GLOBAL_ENTRY(pio_phys_read_mmr)
.prologue
.regstk 1,0,0,0
.body
mov r2=psr
rsm psr.i | psr.dt
;;
srlz.d
ld8.acq r8=[r32]
;;
mov psr.l=r2;;
srlz.d
br.ret.sptk.many rp
END(pio_phys_read_mmr)
GLOBAL_ENTRY(pio_phys_write_mmr)
.prologue
.regstk 2,0,0,0
.body
mov r2=psr
rsm psr.i | psr.dt
;;
srlz.d
st8.rel [r32]=r33
;;
mov psr.l=r2;;
srlz.d
br.ret.sptk.many rp
END(pio_phys_write_mmr)
GLOBAL_ENTRY(pio_atomic_phys_write_mmrs)
.prologue
.regstk 4,0,0,0
.body
mov r2=psr
cmp.ne p9,p0=r34,r0;
rsm psr.i | psr.dt | psr.ic
;;
srlz.d
st8.rel [r32]=r33
(p9) st8.rel [r34]=r35
;;
mov psr.l=r2;;
srlz.d
br.ret.sptk.many rp
END(pio_atomic_phys_write_mmrs)
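// Usage sketch (illustrative): callers reach these routines through C
// prototypes -- assumed here to match <asm/sn/rw_mmr.h> -- and must pass
// uncached physical (region 7) addresses, e.g.:
//
//	extern long pio_phys_read_mmr(volatile long *mmr);
//	extern void pio_phys_write_mmr(volatile long *mmr, long val);
//	extern void pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1,
//					       volatile long *mmr2, long val2);
//
//	val = pio_phys_read_mmr((volatile long *)
//				GLOBAL_MMR_PHYS_ADDR(nasid, SH1_PTC_0));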

View file

@ -1,786 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1999,2001-2006 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/kdev_t.h>
#include <linux/string.h>
#include <linux/screen_info.h>
#include <linux/console.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/serial.h>
#include <linux/irq.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/interrupt.h>
#include <linux/acpi.h>
#include <linux/compiler.h>
#include <linux/root_dev.h>
#include <linux/nodemask.h>
#include <linux/pm.h>
#include <linux/efi.h>
#include <asm/io.h>
#include <asm/sal.h>
#include <asm/machvec.h>
#include <asm/processor.h>
#include <asm/vga.h>
#include <asm/setup.h>
#include <asm/sn/arch.h>
#include <asm/sn/addrs.h>
#include <asm/sn/pda.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/simulator.h>
#include <asm/sn/leds.h>
#include <asm/sn/bte.h>
#include <asm/sn/shub_mmr.h>
#include <asm/sn/clksupport.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/geo.h>
#include <asm/sn/sn_feature_sets.h>
#include "xtalk/xwidgetdev.h"
#include "xtalk/hubdev.h"
#include <asm/sn/klconfig.h>
DEFINE_PER_CPU(struct pda_s, pda_percpu);
#define MAX_PHYS_MEMORY (1UL << IA64_MAX_PHYS_BITS) /* Max physical address supported */
extern void bte_init_node(nodepda_t *, cnodeid_t);
extern void sn_timer_init(void);
extern unsigned long last_time_offset;
extern void (*ia64_mark_idle) (int);
extern void snidle(int);
unsigned long sn_rtc_cycles_per_second;
EXPORT_SYMBOL(sn_rtc_cycles_per_second);
DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
EXPORT_PER_CPU_SYMBOL(__sn_hub_info);
DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid);
DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda);
EXPORT_PER_CPU_SYMBOL(__sn_nodepda);
char sn_system_serial_number_string[128];
EXPORT_SYMBOL(sn_system_serial_number_string);
u64 sn_partition_serial_number;
EXPORT_SYMBOL(sn_partition_serial_number);
u8 sn_partition_id;
EXPORT_SYMBOL(sn_partition_id);
u8 sn_system_size;
EXPORT_SYMBOL(sn_system_size);
u8 sn_sharing_domain_size;
EXPORT_SYMBOL(sn_sharing_domain_size);
u8 sn_coherency_id;
EXPORT_SYMBOL(sn_coherency_id);
u8 sn_region_size;
EXPORT_SYMBOL(sn_region_size);
int sn_prom_type; /* 0=hardware, 1=medusa/realprom, 2=medusa/fakeprom */
short physical_node_map[MAX_NUMALINK_NODES];
static unsigned long sn_prom_features[MAX_PROM_FEATURE_SETS];
EXPORT_SYMBOL(physical_node_map);
int num_cnodes;
static void sn_init_pdas(char **);
static void build_cnode_tables(void);
static nodepda_t *nodepdaindr[MAX_COMPACT_NODES];
/*
* The format of "screen_info" is strange, and due to early i386-setup
* code. This is just enough to make the console code think we're on a
* VGA color display.
*/
struct screen_info sn_screen_info = {
.orig_x = 0,
.orig_y = 0,
.orig_video_mode = 3,
.orig_video_cols = 80,
.orig_video_ega_bx = 3,
.orig_video_lines = 25,
.orig_video_isVGA = 1,
.orig_video_points = 16
};
/*
* This routine can only be used during init, since
* smp_boot_data is an init data structure.
* We have to use smp_boot_data.cpu_phys_id to find
* the physical id of the processor because the normal
* cpu_physical_id() relies on data structures that
* may not be initialized yet.
*/
static int __init pxm_to_nasid(int pxm)
{
int i;
int nid;
nid = pxm_to_node(pxm);
for (i = 0; i < num_node_memblks; i++) {
if (node_memblk[i].nid == nid) {
return NASID_GET(node_memblk[i].start_paddr);
}
}
return -1;
}
/**
* early_sn_setup - early setup routine for SN platforms
*
* Sets up an initial console to aid debugging. Intended primarily
* for bringup. See start_kernel() in init/main.c.
*/
void __init early_sn_setup(void)
{
efi_system_table_t *efi_systab;
efi_config_table_t *config_tables;
struct ia64_sal_systab *sal_systab;
struct ia64_sal_desc_entry_point *ep;
char *p;
int i, j;
/*
* Parse enough of the SAL tables to locate the SAL entry point. Since console
* IO on SN2 is done via SAL calls, early_printk won't work without this.
*
* This code duplicates some of the ACPI table parsing that is in efi.c & sal.c.
* Any changes to those files may have to be made here as well.
*/
efi_systab = (efi_system_table_t *) __va(ia64_boot_param->efi_systab);
config_tables = __va(efi_systab->tables);
for (i = 0; i < efi_systab->nr_tables; i++) {
if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) ==
0) {
sal_systab = __va(config_tables[i].table);
p = (char *)(sal_systab + 1);
for (j = 0; j < sal_systab->entry_count; j++) {
if (*p == SAL_DESC_ENTRY_POINT) {
ep = (struct ia64_sal_desc_entry_point
*)p;
ia64_sal_handler_init(__va
(ep->sal_proc),
__va(ep->gp));
return;
}
p += SAL_DESC_SIZE(*p);
}
}
}
/* Uh-oh, SAL not available?? */
printk(KERN_ERR "failed to find SAL entry point\n");
}
extern int platform_intr_list[];
static int shub_1_1_found;
/*
* sn_check_for_wars
*
* Set flag for enabling shub specific wars
*/
static inline int is_shub_1_1(int nasid)
{
unsigned long id;
int rev;
if (is_shub2())
return 0;
id = REMOTE_HUB_L(nasid, SH1_SHUB_ID);
rev = (id & SH1_SHUB_ID_REVISION_MASK) >> SH1_SHUB_ID_REVISION_SHFT;
return rev <= 2;
}
static void sn_check_for_wars(void)
{
int cnode;
if (is_shub2()) {
/* none yet */
} else {
for_each_online_node(cnode) {
if (is_shub_1_1(cnodeid_to_nasid(cnode)))
shub_1_1_found = 1;
}
}
}
/*
* Scan the EFI PCDP table (if it exists) for an acceptable VGA console
* output device. If one exists, pick it and set sn_legacy_{io,mem} to
* reflect the bus offsets needed to address it.
*
* Since pcdp support in SN is not available in the 2.4 kernel (or at least
* the one lbs is based on), just declare the needed structs here.
*
* Reference spec http://www.dig64.org/specifications/DIG64_PCDPv20.pdf
*
* Returns 0 if no acceptable vga is found, !0 otherwise.
*
* Note: This stuff is duped here because Altix requires the PCDP to
* locate a usable VGA device due to lack of proper ACPI support. Structures
* could be used from drivers/firmware/pcdp.h, but it was decided that moving
* this file to a more public location just for Altix use was undesirable.
*/
struct hcdp_uart_desc {
u8 pad[45];
};
struct pcdp {
u8 signature[4]; /* should be 'HCDP' */
u32 length;
u8 rev; /* should be >=3 for pcdp, <3 for hcdp */
u8 sum;
u8 oem_id[6];
u64 oem_tableid;
u32 oem_rev;
u32 creator_id;
u32 creator_rev;
u32 num_type0;
struct hcdp_uart_desc uart[0]; /* num_type0 of these */
/* pcdp descriptors follow */
} __attribute__((packed));
struct pcdp_device_desc {
u8 type;
u8 primary;
u16 length;
u16 index;
/* interconnect specific structure follows */
/* device specific structure follows that */
} __attribute__((packed));
struct pcdp_interface_pci {
u8 type; /* 1 == pci */
u8 reserved;
u16 length;
u8 segment;
u8 bus;
u8 dev;
u8 fun;
u16 devid;
u16 vendid;
u32 acpi_interrupt;
u64 mmio_tra;
u64 ioport_tra;
u8 flags;
u8 translation;
} __attribute__((packed));
struct pcdp_vga_device {
u8 num_eas_desc;
/* ACPI Extended Address Space Desc follows */
} __attribute__((packed));
/* from pcdp_device_desc.primary */
#define PCDP_PRIMARY_CONSOLE 0x01
/* from pcdp_device_desc.type */
#define PCDP_CONSOLE_INOUT 0x0
#define PCDP_CONSOLE_DEBUG 0x1
#define PCDP_CONSOLE_OUT 0x2
#define PCDP_CONSOLE_IN 0x3
#define PCDP_CONSOLE_TYPE_VGA 0x8
#define PCDP_CONSOLE_VGA (PCDP_CONSOLE_TYPE_VGA | PCDP_CONSOLE_OUT)
/* from pcdp_interface_pci.type */
#define PCDP_IF_PCI 1
/* from pcdp_interface_pci.translation */
#define PCDP_PCI_TRANS_IOPORT 0x02
#define PCDP_PCI_TRANS_MMIO 0x01
#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
static void
sn_scan_pcdp(void)
{
u8 *bp;
struct pcdp *pcdp;
struct pcdp_device_desc device;
struct pcdp_interface_pci if_pci;
extern struct efi efi;
if (efi.hcdp == EFI_INVALID_TABLE_ADDR)
return; /* no hcdp/pcdp table */
pcdp = __va(efi.hcdp);
if (pcdp->rev < 3)
return; /* only support PCDP (rev >= 3) */
for (bp = (u8 *)&pcdp->uart[pcdp->num_type0];
bp < (u8 *)pcdp + pcdp->length;
bp += device.length) {
memcpy(&device, bp, sizeof(device));
if (! (device.primary & PCDP_PRIMARY_CONSOLE))
continue; /* not primary console */
if (device.type != PCDP_CONSOLE_VGA)
continue; /* not VGA descriptor */
memcpy(&if_pci, bp+sizeof(device), sizeof(if_pci));
if (if_pci.type != PCDP_IF_PCI)
continue; /* not PCI interconnect */
if (if_pci.translation & PCDP_PCI_TRANS_IOPORT)
vga_console_iobase = if_pci.ioport_tra;
if (if_pci.translation & PCDP_PCI_TRANS_MMIO)
vga_console_membase =
if_pci.mmio_tra | __IA64_UNCACHED_OFFSET;
break; /* once we find the primary, we're done */
}
}
#endif
static unsigned long sn2_rtc_initial;
/**
* sn_setup - SN platform setup routine
* @cmdline_p: kernel command line
*
* Handles platform setup for SN machines. This includes determining
* the RTC frequency (via a SAL call), initializing secondary CPUs, and
* setting up per-node data areas. The console is also initialized here.
*/
void __init sn_setup(char **cmdline_p)
{
long status, ticks_per_sec, drift;
u32 version = sn_sal_rev();
extern void sn_cpu_init(void);
sn2_rtc_initial = rtc_time();
ia64_sn_plat_set_error_handling_features(); // obsolete
ia64_sn_set_os_feature(OSF_MCA_SLV_TO_OS_INIT_SLV);
ia64_sn_set_os_feature(OSF_FEAT_LOG_SBES);
/*
* Note: The calls to notify the PROM of ACPI and PCI Segment
* support must be done prior to acpi_load_tables(), as
* an ACPI capable PROM will rebuild the DSDT as result
* of the call.
*/
ia64_sn_set_os_feature(OSF_PCISEGMENT_ENABLE);
ia64_sn_set_os_feature(OSF_ACPI_ENABLE);
/* Load the new DSDT and SSDT tables into the global table list. */
acpi_table_init();
#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
/*
* Handle SN vga console.
*
* SN systems do not have enough ACPI table information
* being passed from prom to identify VGA adapters and the legacy
* addresses to access them. Until that is done, SN systems rely
* on the PCDP table to identify the primary VGA console if one
* exists.
*
* However, kernel PCDP support is optional, and even if it is built
* into the kernel, it will not be used if the boot cmdline contains
* console= directives.
*
* So, to work around this mess, we duplicate some of the PCDP code
* here so that the primary VGA console (as defined by PCDP) will
* work on SN systems even if a different console (e.g. serial) is
* selected on the boot line (or CONFIG_EFI_PCDP is off).
*/
if (! vga_console_membase)
sn_scan_pcdp();
/*
* Setup legacy IO space.
* vga_console_iobase maps to PCI IO Space address 0 on the
* bus containing the VGA console.
*/
if (vga_console_iobase) {
io_space[0].mmio_base =
(unsigned long) ioremap(vga_console_iobase, 0);
io_space[0].sparse = 0;
}
if (vga_console_membase) {
/* usable vga ... make tty0 the preferred default console */
if (!strstr(*cmdline_p, "console="))
add_preferred_console("tty", 0, NULL);
} else {
printk(KERN_DEBUG "SGI: Disabling VGA console\n");
if (!strstr(*cmdline_p, "console="))
add_preferred_console("ttySG", 0, NULL);
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
#else
conswitchp = NULL;
#endif /* CONFIG_DUMMY_CONSOLE */
}
#endif /* def(CONFIG_VT) && def(CONFIG_VGA_CONSOLE) */
MAX_DMA_ADDRESS = PAGE_OFFSET + MAX_PHYS_MEMORY;
/*
* Build the tables for managing cnodes.
*/
build_cnode_tables();
status =
ia64_sal_freq_base(SAL_FREQ_BASE_REALTIME_CLOCK, &ticks_per_sec,
&drift);
if (status != 0 || ticks_per_sec < 100000) {
printk(KERN_WARNING
"unable to determine platform RTC clock frequency, guessing.\n");
/* PROM gives wrong value for clock frequency, so guess */
sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
} else
sn_rtc_cycles_per_second = ticks_per_sec;
platform_intr_list[ACPI_INTERRUPT_CPEI] = IA64_CPE_VECTOR;
printk("SGI SAL version %x.%02x\n", version >> 8, version & 0x00FF);
/*
* we set the default root device to /dev/hda
* to make simulation easy
*/
ROOT_DEV = Root_HDA1;
/*
* Create the PDAs and NODEPDAs for all the cpus.
*/
sn_init_pdas(cmdline_p);
ia64_mark_idle = &snidle;
/*
* For the bootcpu, we do this here. All other cpus will make the
* call as part of cpu_init in slave cpu initialization.
*/
sn_cpu_init();
#ifdef CONFIG_SMP
init_smp_config();
#endif
screen_info = sn_screen_info;
sn_timer_init();
/*
* set pm_power_off to a SAL call to allow
* sn machines to power off. The SAL call can be replaced
* by an ACPI interface call when ACPI is fully implemented
* for sn.
*/
pm_power_off = ia64_sn_power_down;
current->thread.flags |= IA64_THREAD_MIGRATION;
}
/**
* sn_init_pdas - setup node data areas
*
* One time setup for Node Data Area. Called by sn_setup().
*/
static void __init sn_init_pdas(char **cmdline_p)
{
cnodeid_t cnode;
/*
* Allocate & initialize the nodepda for each node.
*/
for_each_online_node(cnode) {
nodepdaindr[cnode] =
memblock_alloc_node(sizeof(nodepda_t), SMP_CACHE_BYTES,
cnode);
if (!nodepdaindr[cnode])
panic("%s: Failed to allocate %lu bytes align=0x%x nid=%d\n",
__func__, sizeof(nodepda_t), SMP_CACHE_BYTES,
cnode);
memset(nodepdaindr[cnode]->phys_cpuid, -1,
sizeof(nodepdaindr[cnode]->phys_cpuid));
spin_lock_init(&nodepdaindr[cnode]->ptc_lock);
}
/*
* Allocate & initialize nodepda for TIOs. For now, put them on node 0.
*/
for (cnode = num_online_nodes(); cnode < num_cnodes; cnode++) {
nodepdaindr[cnode] =
memblock_alloc_node(sizeof(nodepda_t), SMP_CACHE_BYTES, 0);
if (!nodepdaindr[cnode])
panic("%s: Failed to allocate %lu bytes align=0x%x nid=%d\n",
__func__, sizeof(nodepda_t), SMP_CACHE_BYTES,
cnode);
}
/*
* Now copy the array of nodepda pointers to each nodepda.
*/
for (cnode = 0; cnode < num_cnodes; cnode++)
memcpy(nodepdaindr[cnode]->pernode_pdaindr, nodepdaindr,
sizeof(nodepdaindr));
/*
* Set up IO related platform-dependent nodepda fields.
* The following routine actually sets up the hubinfo struct
* in nodepda.
*/
for_each_online_node(cnode) {
bte_init_node(nodepdaindr[cnode], cnode);
}
/*
* Initialize the per node hubdev. This includes IO Nodes and
* headless/memless nodes.
*/
for (cnode = 0; cnode < num_cnodes; cnode++) {
hubdev_init_node(nodepdaindr[cnode], cnode);
}
}
/**
* sn_cpu_init - initialize per-cpu data areas
* @cpuid: cpuid of the caller
*
* Called during cpu initialization on each cpu as it starts.
* Currently, initializes the per-cpu data area for SNIA.
* Also sets up a few fields in the nodepda. Also known as
* platform_cpu_init() by the ia64 machvec code.
*/
void sn_cpu_init(void)
{
int cpuid;
int cpuphyid;
int nasid;
int subnode;
int slice;
int cnode;
int i;
static int wars_have_been_checked, set_cpu0_number;
cpuid = smp_processor_id();
if (cpuid == 0 && IS_MEDUSA()) {
if (ia64_sn_is_fake_prom())
sn_prom_type = 2;
else
sn_prom_type = 1;
printk(KERN_INFO "Running on medusa with %s PROM\n",
(sn_prom_type == 1) ? "real" : "fake");
}
memset(pda, 0, sizeof(*pda));
if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2,
&sn_hub_info->nasid_bitmask,
&sn_hub_info->nasid_shift,
&sn_system_size, &sn_sharing_domain_size,
&sn_partition_id, &sn_coherency_id,
&sn_region_size))
BUG();
sn_hub_info->as_shift = sn_hub_info->nasid_shift - 2;
/*
* Don't check status. The SAL call is not supported on all PROMs
* but a failure is harmless.
* Architecturally, cpu_init is always called twice on cpu 0, so we
* set cpu_number on cpu 0 only once.
*/
if (cpuid == 0) {
if (!set_cpu0_number) {
(void) ia64_sn_set_cpu_number(cpuid);
set_cpu0_number = 1;
}
} else
(void) ia64_sn_set_cpu_number(cpuid);
/*
* The boot cpu makes this call again after platform initialization is
* complete.
*/
if (nodepdaindr[0] == NULL)
return;
for (i = 0; i < MAX_PROM_FEATURE_SETS; i++)
if (ia64_sn_get_prom_feature_set(i, &sn_prom_features[i]) != 0)
break;
cpuphyid = get_sapicid();
if (ia64_sn_get_sapic_info(cpuphyid, &nasid, &subnode, &slice))
BUG();
for (i=0; i < MAX_NUMNODES; i++) {
if (nodepdaindr[i]) {
nodepdaindr[i]->phys_cpuid[cpuid].nasid = nasid;
nodepdaindr[i]->phys_cpuid[cpuid].slice = slice;
nodepdaindr[i]->phys_cpuid[cpuid].subnode = subnode;
}
}
cnode = nasid_to_cnodeid(nasid);
__this_cpu_write(__sn_nodepda, nodepdaindr[cnode]);
pda->led_address =
(typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT));
pda->led_state = LED_ALWAYS_SET;
pda->hb_count = HZ / 2;
pda->hb_state = 0;
pda->idle_flag = 0;
if (cpuid != 0) {
/* copy cpu 0's sn_cnodeid_to_nasid table to this cpu's */
memcpy(sn_cnodeid_to_nasid,
(&per_cpu(__sn_cnodeid_to_nasid, 0)),
sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid)));
}
/*
* Check for WARs.
* Only needs to be done once, on BSP.
* Has to be done after loop above, because it uses this cpu's
* sn_cnodeid_to_nasid table which was just initialized if this
* isn't cpu 0.
* Has to be done before assignment below.
*/
if (!wars_have_been_checked) {
sn_check_for_wars();
wars_have_been_checked = 1;
}
sn_hub_info->shub_1_1_found = shub_1_1_found;
/*
* Set up addresses of PIO/MEM write status registers.
*/
{
u64 pio1[] = {SH1_PIO_WRITE_STATUS_0, 0, SH1_PIO_WRITE_STATUS_1, 0};
u64 pio2[] = {SH2_PIO_WRITE_STATUS_0, SH2_PIO_WRITE_STATUS_2,
SH2_PIO_WRITE_STATUS_1, SH2_PIO_WRITE_STATUS_3};
u64 *pio;
pio = is_shub1() ? pio1 : pio2;
pda->pio_write_status_addr =
(volatile unsigned long *)GLOBAL_MMR_ADDR(nasid, pio[slice]);
pda->pio_write_status_val = is_shub1() ? SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK : 0;
}
/*
* WAR addresses for SHUB 1.x.
*/
if (local_node_data->active_cpu_count++ == 0 && is_shub1()) {
int buddy_nasid;
buddy_nasid =
cnodeid_to_nasid(numa_node_id() ==
num_online_nodes() - 1 ? 0 : numa_node_id() + 1);
pda->pio_shub_war_cam_addr =
(volatile unsigned long *)GLOBAL_MMR_ADDR(nasid,
SH1_PI_CAM_CONTROL);
}
}
/*
* Build tables for converting between NASIDs and cnodes.
*/
static inline int __init board_needs_cnode(int type)
{
return (type == KLTYPE_SNIA || type == KLTYPE_TIO);
}
void __init build_cnode_tables(void)
{
int nasid;
int node;
lboard_t *brd;
memset(physical_node_map, -1, sizeof(physical_node_map));
memset(sn_cnodeid_to_nasid, -1,
sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid)));
/*
* First populate the tables with C/M bricks. This ensures that
* cnode == node for all C & M bricks.
*/
for_each_online_node(node) {
nasid = pxm_to_nasid(node_to_pxm(node));
sn_cnodeid_to_nasid[node] = nasid;
physical_node_map[nasid] = node;
}
/*
* num_cnodes is total number of C/M/TIO bricks. Because of the 256 node
* limit on the number of nodes, we can't use the generic node numbers
* for this. Note that num_cnodes is incremented below as TIOs or
* headless/memoryless nodes are discovered.
*/
num_cnodes = num_online_nodes();
/* fakeprom does not support klgraph */
if (IS_RUNNING_ON_FAKE_PROM())
return;
/* Find TIOs & headless/memoryless nodes and add them to the tables */
for_each_online_node(node) {
kl_config_hdr_t *klgraph_header;
nasid = cnodeid_to_nasid(node);
klgraph_header = ia64_sn_get_klconfig_addr(nasid);
BUG_ON(klgraph_header == NULL);
brd = NODE_OFFSET_TO_LBOARD(nasid, klgraph_header->ch_board_info);
while (brd) {
if (board_needs_cnode(brd->brd_type) && physical_node_map[brd->brd_nasid] < 0) {
sn_cnodeid_to_nasid[num_cnodes] = brd->brd_nasid;
physical_node_map[brd->brd_nasid] = num_cnodes++;
}
brd = find_lboard_next(brd);
}
}
}
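/*
 * Illustrative sketch (not from the original file): the two tables
 * built above are inverses of each other for every discovered brick,
 * so a round-trip through both recovers the starting nasid.
 */
static int __maybe_unused cnode_tables_demo(int nasid)
{
	int node = physical_node_map[nasid];	/* nasid -> cnode, -1 if absent */

	if (node < 0)
		return -1;
	BUG_ON(sn_cnodeid_to_nasid[node] != nasid);	/* cnode -> nasid */
	return node;
}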
int
nasid_slice_to_cpuid(int nasid, int slice)
{
long cpu;
for (cpu = 0; cpu < nr_cpu_ids; cpu++)
if (cpuid_to_nasid(cpu) == nasid &&
cpuid_to_slice(cpu) == slice)
return cpu;
return -1;
}
int sn_prom_feature_available(int id)
{
if (id >= BITS_PER_LONG * MAX_PROM_FEATURE_SETS)
return 0;
return test_bit(id, sn_prom_features);
}
void
sn_kernel_launch_event(void)
{
/* ignore status until we understand possible failures, if any */
if (ia64_sn_kernel_launch_event())
printk(KERN_ERR "KEXEC is not supported in this PROM, Please update the PROM.\n");
}
EXPORT_SYMBOL(sn_prom_feature_available);

View file

@ -1,13 +0,0 @@
# arch/ia64/sn/kernel/sn2/Makefile
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 1999,2001-2002 Silicon Graphics, Inc. All rights reserved.
#
# sn2 specific kernel files
#
obj-y += cache.o io.o ptc_deadlock.o sn2_smp.o sn_proc_fs.o \
prominfo_proc.o timer.o timer_interrupt.o sn_hwperf.o

View file

@ -1,41 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001-2003, 2006 Silicon Graphics, Inc. All rights reserved.
*
*/
#include <linux/module.h>
#include <asm/pgalloc.h>
#include <asm/sn/arch.h>
/**
* sn_flush_all_caches - flush a range of address from all caches (incl. L4)
* @flush_addr: identity mapped region 7 address to start flushing
* @bytes: number of bytes to flush
*
* Flush a range of addresses from all caches including L4.
* All addresses fully or partially contained within
* @flush_addr to @flush_addr + @bytes are flushed
* from all caches.
*/
void
sn_flush_all_caches(long flush_addr, long bytes)
{
unsigned long addr = flush_addr;
/* SHub1 requires a cached address */
if (is_shub1() && (addr & RGN_BITS) == RGN_BASE(RGN_UNCACHED))
addr = (addr - RGN_BASE(RGN_UNCACHED)) + RGN_BASE(RGN_KERNEL);
flush_icache_range(addr, addr + bytes);
/*
* The last call may have returned before the caches
* were actually flushed, so we call it again to make
* sure.
*/
flush_icache_range(addr, addr + bytes);
mb();
}
EXPORT_SYMBOL(sn_flush_all_caches);

View file

@ -1,101 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
*
* The generic kernel requires function pointers to these routines, so
* we wrap the inlines from asm/ia64/sn/sn2/io.h here.
*/
#include <asm/sn/io.h>
#ifdef CONFIG_IA64_GENERIC
#undef __sn_inb
#undef __sn_inw
#undef __sn_inl
#undef __sn_outb
#undef __sn_outw
#undef __sn_outl
#undef __sn_readb
#undef __sn_readw
#undef __sn_readl
#undef __sn_readq
#undef __sn_readb_relaxed
#undef __sn_readw_relaxed
#undef __sn_readl_relaxed
#undef __sn_readq_relaxed
unsigned int __sn_inb(unsigned long port)
{
return ___sn_inb(port);
}
unsigned int __sn_inw(unsigned long port)
{
return ___sn_inw(port);
}
unsigned int __sn_inl(unsigned long port)
{
return ___sn_inl(port);
}
void __sn_outb(unsigned char val, unsigned long port)
{
___sn_outb(val, port);
}
void __sn_outw(unsigned short val, unsigned long port)
{
___sn_outw(val, port);
}
void __sn_outl(unsigned int val, unsigned long port)
{
___sn_outl(val, port);
}
unsigned char __sn_readb(void __iomem *addr)
{
return ___sn_readb(addr);
}
unsigned short __sn_readw(void __iomem *addr)
{
return ___sn_readw(addr);
}
unsigned int __sn_readl(void __iomem *addr)
{
return ___sn_readl(addr);
}
unsigned long __sn_readq(void __iomem *addr)
{
return ___sn_readq(addr);
}
unsigned char __sn_readb_relaxed(void __iomem *addr)
{
return ___sn_readb_relaxed(addr);
}
unsigned short __sn_readw_relaxed(void __iomem *addr)
{
return ___sn_readw_relaxed(addr);
}
unsigned int __sn_readl_relaxed(void __iomem *addr)
{
return ___sn_readl_relaxed(addr);
}
unsigned long __sn_readq_relaxed(void __iomem *addr)
{
return ___sn_readq_relaxed(addr);
}
#endif

View file

@ -1,207 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1999,2001-2004, 2006 Silicon Graphics, Inc. All Rights Reserved.
*
* Module to export the system's Firmware Interface Tables, including
* PROM revision numbers and banners, in /proc
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/nodemask.h>
#include <asm/io.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/addrs.h>
MODULE_DESCRIPTION("PROM version reporting for /proc");
MODULE_AUTHOR("Chad Talbott");
MODULE_LICENSE("GPL");
/* Standard Intel FIT entry types */
#define FIT_ENTRY_FIT_HEADER 0x00 /* FIT header entry */
#define FIT_ENTRY_PAL_B 0x01 /* PAL_B entry */
/* Entries 0x02 through 0x0D reserved by Intel */
#define FIT_ENTRY_PAL_A_PROC 0x0E /* Processor-specific PAL_A entry */
#define FIT_ENTRY_PAL_A 0x0F /* PAL_A entry, same as... */
#define FIT_ENTRY_PAL_A_GEN 0x0F /* ...Generic PAL_A entry */
#define FIT_ENTRY_UNUSED 0x7F /* Unused (reserved by Intel?) */
/* OEM-defined entries range from 0x10 to 0x7E. */
#define FIT_ENTRY_SAL_A 0x10 /* SAL_A entry */
#define FIT_ENTRY_SAL_B 0x11 /* SAL_B entry */
#define FIT_ENTRY_SALRUNTIME 0x12 /* SAL runtime entry */
#define FIT_ENTRY_EFI 0x1F /* EFI entry */
#define FIT_ENTRY_FPSWA 0x20 /* embedded fpswa entry */
#define FIT_ENTRY_VMLINUX 0x21 /* embedded vmlinux entry */
#define FIT_MAJOR_SHIFT (32 + 8)
#define FIT_MAJOR_MASK ((1 << 8) - 1)
#define FIT_MINOR_SHIFT 32
#define FIT_MINOR_MASK ((1 << 8) - 1)
#define FIT_MAJOR(q) \
((unsigned) ((q) >> FIT_MAJOR_SHIFT) & FIT_MAJOR_MASK)
#define FIT_MINOR(q) \
((unsigned) ((q) >> FIT_MINOR_SHIFT) & FIT_MINOR_MASK)
#define FIT_TYPE_SHIFT (32 + 16)
#define FIT_TYPE_MASK ((1 << 7) - 1)
#define FIT_TYPE(q) \
((unsigned) ((q) >> FIT_TYPE_SHIFT) & FIT_TYPE_MASK)
struct fit_type_map_t {
unsigned char type;
const char *name;
};
static const struct fit_type_map_t fit_entry_types[] = {
{FIT_ENTRY_FIT_HEADER, "FIT Header"},
{FIT_ENTRY_PAL_A_GEN, "Generic PAL_A"},
{FIT_ENTRY_PAL_A_PROC, "Processor-specific PAL_A"},
{FIT_ENTRY_PAL_A, "PAL_A"},
{FIT_ENTRY_PAL_B, "PAL_B"},
{FIT_ENTRY_SAL_A, "SAL_A"},
{FIT_ENTRY_SAL_B, "SAL_B"},
{FIT_ENTRY_SALRUNTIME, "SAL runtime"},
{FIT_ENTRY_EFI, "EFI"},
{FIT_ENTRY_VMLINUX, "Embedded Linux"},
{FIT_ENTRY_FPSWA, "Embedded FPSWA"},
{FIT_ENTRY_UNUSED, "Unused"},
{0xff, "Error"},
};
static const char *fit_type_name(unsigned char type)
{
struct fit_type_map_t const *mapp;
for (mapp = fit_entry_types; mapp->type != 0xff; mapp++)
if (type == mapp->type)
return mapp->name;
if ((type > FIT_ENTRY_PAL_A) && (type < FIT_ENTRY_UNUSED))
return "OEM type";
if ((type > FIT_ENTRY_PAL_B) && (type < FIT_ENTRY_PAL_A))
return "Reserved";
return "Unknown type";
}
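/*
 * Worked example (illustrative values): a FIT word encoding type SAL_A
 * with revision 4.05, decoded with the macros above.
 */
static void __maybe_unused fit_macro_demo(void)
{
	unsigned long q = (0x10UL << FIT_TYPE_SHIFT) |	/* FIT_ENTRY_SAL_A */
			  (4UL << FIT_MAJOR_SHIFT) |
			  (5UL << FIT_MINOR_SHIFT);

	/* FIT_TYPE(q) == 0x10, FIT_MAJOR(q) == 4, FIT_MINOR(q) == 5,
	 * so this prints "SAL_A 4.05". */
	printk(KERN_DEBUG "%s %x.%02x\n",
	       fit_type_name(FIT_TYPE(q)), FIT_MAJOR(q), FIT_MINOR(q));
}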
static int
get_fit_entry(unsigned long nasid, int index, unsigned long *fentry,
char *banner, int banlen)
{
return ia64_sn_get_fit_compt(nasid, index, fentry, banner, banlen);
}
/*
* These two routines display the FIT table for each node.
*/
static void dump_fit_entry(struct seq_file *m, unsigned long *fentry)
{
unsigned type;
type = FIT_TYPE(fentry[1]);
seq_printf(m, "%02x %-25s %x.%02x %016lx %u\n",
type,
fit_type_name(type),
FIT_MAJOR(fentry[1]), FIT_MINOR(fentry[1]),
fentry[0],
/* mult by sixteen to get size in bytes */
(unsigned)(fentry[1] & 0xffffff) * 16);
}
/*
* We assume that the fit table will be small enough that we can print
* the whole thing into one page. (This is true for our default 16kB
* pages -- each entry is about 60 chars wide when printed.) I read
* somewhere that the maximum size of the FIT is 128 entries, so we're
* OK except for 4kB pages (and no one is going to do that on SN
* anyway).
*/
static int proc_fit_show(struct seq_file *m, void *v)
{
unsigned long nasid = (unsigned long)m->private;
unsigned long fentry[2];
int index;
for (index=0;;index++) {
BUG_ON(index * 60 > PAGE_SIZE);
if (get_fit_entry(nasid, index, fentry, NULL, 0))
break;
dump_fit_entry(m, fentry);
}
return 0;
}
static int proc_version_show(struct seq_file *m, void *v)
{
unsigned long nasid = (unsigned long)m->private;
unsigned long fentry[2];
char banner[128];
int index;
for (index = 0; ; index++) {
if (get_fit_entry(nasid, index, fentry, banner,
sizeof(banner)))
return 0;
if (FIT_TYPE(fentry[1]) == FIT_ENTRY_SAL_A)
break;
}
seq_printf(m, "%x.%02x\n", FIT_MAJOR(fentry[1]), FIT_MINOR(fentry[1]));
if (banner[0])
seq_printf(m, "%s\n", banner);
return 0;
}
/* module entry points */
int __init prominfo_init(void);
void __exit prominfo_exit(void);
module_init(prominfo_init);
module_exit(prominfo_exit);
#define NODE_NAME_LEN 11
int __init prominfo_init(void)
{
struct proc_dir_entry *sgi_prominfo_entry;
cnodeid_t cnodeid;
if (!ia64_platform_is("sn2"))
return 0;
sgi_prominfo_entry = proc_mkdir("sgi_prominfo", NULL);
if (!sgi_prominfo_entry)
return -ENOMEM;
for_each_online_node(cnodeid) {
struct proc_dir_entry *dir;
unsigned long nasid;
char name[NODE_NAME_LEN];
sprintf(name, "node%d", cnodeid);
dir = proc_mkdir(name, sgi_prominfo_entry);
if (!dir)
continue;
nasid = cnodeid_to_nasid(cnodeid);
proc_create_single_data("fit", 0, dir, proc_fit_show,
(void *)nasid);
proc_create_single_data("version", 0, dir, proc_version_show,
(void *)nasid);
}
return 0;
}
void __exit prominfo_exit(void)
{
remove_proc_subtree("sgi_prominfo", NULL);
}

View file

@ -1,92 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
*/
#include <asm/types.h>
#include <asm/sn/shub_mmr.h>
#define DEADLOCKBIT SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_SHFT
#define WRITECOUNTMASK SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK
#define ALIAS_OFFSET 8
.global sn2_ptc_deadlock_recovery_core
.proc sn2_ptc_deadlock_recovery_core
sn2_ptc_deadlock_recovery_core:
.regstk 6,0,0,0
ptc0 = in0
data0 = in1
ptc1 = in2
data1 = in3
piowc = in4
zeroval = in5
piowcphy = r30
psrsave = r2
scr1 = r16
scr2 = r17
mask = r18
extr.u piowcphy=piowc,0,61;; // Convert piowc to uncached physical address
dep piowcphy=-1,piowcphy,63,1
movl mask=WRITECOUNTMASK
mov r8=r0
1:
cmp.ne p8,p9=r0,ptc1 // Test for shub type (ptc1 non-null on shub1)
// p8 = 1 if shub1, p9 = 1 if shub2
add scr2=ALIAS_OFFSET,piowc // Address of WRITE_STATUS alias register
mov scr1=7;; // Clear DEADLOCK, WRITE_ERROR, MULTI_WRITE_ERROR
(p8) st8.rel [scr2]=scr1;;
(p9) ld8.acq scr1=[scr2];;
5: ld8.acq scr1=[piowc];; // Wait for PIOs to complete.
hint @pause
and scr2=scr1,mask;; // mask of writecount bits
cmp.ne p6,p0=zeroval,scr2
(p6) br.cond.sptk 5b
////////////// BEGIN PHYSICAL MODE ////////////////////
mov psrsave=psr // Disable IC (no PMIs)
rsm psr.i | psr.dt | psr.ic;;
srlz.i;;
st8.rel [ptc0]=data0 // Write PTC0 & wait for completion.
5: ld8.acq scr1=[piowcphy];; // Wait for PIOs to complete.
hint @pause
and scr2=scr1,mask;; // mask of writecount bits
cmp.ne p6,p0=zeroval,scr2
(p6) br.cond.sptk 5b;;
tbit.nz p8,p7=scr1,DEADLOCKBIT;;// Test for DEADLOCK
(p7) cmp.ne p7,p0=r0,ptc1;; // Test for non-null ptc1
(p7) st8.rel [ptc1]=data1;; // Now write PTC1.
5: ld8.acq scr1=[piowcphy];; // Wait for PIOs to complete.
hint @pause
and scr2=scr1,mask;; // mask of writecount bits
cmp.ne p6,p0=zeroval,scr2
(p6) br.cond.sptk 5b
tbit.nz p8,p0=scr1,DEADLOCKBIT;;// Test for DEADLOCK
mov psr.l=psrsave;; // Reenable IC
srlz.i;;
////////////// END PHYSICAL MODE ////////////////////
(p8) add r8=1,r8
(p8) br.cond.spnt 1b;; // Repeat if DEADLOCK occurred.
br.ret.sptk rp
.endp sn2_ptc_deadlock_recovery_core

View file

@ -1,577 +0,0 @@
/*
* SN2 Platform specific SMP Support
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000-2006 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/nodemask.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/sal.h>
#include <asm/delay.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/tlb.h>
#include <asm/numa.h>
#include <asm/hw_irq.h>
#include <asm/current.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/addrs.h>
#include <asm/sn/shub_mmr.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/rw_mmr.h>
#include <asm/sn/sn_feature_sets.h>
DEFINE_PER_CPU(struct ptc_stats, ptcstats);
DECLARE_PER_CPU(struct ptc_stats, ptcstats);
static __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock);
/* 0 = old algorithm (no IPI flushes), 1 = ipi deadlock flush, 2 = ipi instead of SHUB ptc, >2 = always ipi */
static int sn2_flush_opt = 0;
extern unsigned long
sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long,
volatile unsigned long *, unsigned long,
volatile unsigned long *, unsigned long);
void
sn2_ptc_deadlock_recovery(nodemask_t, short, short, int,
volatile unsigned long *, unsigned long,
volatile unsigned long *, unsigned long);
/*
* Note: some of the following is captured here to make debugging easier
* (the macros make more sense if you see the debug patch - not posted)
*/
#define sn2_ptctest 0
#define local_node_uses_ptc_ga(sh1) ((sh1) ? 1 : 0)
#define max_active_pio(sh1) ((sh1) ? 32 : 7)
#define reset_max_active_on_deadlock() 1
#define PTC_LOCK(sh1) ((sh1) ? &sn2_global_ptc_lock : &sn_nodepda->ptc_lock)
struct ptc_stats {
unsigned long ptc_l;
unsigned long change_rid;
unsigned long shub_ptc_flushes;
unsigned long nodes_flushed;
unsigned long deadlocks;
unsigned long deadlocks2;
unsigned long lock_itc_clocks;
unsigned long shub_itc_clocks;
unsigned long shub_itc_clocks_max;
unsigned long shub_ptc_flushes_not_my_mm;
unsigned long shub_ipi_flushes;
unsigned long shub_ipi_flushes_itc_clocks;
};
static inline unsigned long wait_piowc(void)
{
volatile unsigned long *piows;
unsigned long zeroval, ws;
piows = pda->pio_write_status_addr;
zeroval = pda->pio_write_status_val;
do {
cpu_relax();
} while (((ws = *piows) & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != zeroval);
return (ws & SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK) != 0;
}
/**
* sn_migrate - SN-specific task migration actions
* @task: Task being migrated to new CPU
*
* SN2 PIO writes from separate CPUs are not guaranteed to arrive in order.
* Context switching user threads which have memory-mapped MMIO may cause
* PIOs to issue from separate CPUs, thus the PIO writes must be drained
* from the previous CPU's Shub before execution resumes on the new CPU.
*/
void sn_migrate(struct task_struct *task)
{
pda_t *last_pda = pdacpu(task_thread_info(task)->last_cpu);
volatile unsigned long *adr = last_pda->pio_write_status_addr;
unsigned long val = last_pda->pio_write_status_val;
/* Drain PIO writes from old CPU's Shub */
while (unlikely((*adr & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK)
!= val))
cpu_relax();
}
static void
sn2_ipi_flush_all_tlb(struct mm_struct *mm)
{
unsigned long itc;
itc = ia64_get_itc();
smp_flush_tlb_cpumask(*mm_cpumask(mm));
itc = ia64_get_itc() - itc;
__this_cpu_add(ptcstats.shub_ipi_flushes_itc_clocks, itc);
__this_cpu_inc(ptcstats.shub_ipi_flushes);
}
/**
* sn2_global_tlb_purge - globally purge translation cache of virtual address range
* @mm: mm_struct containing virtual address range
* @start: start of virtual address range
* @end: end of virtual address range
* @nbits: specifies number of bytes to purge per instruction (num = 1<<(nbits & 0xfc))
*
* Purges the translation caches of all processors of the given virtual address
* range.
*
* Note:
* - cpu_vm_mask is a bit mask that indicates which cpus have loaded the context.
* - cpu_vm_mask is converted into a nodemask of the nodes containing the
* cpus in cpu_vm_mask.
* - if only one bit is set in cpu_vm_mask & it is the current cpu & the
* process is purging its own virtual address range, then only the
* local TLB needs to be flushed. This flushing can be done using
* ptc.l. This is the common case & avoids the global spinlock.
* - if multiple cpus have loaded the context, then flushing has to be
* done with ptc.g/MMRs under protection of the global ptc_lock.
*/
void
sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
unsigned long end, unsigned long nbits)
{
int i, ibegin, shub1, cnode, mynasid, cpu, lcpu = 0, nasid;
int mymm = (mm == current->active_mm && mm == current->mm);
int use_cpu_ptcga;
volatile unsigned long *ptc0, *ptc1;
unsigned long itc, itc2, flags, data0 = 0, data1 = 0, rr_value, old_rr = 0;
short nix;
nodemask_t nodes_flushed;
int active, max_active, deadlock, flush_opt = sn2_flush_opt;
if (flush_opt > 2) {
sn2_ipi_flush_all_tlb(mm);
return;
}
nodes_clear(nodes_flushed);
i = 0;
for_each_cpu(cpu, mm_cpumask(mm)) {
cnode = cpu_to_node(cpu);
node_set(cnode, nodes_flushed);
lcpu = cpu;
i++;
}
if (i == 0)
return;
preempt_disable();
if (likely(i == 1 && lcpu == smp_processor_id() && mymm)) {
do {
ia64_ptcl(start, nbits << 2);
start += (1UL << nbits);
} while (start < end);
ia64_srlz_i();
__this_cpu_inc(ptcstats.ptc_l);
preempt_enable();
return;
}
if (atomic_read(&mm->mm_users) == 1 && mymm) {
flush_tlb_mm(mm);
__this_cpu_inc(ptcstats.change_rid);
preempt_enable();
return;
}
if (flush_opt == 2) {
sn2_ipi_flush_all_tlb(mm);
preempt_enable();
return;
}
itc = ia64_get_itc();
nix = nodes_weight(nodes_flushed);
rr_value = (mm->context << 3) | REGION_NUMBER(start);
shub1 = is_shub1();
if (shub1) {
data0 = (1UL << SH1_PTC_0_A_SHFT) |
(nbits << SH1_PTC_0_PS_SHFT) |
(rr_value << SH1_PTC_0_RID_SHFT) |
(1UL << SH1_PTC_0_START_SHFT);
ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_0);
ptc1 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_1);
} else {
data0 = (1UL << SH2_PTC_A_SHFT) |
(nbits << SH2_PTC_PS_SHFT) |
(1UL << SH2_PTC_START_SHFT);
ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH2_PTC +
(rr_value << SH2_PTC_RID_SHFT));
ptc1 = NULL;
}
mynasid = get_nasid();
use_cpu_ptcga = local_node_uses_ptc_ga(shub1);
max_active = max_active_pio(shub1);
itc = ia64_get_itc();
spin_lock_irqsave(PTC_LOCK(shub1), flags);
itc2 = ia64_get_itc();
__this_cpu_add(ptcstats.lock_itc_clocks, itc2 - itc);
__this_cpu_inc(ptcstats.shub_ptc_flushes);
__this_cpu_add(ptcstats.nodes_flushed, nix);
if (!mymm)
__this_cpu_inc(ptcstats.shub_ptc_flushes_not_my_mm);
if (use_cpu_ptcga && !mymm) {
old_rr = ia64_get_rr(start);
ia64_set_rr(start, (old_rr & 0xff) | (rr_value << 8));
ia64_srlz_d();
}
wait_piowc();
do {
if (shub1)
data1 = start | (1UL << SH1_PTC_1_START_SHFT);
else
data0 = (data0 & ~SH2_PTC_ADDR_MASK) | (start & SH2_PTC_ADDR_MASK);
deadlock = 0;
active = 0;
ibegin = 0;
i = 0;
for_each_node_mask(cnode, nodes_flushed) {
nasid = cnodeid_to_nasid(cnode);
if (use_cpu_ptcga && unlikely(nasid == mynasid)) {
ia64_ptcga(start, nbits << 2);
ia64_srlz_i();
} else {
ptc0 = CHANGE_NASID(nasid, ptc0);
if (ptc1)
ptc1 = CHANGE_NASID(nasid, ptc1);
pio_atomic_phys_write_mmrs(ptc0, data0, ptc1, data1);
active++;
}
if (active >= max_active || i == (nix - 1)) {
if ((deadlock = wait_piowc())) {
if (flush_opt == 1)
goto done;
sn2_ptc_deadlock_recovery(nodes_flushed, ibegin, i, mynasid, ptc0, data0, ptc1, data1);
if (reset_max_active_on_deadlock())
max_active = 1;
}
active = 0;
ibegin = i + 1;
}
i++;
}
start += (1UL << nbits);
} while (start < end);
done:
itc2 = ia64_get_itc() - itc2;
__this_cpu_add(ptcstats.shub_itc_clocks, itc2);
if (itc2 > __this_cpu_read(ptcstats.shub_itc_clocks_max))
__this_cpu_write(ptcstats.shub_itc_clocks_max, itc2);
if (old_rr) {
ia64_set_rr(start, old_rr);
ia64_srlz_d();
}
spin_unlock_irqrestore(PTC_LOCK(shub1), flags);
if (flush_opt == 1 && deadlock) {
__this_cpu_inc(ptcstats.deadlocks);
sn2_ipi_flush_all_tlb(mm);
}
preempt_enable();
}
/*
* sn2_ptc_deadlock_recovery
*
* Recover from PTC deadlock conditions. Recovery requires stepping through each
* TLB flush transaction. The recovery sequence is somewhat tricky & is
* coded in assembly language.
*/
void
sn2_ptc_deadlock_recovery(nodemask_t nodes, short ib, short ie, int mynasid,
volatile unsigned long *ptc0, unsigned long data0,
volatile unsigned long *ptc1, unsigned long data1)
{
short nasid, i;
int cnode;
unsigned long *piows, zeroval, n;
__this_cpu_inc(ptcstats.deadlocks);
piows = (unsigned long *) pda->pio_write_status_addr;
zeroval = pda->pio_write_status_val;
i = 0;
for_each_node_mask(cnode, nodes) {
if (i < ib)
goto next;
if (i > ie)
break;
nasid = cnodeid_to_nasid(cnode);
if (local_node_uses_ptc_ga(is_shub1()) && nasid == mynasid)
goto next;
ptc0 = CHANGE_NASID(nasid, ptc0);
if (ptc1)
ptc1 = CHANGE_NASID(nasid, ptc1);
n = sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows, zeroval);
__this_cpu_add(ptcstats.deadlocks2, n);
next:
i++;
}
}
/**
* sn_send_IPI_phys - send an IPI to a Nasid and slice
* @nasid: nasid to receive the interrupt (may be outside partition)
* @physid: physical cpuid to receive the interrupt.
* @vector: command to send
* @delivery_mode: delivery mechanism
*
* Sends an IPI (interprocessor interrupt) to the processor specified by
* @physid on the node @nasid.
*
* @delivery_mode can be one of the following
*
* %IA64_IPI_DM_INT - pend an interrupt
* %IA64_IPI_DM_PMI - pend a PMI
* %IA64_IPI_DM_NMI - pend an NMI
* %IA64_IPI_DM_INIT - pend an INIT interrupt
*/
void sn_send_IPI_phys(int nasid, long physid, int vector, int delivery_mode)
{
long val;
unsigned long flags = 0;
volatile long *p;
p = (long *)GLOBAL_MMR_PHYS_ADDR(nasid, SH_IPI_INT);
val = (1UL << SH_IPI_INT_SEND_SHFT) |
(physid << SH_IPI_INT_PID_SHFT) |
((long)delivery_mode << SH_IPI_INT_TYPE_SHFT) |
((long)vector << SH_IPI_INT_IDX_SHFT) |
(0x000feeUL << SH_IPI_INT_BASE_SHFT);
mb();
if (enable_shub_wars_1_1()) {
spin_lock_irqsave(&sn2_global_ptc_lock, flags);
}
pio_phys_write_mmr(p, val);
if (enable_shub_wars_1_1()) {
wait_piowc();
spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
}
}
EXPORT_SYMBOL(sn_send_IPI_phys);
/**
* sn2_send_IPI - send an IPI to a processor
* @cpuid: target of the IPI
* @vector: command to send
* @delivery_mode: delivery mechanism
* @redirect: redirect the IPI?
*
* Sends an IPI (InterProcessor Interrupt) to the processor specified by
* @cpuid. @vector specifies the command to send, while @delivery_mode can
* be one of the following
*
* %IA64_IPI_DM_INT - pend an interrupt
* %IA64_IPI_DM_PMI - pend a PMI
* %IA64_IPI_DM_NMI - pend an NMI
* %IA64_IPI_DM_INIT - pend an INIT interrupt
*/
void sn2_send_IPI(int cpuid, int vector, int delivery_mode, int redirect)
{
long physid;
int nasid;
physid = cpu_physical_id(cpuid);
nasid = cpuid_to_nasid(cpuid);
/* the following is used only when starting cpus at boot time */
if (unlikely(nasid == -1))
ia64_sn_get_sapic_info(physid, &nasid, NULL, NULL);
sn_send_IPI_phys(nasid, physid, vector, delivery_mode);
}
#ifdef CONFIG_HOTPLUG_CPU
/**
* sn_cpu_disable_allowed - Determine if a CPU can be disabled.
* @cpu: CPU that is requested to be disabled.
*
* CPU disable is only allowed on SHub2 systems running with a PROM
* that supports CPU disable. It is not permitted to disable the boot processor.
*/
bool sn_cpu_disable_allowed(int cpu)
{
if (is_shub2() && sn_prom_feature_available(PRF_CPU_DISABLE_SUPPORT)) {
if (cpu != 0)
return true;
else
printk(KERN_WARNING
"Disabling the boot processor is not allowed.\n");
} else
printk(KERN_WARNING
"CPU disable is not supported on this system.\n");
return false;
}
#endif /* CONFIG_HOTPLUG_CPU */
#ifdef CONFIG_PROC_FS
#define PTC_BASENAME "sgi_sn/ptc_statistics"
static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset)
{
if (*offset < nr_cpu_ids)
return offset;
return NULL;
}
static void *sn2_ptc_seq_next(struct seq_file *file, void *data, loff_t * offset)
{
(*offset)++;
if (*offset < nr_cpu_ids)
return offset;
return NULL;
}
static void sn2_ptc_seq_stop(struct seq_file *file, void *data)
{
}
static int sn2_ptc_seq_show(struct seq_file *file, void *data)
{
struct ptc_stats *stat;
int cpu;
cpu = *(loff_t *) data;
if (!cpu) {
seq_printf(file,
"# cpu ptc_l newrid ptc_flushes nodes_flushed deadlocks lock_nsec shub_nsec shub_nsec_max not_my_mm deadlock2 ipi_fluches ipi_nsec\n");
seq_printf(file, "# ptctest %d, flushopt %d\n", sn2_ptctest, sn2_flush_opt);
}
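/* ITC clock counts are reported in nanoseconds below: clocks divided
 * by cyc_per_usec give microseconds, scaled by 1000. */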
if (cpu < nr_cpu_ids && cpu_online(cpu)) {
stat = &per_cpu(ptcstats, cpu);
seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed,
stat->deadlocks,
1000 * stat->lock_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
1000 * stat->shub_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
1000 * stat->shub_itc_clocks_max / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
stat->shub_ptc_flushes_not_my_mm,
stat->deadlocks2,
stat->shub_ipi_flushes,
1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec);
}
return 0;
}
static ssize_t sn2_ptc_proc_write(struct file *file, const char __user *user, size_t count, loff_t *data)
{
int cpu;
char optstr[64];
if (count == 0 || count > sizeof(optstr))
return -EINVAL;
if (copy_from_user(optstr, user, count))
return -EFAULT;
optstr[count - 1] = '\0';
sn2_flush_opt = simple_strtoul(optstr, NULL, 0);
for_each_online_cpu(cpu)
memset(&per_cpu(ptcstats, cpu), 0, sizeof(struct ptc_stats));
return count;
}
static const struct seq_operations sn2_ptc_seq_ops = {
.start = sn2_ptc_seq_start,
.next = sn2_ptc_seq_next,
.stop = sn2_ptc_seq_stop,
.show = sn2_ptc_seq_show
};
static int sn2_ptc_proc_open(struct inode *inode, struct file *file)
{
return seq_open(file, &sn2_ptc_seq_ops);
}
static const struct file_operations proc_sn2_ptc_operations = {
.open = sn2_ptc_proc_open,
.read = seq_read,
.write = sn2_ptc_proc_write,
.llseek = seq_lseek,
.release = seq_release,
};
static struct proc_dir_entry *proc_sn2_ptc;
static int __init sn2_ptc_init(void)
{
if (!ia64_platform_is("sn2"))
return 0;
proc_sn2_ptc = proc_create(PTC_BASENAME, 0444,
NULL, &proc_sn2_ptc_operations);
if (!proc_sn2_ptc) {
printk(KERN_ERR "unable to create %s proc entry", PTC_BASENAME);
return -EINVAL;
}
spin_lock_init(&sn2_global_ptc_lock);
return 0;
}
static void __exit sn2_ptc_exit(void)
{
remove_proc_entry(PTC_BASENAME, NULL);
}
module_init(sn2_ptc_init);
module_exit(sn2_ptc_exit);
#endif /* CONFIG_PROC_FS */
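For reference, a minimal user-space sketch of driving this interface (a hypothetical example, not part of the tree; the /proc path comes from PTC_BASENAME above):

#include <stdio.h>

int main(void)
{
	FILE *f;
	char line[256];

	/* Select flush policy 2 (IPI-based flushing); per
	 * sn2_ptc_proc_write() above, any write also zeroes the
	 * per-cpu ptc_stats. */
	f = fopen("/proc/sgi_sn/ptc_statistics", "w");
	if (!f)
		return 1;
	fputs("2\n", f);
	fclose(f);

	/* Dump the per-cpu TLB flush statistics. */
	f = fopen("/proc/sgi_sn/ptc_statistics", "r");
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}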

File diff suppressed because it is too large

View file

@@ -1,69 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
*/
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <asm/sn/sn_sal.h>
static int partition_id_show(struct seq_file *s, void *p)
{
seq_printf(s, "%d\n", sn_partition_id);
return 0;
}
static int system_serial_number_show(struct seq_file *s, void *p)
{
seq_printf(s, "%s\n", sn_system_serial_number());
return 0;
}
static int licenseID_show(struct seq_file *s, void *p)
{
seq_printf(s, "0x%llx\n", sn_partition_serial_number_val());
return 0;
}
static int coherence_id_show(struct seq_file *s, void *p)
{
seq_printf(s, "%d\n", partition_coherence_id());
return 0;
}
/* /proc/sgi_sn/sn_topology uses seq_file, see sn_hwperf.c */
extern int sn_topology_open(struct inode *, struct file *);
extern int sn_topology_release(struct inode *, struct file *);
static const struct file_operations proc_sn_topo_fops = {
.open = sn_topology_open,
.read = seq_read,
.llseek = seq_lseek,
.release = sn_topology_release,
};
void register_sn_procfs(void)
{
static struct proc_dir_entry *sgi_proc_dir = NULL;
BUG_ON(sgi_proc_dir != NULL);
if (!(sgi_proc_dir = proc_mkdir("sgi_sn", NULL)))
return;
proc_create_single("partition_id", 0444, sgi_proc_dir,
partition_id_show);
proc_create_single("system_serial_number", 0444, sgi_proc_dir,
system_serial_number_show);
proc_create_single("licenseID", 0444, sgi_proc_dir, licenseID_show);
proc_create_single("coherence_id", 0444, sgi_proc_dir,
coherence_id_show);
proc_create("sn_topology", 0444, sgi_proc_dir, &proc_sn_topo_fops);
}
#endif /* CONFIG_PROC_FS */

View file

@@ -1,61 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* linux/arch/ia64/sn/kernel/sn2/timer.c
*
* Copyright (C) 2003 Silicon Graphics, Inc.
* Copyright (C) 2003 Hewlett-Packard Co
* David Mosberger <davidm@hpl.hp.com>: updated for new timer-interpolation infrastructure
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <asm/hw_irq.h>
#include <asm/timex.h>
#include <asm/sn/leds.h>
#include <asm/sn/shub_mmr.h>
#include <asm/sn/clksupport.h>
extern unsigned long sn_rtc_cycles_per_second;
static u64 read_sn2(struct clocksource *cs)
{
return (u64)readq(RTC_COUNTER_ADDR);
}
static struct clocksource clocksource_sn2 = {
.name = "sn2_rtc",
.rating = 450,
.read = read_sn2,
.mask = (1LL << 55) - 1,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
/*
* sn udelay uses the RTC instead of the ITC because the ITC is not
* synchronized across all CPUs, and the thread may migrate to another CPU
* if preemption is enabled.
*/
static void
ia64_sn_udelay (unsigned long usecs)
{
unsigned long start = rtc_time();
unsigned long end = start +
usecs * sn_rtc_cycles_per_second / 1000000;
while (time_before((unsigned long)rtc_time(), end))
cpu_relax();
}
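/* Worked example (hypothetical rate): with sn_rtc_cycles_per_second at
 * 50000000, ia64_sn_udelay(10) spins until 500 RTC ticks have elapsed. */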
void __init sn_timer_init(void)
{
clocksource_sn2.archdata.fsys_mmio = RTC_COUNTER_ADDR;
clocksource_register_hz(&clocksource_sn2, sn_rtc_cycles_per_second);
ia64_udelay = &ia64_sn_udelay;
}

View file

@@ -1,60 +0,0 @@
/*
*
*
* Copyright (c) 2005, 2006 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* Further, this software is distributed without any warranty that it is
* free of the rightful claim of any third person regarding infringement
* or the like. Any license provided herein, whether implied or
* otherwise, applies only to this software file. Patent licenses, if
* any, provided herein do not apply to combinations of this program with
* other software, or any other product whatsoever.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* For further information regarding this notice, see:
*
* http://oss.sgi.com/projects/GenInfo/NoticeExplan
*/
#include <linux/interrupt.h>
#include <asm/sn/pda.h>
#include <asm/sn/leds.h>
extern void sn_lb_int_war_check(void);
extern irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs);
#define SN_LB_INT_WAR_INTERVAL 100
void sn_timer_interrupt(int irq, void *dev_id)
{
/* LED blinking */
if (!pda->hb_count--) {
pda->hb_count = HZ / 2;
set_led_bits(pda->hb_state ^=
LED_CPU_HEARTBEAT, LED_CPU_HEARTBEAT);
}
if (is_shub1()) {
if (enable_shub_wars_1_1()) {
/* Bugfix code for SHUB 1.1 */
if (pda->pio_shub_war_cam_addr)
*pda->pio_shub_war_cam_addr = 0x8000000000000010UL;
}
if (pda->sn_lb_int_war_ticks == 0)
sn_lb_int_war_check();
pda->sn_lb_int_war_ticks++;
if (pda->sn_lb_int_war_ticks >= SN_LB_INT_WAR_INTERVAL)
pda->sn_lb_int_war_ticks = 0;
}
}

View file

@@ -1,10 +0,0 @@
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
#
# Makefile for the sn pci general routines.
obj-y := pci_dma.o tioca_provider.o tioce_provider.o pcibr/

View file

@@ -1,446 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000,2002-2005 Silicon Graphics, Inc. All rights reserved.
*
* Routines for PCI DMA mapping. See Documentation/DMA-API.txt for
* a description of how these routines should be used.
*/
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <asm/dma.h>
#include <asm/sn/intr.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/sn_sal.h>
#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg)))
#define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))
/**
* sn_dma_supported - test a DMA mask
* @dev: device to test
* @mask: DMA mask to test
*
* Return whether the given PCI device DMA address mask can be supported
* properly. For example, if your device can only drive the low 24-bits
* during PCI bus mastering, then you would pass 0x00ffffff as the mask to
* this function. Of course, SN only supports devices that have 32 or more
* address bits when using the PMU.
*/
static int sn_dma_supported(struct device *dev, u64 mask)
{
BUG_ON(!dev_is_pci(dev));
if (mask < 0x7fffffff)
return 0;
return 1;
}
/**
* sn_dma_set_mask - set the DMA mask
* @dev: device to set
* @dma_mask: new mask
*
* Set @dev's DMA mask if the hw supports it.
*/
int sn_dma_set_mask(struct device *dev, u64 dma_mask)
{
BUG_ON(!dev_is_pci(dev));
if (!sn_dma_supported(dev, dma_mask))
return 0;
*dev->dma_mask = dma_mask;
return 1;
}
EXPORT_SYMBOL(sn_dma_set_mask);
/**
* sn_dma_alloc_coherent - allocate memory for coherent DMA
* @dev: device to allocate for
* @size: size of the region
* @dma_handle: DMA (bus) address
* @flags: memory allocation flags
*
* dma_alloc_coherent() returns a pointer to a memory region suitable for
* coherent DMA traffic to/from a PCI device. On SN platforms, this means
* that @dma_handle will have the %PCIIO_DMA_CMD flag set.
*
* This interface is usually used for "command" streams (e.g. the command
* queue for a SCSI controller). See Documentation/DMA-API.txt for
* more information.
*/
static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t * dma_handle, gfp_t flags,
unsigned long attrs)
{
void *cpuaddr;
unsigned long phys_addr;
int node;
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
BUG_ON(!dev_is_pci(dev));
/*
* Allocate the memory.
*/
node = pcibus_to_node(pdev->bus);
if (likely(node >= 0)) {
struct page *p = __alloc_pages_node(node,
flags, get_order(size));
if (likely(p))
cpuaddr = page_address(p);
else
return NULL;
} else
cpuaddr = (void *)__get_free_pages(flags, get_order(size));
if (unlikely(!cpuaddr))
return NULL;
memset(cpuaddr, 0x0, size);
/* physical addr. of the memory we just got */
phys_addr = __pa(cpuaddr);
/*
* 64 bit address translations should never fail.
* 32 bit translations can fail if there are insufficient mapping
* resources.
*/
*dma_handle = provider->dma_map_consistent(pdev, phys_addr, size,
SN_DMA_ADDR_PHYS);
if (!*dma_handle) {
printk(KERN_ERR "%s: out of ATEs\n", __func__);
free_pages((unsigned long)cpuaddr, get_order(size));
return NULL;
}
return cpuaddr;
}
/**
* sn_pci_free_coherent - free memory associated with coherent DMAable region
* @dev: device to free for
* @size: size to free
* @cpu_addr: kernel virtual address to free
* @dma_handle: DMA address associated with this region
*
* Frees the memory allocated by dma_alloc_coherent(), potentially unmapping
* any associated IOMMU mappings.
*/
static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_handle, unsigned long attrs)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
BUG_ON(!dev_is_pci(dev));
provider->dma_unmap(pdev, dma_handle, 0);
free_pages((unsigned long)cpu_addr, get_order(size));
}
/**
* sn_dma_map_page - map a single page for DMA
* @dev: device to map for
* @page: page to map
* @offset: offset into @page at which the region starts
* @size: size of the region
* @dir: DMA direction
* @attrs: optional dma attributes
*
* Map the region pointed to by @cpu_addr for DMA and return the
* DMA address.
*
* We map this to the one step pcibr_dmamap_trans interface rather than
* the two step pcibr_dmamap_alloc/pcibr_dmamap_addr because we have
* no way of saving the dmamap handle from the alloc to later free
* (which is pretty much unacceptable).
*
* mappings with the DMA_ATTR_WRITE_BARRIER get mapped with
* dma_map_consistent() so that writes force a flush of pending DMA.
* (See "SGI Altix Architecture Considerations for Linux Device Drivers",
* Document Number: 007-4763-001)
*
* TODO: simplify our interface;
* figure out how to save dmamap handle so can use two step.
*/
static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
unsigned long attrs)
{
void *cpu_addr = page_address(page) + offset;
dma_addr_t dma_addr;
unsigned long phys_addr;
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
BUG_ON(!dev_is_pci(dev));
phys_addr = __pa(cpu_addr);
if (attrs & DMA_ATTR_WRITE_BARRIER)
dma_addr = provider->dma_map_consistent(pdev, phys_addr,
size, SN_DMA_ADDR_PHYS);
else
dma_addr = provider->dma_map(pdev, phys_addr, size,
SN_DMA_ADDR_PHYS);
if (!dma_addr) {
printk(KERN_ERR "%s: out of ATEs\n", __func__);
return DMA_MAPPING_ERROR;
}
return dma_addr;
}
/**
* sn_dma_unmap_page - unmap a DMA mapped page
* @dev: device to sync
* @dma_addr: DMA address to sync
* @size: size of region
* @dir: DMA direction
* @attrs: optional dma attributes
*
* This routine is supposed to sync the DMA region specified
* by @dma_addr into the coherence domain. On SN, we're always cache
* coherent, so we just need to free any ATEs associated with this mapping.
*/
static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
BUG_ON(!dev_is_pci(dev));
provider->dma_unmap(pdev, dma_addr, dir);
}
/**
* sn_dma_unmap_sg - unmap a DMA scatterlist
* @dev: device to unmap
* @sg: scatterlist to unmap
* @nhwentries: number of scatterlist entries
* @direction: DMA direction
* @attrs: optional dma attributes
*
* Unmap a set of streaming mode DMA translations.
*/
static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
int nhwentries, enum dma_data_direction dir,
unsigned long attrs)
{
int i;
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
struct scatterlist *sg;
BUG_ON(!dev_is_pci(dev));
for_each_sg(sgl, sg, nhwentries, i) {
provider->dma_unmap(pdev, sg->dma_address, dir);
sg->dma_address = (dma_addr_t) NULL;
sg->dma_length = 0;
}
}
/**
* sn_dma_map_sg - map a scatterlist for DMA
* @dev: device to map for
* @sg: scatterlist to map
* @nhwentries: number of entries
* @direction: direction of the DMA transaction
* @attrs: optional dma attributes
*
* mappings with the DMA_ATTR_WRITE_BARRIER get mapped with
* dma_map_consistent() so that writes force a flush of pending DMA.
* (See "SGI Altix Architecture Considerations for Linux Device Drivers",
* Document Number: 007-4763-001)
*
* Maps each entry of @sg for DMA.
*/
static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
int nhwentries, enum dma_data_direction dir,
unsigned long attrs)
{
unsigned long phys_addr;
struct scatterlist *saved_sg = sgl, *sg;
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
int i;
BUG_ON(!dev_is_pci(dev));
/*
* Setup a DMA address for each entry in the scatterlist.
*/
for_each_sg(sgl, sg, nhwentries, i) {
dma_addr_t dma_addr;
phys_addr = SG_ENT_PHYS_ADDRESS(sg);
if (attrs & DMA_ATTR_WRITE_BARRIER)
dma_addr = provider->dma_map_consistent(pdev,
phys_addr,
sg->length,
SN_DMA_ADDR_PHYS);
else
dma_addr = provider->dma_map(pdev, phys_addr,
sg->length,
SN_DMA_ADDR_PHYS);
sg->dma_address = dma_addr;
if (!sg->dma_address) {
printk(KERN_ERR "%s: out of ATEs\n", __func__);
/*
* Free any successfully allocated entries.
*/
if (i > 0)
sn_dma_unmap_sg(dev, saved_sg, i, dir, attrs);
return 0;
}
sg->dma_length = sg->length;
}
return nhwentries;
}
static u64 sn_dma_get_required_mask(struct device *dev)
{
return DMA_BIT_MASK(64);
}
char *sn_pci_get_legacy_mem(struct pci_bus *bus)
{
if (!SN_PCIBUS_BUSSOFT(bus))
return ERR_PTR(-ENODEV);
return (char *)(SN_PCIBUS_BUSSOFT(bus)->bs_legacy_mem | __IA64_UNCACHED_OFFSET);
}
int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
unsigned long addr;
int ret;
struct ia64_sal_retval isrv;
/*
* First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work
* around hw issues at the pci bus level. SGI proms older than
* 4.10 don't implement this.
*/
SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
pci_domain_nr(bus), bus->number,
0, /* io */
0, /* read */
port, size, __pa(val));
if (isrv.status == 0)
return size;
/*
* If the above failed, retry using the SAL_PROBE call which should
* be present in all proms (but which cannot work around PCI chipset
* bugs). This code is retained for compatibility with old
* pre-4.10 proms, and should be removed at some point in the future.
*/
if (!SN_PCIBUS_BUSSOFT(bus))
return -ENODEV;
addr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
addr += port;
ret = ia64_sn_probe_mem(addr, (long)size, (void *)val);
if (ret == 2)
return -EINVAL;
if (ret == 1)
*val = -1;
return size;
}
int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
int ret = size;
unsigned long paddr;
unsigned long *addr;
struct ia64_sal_retval isrv;
/*
* First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work
* around hw issues at the pci bus level. SGI proms older than
* 4.10 don't implement this.
*/
SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
pci_domain_nr(bus), bus->number,
0, /* io */
1, /* write */
port, size, __pa(&val));
if (isrv.status == 0)
return size;
/*
* If the above failed, retry using the SAL_PROBE call which should
* be present in all proms (but which cannot work around PCI chipset
* bugs). This code is retained for compatibility with old
* pre-4.10 proms, and should be removed at some point in the future.
*/
if (!SN_PCIBUS_BUSSOFT(bus)) {
ret = -ENODEV;
goto out;
}
/* Put the phys addr in uncached space */
paddr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
paddr += port;
addr = (unsigned long *)paddr;
switch (size) {
case 1:
*(volatile u8 *)(addr) = (u8)(val);
break;
case 2:
*(volatile u16 *)(addr) = (u16)(val);
break;
case 4:
*(volatile u32 *)(addr) = (u32)(val);
break;
default:
ret = -EINVAL;
break;
}
out:
return ret;
}
static struct dma_map_ops sn_dma_ops = {
.alloc = sn_dma_alloc_coherent,
.free = sn_dma_free_coherent,
.map_page = sn_dma_map_page,
.unmap_page = sn_dma_unmap_page,
.map_sg = sn_dma_map_sg,
.unmap_sg = sn_dma_unmap_sg,
.dma_supported = sn_dma_supported,
.get_required_mask = sn_dma_get_required_mask,
};
void sn_dma_init(void)
{
dma_ops = &sn_dma_ops;
}
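Once sn_dma_init() installs sn_dma_ops, the generic DMA API dispatches into the routines above. A minimal, hypothetical driver-side sketch (example_xfer is illustrative and not from the tree; the DMA API calls are the standard kernel ones):

#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>

static int example_xfer(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* Routed to sn_dma_map_page() via sn_dma_ops. */
	handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, handle))
		return -ENOMEM;		/* e.g. the bridge ran out of ATEs */

	/* ... program the device with 'handle' and run the transfer ... */

	/* Routed to sn_dma_unmap_page(), freeing any ATEs. */
	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
	return 0;
}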

View file

@@ -1,13 +0,0 @@
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 2002-2004 Silicon Graphics, Inc. All Rights Reserved.
#
# Makefile for the sn2 io routines.
ccflags-y := -I $(srctree)/arch/ia64/sn/include
obj-y += pcibr_dma.o pcibr_reg.o \
pcibr_ate.o pcibr_provider.o

View file

@@ -1,177 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001-2006 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
int pcibr_invalidate_ate; /* by default don't invalidate ATE on free */
/*
* mark_ate: Mark the ate as either free or inuse.
*/
static void mark_ate(struct ate_resource *ate_resource, int start, int number,
u64 value)
{
u64 *ate = ate_resource->ate;
int index;
int length = 0;
for (index = start; length < number; index++, length++)
ate[index] = value;
}
/*
* find_free_ate: Find the first free ate index starting from the given
* index for the desired consecutive count.
*/
static int find_free_ate(struct ate_resource *ate_resource, int start,
int count)
{
u64 *ate = ate_resource->ate;
int index;
int start_free;
for (index = start; index < ate_resource->num_ate;) {
if (!ate[index]) {
int i;
int free;
free = 0;
start_free = index; /* Found start free ate */
for (i = start_free; i < ate_resource->num_ate; i++) {
if (!ate[i]) { /* This is free */
if (++free == count)
return start_free;
} else {
index = i + 1;
break;
}
}
if (i >= ate_resource->num_ate)
return -1;
} else
index++; /* Try next ate */
}
return -1;
}
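/*
 * Worked example (hypothetical state): with num_ate = 8 and
 * ate = {0, 0, 3, 3, 3, 0, 0, 0}, find_free_ate(res, 0, 3) rejects the
 * two-entry free run at index 0, skips the allocated block, and
 * returns 5 for the free run covering indices 5..7.
 */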
/*
* free_ate_resource: Free the requested number of ATEs.
*/
static inline void free_ate_resource(struct ate_resource *ate_resource,
int start)
{
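/* Each entry of an allocated block holds the block's length (written
 * by alloc_ate_resource() below), so ate[start] is the count to clear. */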
mark_ate(ate_resource, start, ate_resource->ate[start], 0);
if ((ate_resource->lowest_free_index > start) ||
(ate_resource->lowest_free_index < 0))
ate_resource->lowest_free_index = start;
}
/*
* alloc_ate_resource: Allocate the requested number of ATEs.
*/
static inline int alloc_ate_resource(struct ate_resource *ate_resource,
int ate_needed)
{
int start_index;
/*
* Check for ate exhaustion.
*/
if (ate_resource->lowest_free_index < 0)
return -1;
/*
* Find the required number of free consecutive ates.
*/
start_index =
find_free_ate(ate_resource, ate_resource->lowest_free_index,
ate_needed);
if (start_index >= 0)
mark_ate(ate_resource, start_index, ate_needed, ate_needed);
ate_resource->lowest_free_index =
find_free_ate(ate_resource, ate_resource->lowest_free_index, 1);
return start_index;
}
/*
* Allocate "count" contiguous Bridge Address Translation Entries
* on the specified bridge to be used for PCI to XTALK mappings.
* Indices in rm map range from 1..num_entries. Indices returned
* to caller range from 0..num_entries-1.
*
* Return the start index on success, -1 on failure.
*/
int pcibr_ate_alloc(struct pcibus_info *pcibus_info, int count)
{
int status;
unsigned long flags;
spin_lock_irqsave(&pcibus_info->pbi_lock, flags);
status = alloc_ate_resource(&pcibus_info->pbi_int_ate_resource, count);
spin_unlock_irqrestore(&pcibus_info->pbi_lock, flags);
return status;
}
/*
* Setup an Address Translation Entry as specified. Use either the Bridge
* internal maps or the external map RAM, as appropriate.
*/
static inline u64 __iomem *pcibr_ate_addr(struct pcibus_info *pcibus_info,
int ate_index)
{
if (ate_index < pcibus_info->pbi_int_ate_size) {
return pcireg_int_ate_addr(pcibus_info, ate_index);
}
panic("pcibr_ate_addr: invalid ate_index 0x%x", ate_index);
}
/*
* Update the ate.
*/
inline void
ate_write(struct pcibus_info *pcibus_info, int ate_index, int count,
volatile u64 ate)
{
while (count-- > 0) {
if (ate_index < pcibus_info->pbi_int_ate_size) {
pcireg_int_ate_set(pcibus_info, ate_index, ate);
} else {
panic("ate_write: invalid ate_index 0x%x", ate_index);
}
ate_index++;
ate += IOPGSIZE;
}
pcireg_tflush_get(pcibus_info); /* wait until Bridge PIO complete */
}
void pcibr_ate_free(struct pcibus_info *pcibus_info, int index)
{
volatile u64 ate;
int count;
unsigned long flags;
if (pcibr_invalidate_ate) {
/* For debugging purposes, clear the valid bit in the ATE */
ate = *pcibr_ate_addr(pcibus_info, index);
count = pcibus_info->pbi_int_ate_resource.ate[index];
ate_write(pcibus_info, index, count, (ate & ~PCI32_ATE_V));
}
spin_lock_irqsave(&pcibus_info->pbi_lock, flags);
free_ate_resource(&pcibus_info->pbi_int_ate_resource, index);
spin_unlock_irqrestore(&pcibus_info->pbi_lock, flags);
}

View file

@@ -1,413 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001-2005 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/export.h>
#include <asm/sn/addrs.h>
#include <asm/sn/geo.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/pic.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/tiocp.h>
#include "tio.h"
#include "xtalk/xwidgetdev.h"
#include "xtalk/hubdev.h"
extern int sn_ioif_inited;
/* =====================================================================
* DMA MANAGEMENT
*
* The Bridge ASIC provides three methods of doing DMA: via a "direct map"
* register available in 32-bit PCI space (which selects a contiguous 2G
* address space on some other widget), via "direct" addressing via 64-bit
* PCI space (all destination information comes from the PCI address,
* including transfer attributes), and via a "mapped" region that allows
* a bunch of different small mappings to be established with the PMU.
*
* For efficiency, we prefer the 32bit direct mapping facility most,
* since it requires no resource allocations. The advantage of using the
* PMU over the 64-bit direct is that single-cycle PCI addressing can be
* used; the advantage of using 64-bit direct over PMU addressing is that
* we do not have to allocate entries in the PMU.
*/
static dma_addr_t
pcibr_dmamap_ate32(struct pcidev_info *info,
u64 paddr, size_t req_size, u64 flags, int dma_flags)
{
struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
pdi_pcibus_info;
u8 internal_device = (PCI_SLOT(pcidev_info->pdi_host_pcidev_info->
pdi_linux_pcidev->devfn)) - 1;
int ate_count;
int ate_index;
u64 ate_flags = flags | PCI32_ATE_V;
u64 ate;
u64 pci_addr;
u64 xio_addr;
u64 offset;
/* PIC in PCI-X mode does not support 32bit PageMap mode */
if (IS_PIC_SOFT(pcibus_info) && IS_PCIX(pcibus_info)) {
return 0;
}
/* Calculate the number of ATEs needed. */
if (!(MINIMAL_ATE_FLAG(paddr, req_size))) {
ate_count = IOPG((IOPGSIZE - 1) /* worst case start offset */
+req_size /* max mapping bytes */
- 1) + 1; /* round UP */
} else { /* assume requested target is page aligned */
ate_count = IOPG(req_size /* max mapping bytes */
- 1) + 1; /* round UP */
}
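/* e.g. (hypothetical) with 16KB IO pages, an unaligned 20KB request
 * needs IOPG(16383 + 20480 - 1) + 1 = 3 ATEs. */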
/* Get the number of ATEs required. */
ate_index = pcibr_ate_alloc(pcibus_info, ate_count);
if (ate_index < 0)
return 0;
/* In PCI-X mode, Prefetch not supported */
if (IS_PCIX(pcibus_info))
ate_flags &= ~(PCI32_ATE_PREF);
if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
PHYS_TO_TIODMA(paddr);
else
xio_addr = paddr;
offset = IOPGOFF(xio_addr);
ate = ate_flags | (xio_addr - offset);
/* If PIC, put the targetid in the ATE */
if (IS_PIC_SOFT(pcibus_info)) {
ate |= (pcibus_info->pbi_hub_xid << PIC_ATE_TARGETID_SHFT);
}
/*
* If we're mapping for MSI, set the MSI bit in the ATE. If it's a
* TIOCP based pci bus, we also need to set the PIO bit in the ATE.
*/
if (dma_flags & SN_DMA_MSI) {
ate |= PCI32_ATE_MSI;
if (IS_TIOCP_SOFT(pcibus_info))
ate |= PCI32_ATE_PIO;
}
ate_write(pcibus_info, ate_index, ate_count, ate);
/*
* Set up the DMA mapped Address.
*/
pci_addr = PCI32_MAPPED_BASE + offset + IOPGSIZE * ate_index;
/*
* If swap was set in device in pcibr_endian_set()
* we need to turn swapping on.
*/
if (pcibus_info->pbi_devreg[internal_device] & PCIBR_DEV_SWAP_DIR)
ATE_SWAP_ON(pci_addr);
return pci_addr;
}
static dma_addr_t
pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr,
u64 dma_attributes, int dma_flags)
{
struct pcibus_info *pcibus_info = (struct pcibus_info *)
((info->pdi_host_pcidev_info)->pdi_pcibus_info);
u64 pci_addr;
/* Translate to Crosstalk View of Physical Address */
if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
pci_addr = IS_PIC_SOFT(pcibus_info) ?
PHYS_TO_DMA(paddr) :
PHYS_TO_TIODMA(paddr);
else
pci_addr = paddr;
pci_addr |= dma_attributes;
/* Handle Bus mode */
if (IS_PCIX(pcibus_info))
pci_addr &= ~PCI64_ATTR_PREF;
/* Handle Bridge Chipset differences */
if (IS_PIC_SOFT(pcibus_info)) {
pci_addr |=
((u64) pcibus_info->
pbi_hub_xid << PIC_PCI64_ATTR_TARG_SHFT);
} else
pci_addr |= (dma_flags & SN_DMA_MSI) ?
TIOCP_PCI64_CMDTYPE_MSI :
TIOCP_PCI64_CMDTYPE_MEM;
/* If PCI mode, func zero uses VCHAN0, every other func uses VCHAN1 */
if (!IS_PCIX(pcibus_info) && PCI_FUNC(info->pdi_linux_pcidev->devfn))
pci_addr |= PCI64_ATTR_VIRTUAL;
return pci_addr;
}
static dma_addr_t
pcibr_dmatrans_direct32(struct pcidev_info * info,
u64 paddr, size_t req_size, u64 flags, int dma_flags)
{
struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
pdi_pcibus_info;
u64 xio_addr;
u64 xio_base;
u64 offset;
u64 endoff;
if (IS_PCIX(pcibus_info)) {
return 0;
}
if (dma_flags & SN_DMA_MSI)
return 0;
if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
PHYS_TO_TIODMA(paddr);
else
xio_addr = paddr;
xio_base = pcibus_info->pbi_dir_xbase;
offset = xio_addr - xio_base;
endoff = req_size + offset;
if ((req_size > (1ULL << 31)) || /* Too Big */
(xio_addr < xio_base) || /* Out of range for mappings */
(endoff > (1ULL << 31))) { /* Too Big */
return 0;
}
return PCI32_DIRECT_BASE | offset;
}
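/* Worked example (hypothetical values): with pbi_dir_xbase = 0x1000000,
 * a 4KB request at xio_addr 0x1004000 yields PCI32_DIRECT_BASE | 0x4000;
 * targets outside the 2GB window return 0 and fall back to ATE mapping
 * in pcibr_dma_map(). */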
/*
* Wrapper routine for freeing DMA maps
* DMA mappings for Direct 64 and 32 do not have any DMA maps.
*/
void
pcibr_dma_unmap(struct pci_dev *hwdev, dma_addr_t dma_handle, int direction)
{
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
struct pcibus_info *pcibus_info =
(struct pcibus_info *)pcidev_info->pdi_pcibus_info;
if (IS_PCI32_MAPPED(dma_handle)) {
int ate_index;
ate_index =
IOPG((ATE_SWAP_OFF(dma_handle) - PCI32_MAPPED_BASE));
pcibr_ate_free(pcibus_info, ate_index);
}
}
/*
* On SN systems there is a race condition between a PIO read response and
* DMA's. In rare cases, the read response may beat the DMA, causing the
* driver to think that data in memory is complete and meaningful. This code
* eliminates that race. This routine is called by the PIO read routines
* after doing the read. For PIC this routine then forces a fake interrupt
* on another line, which is logically associated with the slot that the PIO
* is addressed to. It then spins while watching the memory location that
* the interrupt is targeted to. When the interrupt response arrives, we
* are sure that the DMA has landed in memory and it is safe for the driver
* to proceed. For TIOCP use the Device(x) Write Request Buffer Flush
* Bridge register since it ensures the data has entered the coherence domain,
* unlike the PIC Device(x) Write Request Buffer Flush register.
*/
void sn_dma_flush(u64 addr)
{
nasid_t nasid;
int is_tio;
int wid_num;
int i, j;
unsigned long flags;
u64 itte;
struct hubdev_info *hubinfo;
struct sn_flush_device_kernel *p;
struct sn_flush_device_common *common;
struct sn_flush_nasid_entry *flush_nasid_list;
if (!sn_ioif_inited)
return;
nasid = NASID_GET(addr);
if (-1 == nasid_to_cnodeid(nasid))
return;
hubinfo = (NODEPDA(nasid_to_cnodeid(nasid)))->pdinfo;
BUG_ON(!hubinfo);
flush_nasid_list = &hubinfo->hdi_flush_nasid_list;
if (flush_nasid_list->widget_p == NULL)
return;
is_tio = (nasid & 1);
if (is_tio) {
int itte_index;
if (TIO_HWIN(addr))
itte_index = 0;
else if (TIO_BWIN_WINDOWNUM(addr))
itte_index = TIO_BWIN_WINDOWNUM(addr);
else
itte_index = -1;
if (itte_index >= 0) {
itte = flush_nasid_list->iio_itte[itte_index];
if (! TIO_ITTE_VALID(itte))
return;
wid_num = TIO_ITTE_WIDGET(itte);
} else
wid_num = TIO_SWIN_WIDGETNUM(addr);
} else {
if (BWIN_WINDOWNUM(addr)) {
itte = flush_nasid_list->iio_itte[BWIN_WINDOWNUM(addr)];
wid_num = IIO_ITTE_WIDGET(itte);
} else
wid_num = SWIN_WIDGETNUM(addr);
}
if (flush_nasid_list->widget_p[wid_num] == NULL)
return;
p = &flush_nasid_list->widget_p[wid_num][0];
/* find a matching BAR */
for (i = 0; i < DEV_PER_WIDGET; i++,p++) {
common = p->common;
for (j = 0; j < PCI_ROM_RESOURCE; j++) {
if (common->sfdl_bar_list[j].start == 0)
break;
if (addr >= common->sfdl_bar_list[j].start
&& addr <= common->sfdl_bar_list[j].end)
break;
}
if (j < PCI_ROM_RESOURCE && common->sfdl_bar_list[j].start != 0)
break;
}
/* if no matching BAR, return without doing anything. */
if (i == DEV_PER_WIDGET)
return;
/*
* For TIOCP use the Device(x) Write Request Buffer Flush Bridge
* register since it ensures the data has entered the coherence
* domain, unlike PIC.
*/
if (is_tio) {
/*
* Note: devices behind TIOCE should never be matched in the
* above code, and so the following code is PIC/CP centric.
* If CE ever needs the sn_dma_flush mechanism, we will have
* to account for that here and in tioce_bus_fixup().
*/
u32 tio_id = HUB_L(TIO_IOSPACE_ADDR(nasid, TIO_NODE_ID));
u32 revnum = XWIDGET_PART_REV_NUM(tio_id);
/* TIOCP BRINGUP WAR (PV907516): Don't write buffer flush reg */
if ((1 << XWIDGET_PART_REV_NUM_REV(revnum)) & PV907516) {
return;
} else {
pcireg_wrb_flush_get(common->sfdl_pcibus_info,
(common->sfdl_slot - 1));
}
} else {
spin_lock_irqsave(&p->sfdl_flush_lock, flags);
*common->sfdl_flush_addr = 0;
/* force an interrupt. */
*(volatile u32 *)(common->sfdl_force_int_addr) = 1;
/* wait for the interrupt to come back. */
while (*(common->sfdl_flush_addr) != 0x10f)
cpu_relax();
/* okay, everything is synched up. */
spin_unlock_irqrestore(&p->sfdl_flush_lock, flags);
}
return;
}
/*
* DMA interfaces. Called from pci_dma.c routines.
*/
dma_addr_t
pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size, int dma_flags)
{
dma_addr_t dma_handle;
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
/* SN cannot support DMA addresses smaller than 32 bits. */
if (hwdev->dma_mask < 0x7fffffff) {
return 0;
}
if (hwdev->dma_mask == ~0UL) {
/*
* Handle the most common case: 64 bit cards. This
* call should always succeed.
*/
dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
PCI64_ATTR_PREF, dma_flags);
} else {
/* Handle 32-63 bit cards via direct mapping */
dma_handle = pcibr_dmatrans_direct32(pcidev_info, phys_addr,
size, 0, dma_flags);
if (!dma_handle) {
/*
* It is a 32 bit card and we cannot do direct mapping,
* so we use an ATE.
*/
dma_handle = pcibr_dmamap_ate32(pcidev_info, phys_addr,
size, PCI32_ATE_PREF,
dma_flags);
}
}
return dma_handle;
}
dma_addr_t
pcibr_dma_map_consistent(struct pci_dev * hwdev, unsigned long phys_addr,
size_t size, int dma_flags)
{
dma_addr_t dma_handle;
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
if (hwdev->dev.coherent_dma_mask == ~0UL) {
dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
PCI64_ATTR_BAR, dma_flags);
} else {
dma_handle = (dma_addr_t) pcibr_dmamap_ate32(pcidev_info,
phys_addr, size,
PCI32_ATE_BAR, dma_flags);
}
return dma_handle;
}
EXPORT_SYMBOL(sn_dma_flush);

View file

@@ -1,265 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001-2004, 2006 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/export.h>
#include <asm/sn/addrs.h>
#include <asm/sn/geo.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/pic.h>
#include <asm/sn/sn2/sn_hwperf.h>
#include "xtalk/xwidgetdev.h"
#include "xtalk/hubdev.h"
int
sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp,
char **ssdt)
{
struct ia64_sal_retval ret_stuff;
u64 busnum;
u64 segment;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
segment = soft->pbi_buscommon.bs_persist_segment;
busnum = soft->pbi_buscommon.bs_persist_busnum;
SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, segment,
busnum, (u64) device, (u64) resp, (u64)ia64_tpa(ssdt),
0, 0);
return (int)ret_stuff.v0;
}
int
sal_pcibr_slot_disable(struct pcibus_info *soft, int device, int action,
void *resp)
{
struct ia64_sal_retval ret_stuff;
u64 busnum;
u64 segment;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
segment = soft->pbi_buscommon.bs_persist_segment;
busnum = soft->pbi_buscommon.bs_persist_busnum;
SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_DISABLE,
segment, busnum, (u64) device, (u64) action,
(u64) resp, 0, 0);
return (int)ret_stuff.v0;
}
static int sal_pcibr_error_interrupt(struct pcibus_info *soft)
{
struct ia64_sal_retval ret_stuff;
u64 busnum;
int segment;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
segment = soft->pbi_buscommon.bs_persist_segment;
busnum = soft->pbi_buscommon.bs_persist_busnum;
SAL_CALL_NOLOCK(ret_stuff,
(u64) SN_SAL_IOIF_ERROR_INTERRUPT,
(u64) segment, (u64) busnum, 0, 0, 0, 0, 0);
return (int)ret_stuff.v0;
}
u16 sn_ioboard_to_pci_bus(struct pci_bus *pci_bus)
{
long rc;
u16 uninitialized_var(ioboard); /* GCC be quiet */
nasid_t nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base);
rc = ia64_sn_sysctl_ioboard_get(nasid, &ioboard);
if (rc) {
printk(KERN_WARNING "ia64_sn_sysctl_ioboard_get failed: %ld\n",
rc);
return 0;
}
return ioboard;
}
/*
* PCI Bridge Error interrupt handler. Gets invoked whenever a PCI
* bridge sends an error interrupt.
*/
static irqreturn_t
pcibr_error_intr_handler(int irq, void *arg)
{
struct pcibus_info *soft = arg;
if (sal_pcibr_error_interrupt(soft) < 0)
panic("pcibr_error_intr_handler(): Fatal Bridge Error");
return IRQ_HANDLED;
}
void *
pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
{
int nasid, cnode, j;
struct hubdev_info *hubdev_info;
struct pcibus_info *soft;
struct sn_flush_device_kernel *sn_flush_device_kernel;
struct sn_flush_device_common *common;
if (! IS_PCI_BRIDGE_ASIC(prom_bussoft->bs_asic_type)) {
return NULL;
}
/*
* Allocate kernel bus soft and copy from prom.
*/
soft = kmemdup(prom_bussoft, sizeof(struct pcibus_info), GFP_KERNEL);
if (!soft) {
return NULL;
}
soft->pbi_buscommon.bs_base = (unsigned long)
ioremap(REGION_OFFSET(soft->pbi_buscommon.bs_base),
sizeof(struct pic));
spin_lock_init(&soft->pbi_lock);
/*
* register the bridge's error interrupt handler
*/
if (request_irq(SGI_PCIASIC_ERROR, pcibr_error_intr_handler,
IRQF_SHARED, "PCIBR error", (void *)(soft))) {
printk(KERN_WARNING
"pcibr cannot allocate interrupt for error handler\n");
}
irq_set_handler(SGI_PCIASIC_ERROR, handle_level_irq);
sn_set_err_irq_affinity(SGI_PCIASIC_ERROR);
/*
* Update the Bridge with the "kernel" pagesize
*/
if (PAGE_SIZE < 16384) {
pcireg_control_bit_clr(soft, PCIBR_CTRL_PAGE_SIZE);
} else {
pcireg_control_bit_set(soft, PCIBR_CTRL_PAGE_SIZE);
}
nasid = NASID_GET(soft->pbi_buscommon.bs_base);
cnode = nasid_to_cnodeid(nasid);
hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
if (hubdev_info->hdi_flush_nasid_list.widget_p) {
sn_flush_device_kernel = hubdev_info->hdi_flush_nasid_list.
widget_p[(int)soft->pbi_buscommon.bs_xid];
if (sn_flush_device_kernel) {
for (j = 0; j < DEV_PER_WIDGET;
j++, sn_flush_device_kernel++) {
common = sn_flush_device_kernel->common;
if (common->sfdl_slot == -1)
continue;
if ((common->sfdl_persistent_segment ==
soft->pbi_buscommon.bs_persist_segment) &&
(common->sfdl_persistent_busnum ==
soft->pbi_buscommon.bs_persist_busnum))
common->sfdl_pcibus_info =
soft;
}
}
}
/* Setup the PMU ATE map */
soft->pbi_int_ate_resource.lowest_free_index = 0;
soft->pbi_int_ate_resource.ate =
kcalloc(soft->pbi_int_ate_size, sizeof(u64), GFP_KERNEL);
if (!soft->pbi_int_ate_resource.ate) {
kfree(soft);
return NULL;
}
return soft;
}
void pcibr_force_interrupt(struct sn_irq_info *sn_irq_info)
{
struct pcidev_info *pcidev_info;
struct pcibus_info *pcibus_info;
int bit = sn_irq_info->irq_int_bit;
if (! sn_irq_info->irq_bridge)
return;
pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
if (pcidev_info) {
pcibus_info =
(struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
pdi_pcibus_info;
pcireg_force_intr_set(pcibus_info, bit);
}
}
void pcibr_target_interrupt(struct sn_irq_info *sn_irq_info)
{
struct pcidev_info *pcidev_info;
struct pcibus_info *pcibus_info;
int bit = sn_irq_info->irq_int_bit;
u64 xtalk_addr = sn_irq_info->irq_xtalkaddr;
pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
if (pcidev_info) {
pcibus_info =
(struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
pdi_pcibus_info;
/* Disable the device's IRQ */
pcireg_intr_enable_bit_clr(pcibus_info, (1 << bit));
/* Change the device's IRQ */
pcireg_intr_addr_addr_set(pcibus_info, bit, xtalk_addr);
/* Re-enable the device's IRQ */
pcireg_intr_enable_bit_set(pcibus_info, (1 << bit));
pcibr_force_interrupt(sn_irq_info);
}
}
/*
* Provider entries for PIC/CP
*/
struct sn_pcibus_provider pcibr_provider = {
.dma_map = pcibr_dma_map,
.dma_map_consistent = pcibr_dma_map_consistent,
.dma_unmap = pcibr_dma_unmap,
.bus_fixup = pcibr_bus_fixup,
.force_interrupt = pcibr_force_interrupt,
.target_interrupt = pcibr_target_interrupt
};
int
pcibr_init_provider(void)
{
sn_pci_provider[PCIIO_ASIC_TYPE_PIC] = &pcibr_provider;
sn_pci_provider[PCIIO_ASIC_TYPE_TIOCP] = &pcibr_provider;
return 0;
}
EXPORT_SYMBOL_GPL(sal_pcibr_slot_enable);
EXPORT_SYMBOL_GPL(sal_pcibr_slot_disable);
EXPORT_SYMBOL_GPL(sn_ioboard_to_pci_bus);

View file

@@ -1,285 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/interrupt.h>
#include <linux/types.h>
#include <asm/sn/io.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/pic.h>
#include <asm/sn/tiocp.h>
union br_ptr {
struct tiocp tio;
struct pic pic;
};
/*
* Control Register Access -- Read/Write 0000_0020
*/
void pcireg_control_bit_clr(struct pcibus_info *pcibus_info, u64 bits)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
__sn_clrq_relaxed(&ptr->tio.cp_control, bits);
break;
case PCIBR_BRIDGETYPE_PIC:
__sn_clrq_relaxed(&ptr->pic.p_wid_control, bits);
break;
default:
panic
("pcireg_control_bit_clr: unknown bridgetype bridge 0x%p",
ptr);
}
}
}
void pcireg_control_bit_set(struct pcibus_info *pcibus_info, u64 bits)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
__sn_setq_relaxed(&ptr->tio.cp_control, bits);
break;
case PCIBR_BRIDGETYPE_PIC:
__sn_setq_relaxed(&ptr->pic.p_wid_control, bits);
break;
default:
panic
("pcireg_control_bit_set: unknown bridgetype bridge 0x%p",
ptr);
}
}
}
/*
* PCI/PCIX Target Flush Register Access -- Read Only 0000_0050
*/
u64 pcireg_tflush_get(struct pcibus_info *pcibus_info)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
u64 ret = 0;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
ret = __sn_readq_relaxed(&ptr->tio.cp_tflush);
break;
case PCIBR_BRIDGETYPE_PIC:
ret = __sn_readq_relaxed(&ptr->pic.p_wid_tflush);
break;
default:
panic
("pcireg_tflush_get: unknown bridgetype bridge 0x%p",
ptr);
}
}
/* Read of the Target Flush should always return zero */
if (ret != 0)
panic("pcireg_tflush_get:Target Flush failed\n");
return ret;
}
/*
* Interrupt Status Register Access -- Read Only 0000_0100
*/
u64 pcireg_intr_status_get(struct pcibus_info * pcibus_info)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
u64 ret = 0;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
ret = __sn_readq_relaxed(&ptr->tio.cp_int_status);
break;
case PCIBR_BRIDGETYPE_PIC:
ret = __sn_readq_relaxed(&ptr->pic.p_int_status);
break;
default:
panic
("pcireg_intr_status_get: unknown bridgetype bridge 0x%p",
ptr);
}
}
return ret;
}
/*
* Interrupt Enable Register Access -- Read/Write 0000_0108
*/
void pcireg_intr_enable_bit_clr(struct pcibus_info *pcibus_info, u64 bits)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
__sn_clrq_relaxed(&ptr->tio.cp_int_enable, bits);
break;
case PCIBR_BRIDGETYPE_PIC:
__sn_clrq_relaxed(&ptr->pic.p_int_enable, bits);
break;
default:
panic
("pcireg_intr_enable_bit_clr: unknown bridgetype bridge 0x%p",
ptr);
}
}
}
void pcireg_intr_enable_bit_set(struct pcibus_info *pcibus_info, u64 bits)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
__sn_setq_relaxed(&ptr->tio.cp_int_enable, bits);
break;
case PCIBR_BRIDGETYPE_PIC:
__sn_setq_relaxed(&ptr->pic.p_int_enable, bits);
break;
default:
panic
("pcireg_intr_enable_bit_set: unknown bridgetype bridge 0x%p",
ptr);
}
}
}
/*
* Intr Host Address Register (int_addr) -- Read/Write 0000_0130 - 0000_0168
*/
void pcireg_intr_addr_addr_set(struct pcibus_info *pcibus_info, int int_n,
u64 addr)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
__sn_clrq_relaxed(&ptr->tio.cp_int_addr[int_n],
TIOCP_HOST_INTR_ADDR);
__sn_setq_relaxed(&ptr->tio.cp_int_addr[int_n],
(addr & TIOCP_HOST_INTR_ADDR));
break;
case PCIBR_BRIDGETYPE_PIC:
__sn_clrq_relaxed(&ptr->pic.p_int_addr[int_n],
PIC_HOST_INTR_ADDR);
__sn_setq_relaxed(&ptr->pic.p_int_addr[int_n],
(addr & PIC_HOST_INTR_ADDR));
break;
default:
panic
("pcireg_intr_addr_addr_get: unknown bridgetype bridge 0x%p",
ptr);
}
}
}
/*
* Force Interrupt Register Access -- Write Only 0000_01C0 - 0000_01F8
*/
void pcireg_force_intr_set(struct pcibus_info *pcibus_info, int int_n)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
writeq(1, &ptr->tio.cp_force_pin[int_n]);
break;
case PCIBR_BRIDGETYPE_PIC:
writeq(1, &ptr->pic.p_force_pin[int_n]);
break;
default:
panic
("pcireg_force_intr_set: unknown bridgetype bridge 0x%p",
ptr);
}
}
}
/*
* Device(x) Write Buffer Flush Reg Access -- Read Only 0000_0240 - 0000_0258
*/
u64 pcireg_wrb_flush_get(struct pcibus_info *pcibus_info, int device)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
u64 ret = 0;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
ret =
__sn_readq_relaxed(&ptr->tio.cp_wr_req_buf[device]);
break;
case PCIBR_BRIDGETYPE_PIC:
ret =
__sn_readq_relaxed(&ptr->pic.p_wr_req_buf[device]);
break;
default:
panic("pcireg_wrb_flush_get: unknown bridgetype bridge 0x%p", ptr);
}
}
/* Read of the Write Buffer Flush should always return zero */
return ret;
}
void pcireg_int_ate_set(struct pcibus_info *pcibus_info, int ate_index,
u64 val)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
writeq(val, &ptr->tio.cp_int_ate_ram[ate_index]);
break;
case PCIBR_BRIDGETYPE_PIC:
writeq(val, &ptr->pic.p_int_ate_ram[ate_index]);
break;
default:
panic
("pcireg_int_ate_set: unknown bridgetype bridge 0x%p",
ptr);
}
}
}
u64 __iomem *pcireg_int_ate_addr(struct pcibus_info *pcibus_info, int ate_index)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
u64 __iomem *ret = NULL;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
ret = &ptr->tio.cp_int_ate_ram[ate_index];
break;
case PCIBR_BRIDGETYPE_PIC:
ret = &ptr->pic.p_int_ate_ram[ate_index];
break;
default:
panic
("pcireg_int_ate_addr: unknown bridgetype bridge 0x%p",
ptr);
}
}
return ret;
}

View file

@@ -1,677 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003-2005 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/addrs.h>
#include <asm/sn/io.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/tioca_provider.h>
u32 tioca_gart_found;
EXPORT_SYMBOL(tioca_gart_found); /* used by agp-sgi */
LIST_HEAD(tioca_list);
EXPORT_SYMBOL(tioca_list); /* used by agp-sgi */
static int tioca_gart_init(struct tioca_kernel *);
/**
* tioca_gart_init - Initialize SGI TIOCA GART
* @tioca_kern: ptr to the kernel struct identifying the CA to set up
*
* If the indicated tioca has devices present, initialize its associated
* GART MMRs and kernel memory.
*/
static int
tioca_gart_init(struct tioca_kernel *tioca_kern)
{
u64 ap_reg;
u64 offset;
struct page *tmp;
struct tioca_common *tioca_common;
struct tioca __iomem *ca_base;
tioca_common = tioca_kern->ca_common;
ca_base = (struct tioca __iomem *)tioca_common->ca_common.bs_base;
if (list_empty(tioca_kern->ca_devices))
return 0;
ap_reg = 0;
/*
* Validate aperture size
*/
switch (CA_APERATURE_SIZE >> 20) {
case 4:
ap_reg |= (0x3ff << CA_GART_AP_SIZE_SHFT); /* 4MB */
break;
case 8:
ap_reg |= (0x3fe << CA_GART_AP_SIZE_SHFT); /* 8MB */
break;
case 16:
ap_reg |= (0x3fc << CA_GART_AP_SIZE_SHFT); /* 16MB */
break;
case 32:
ap_reg |= (0x3f8 << CA_GART_AP_SIZE_SHFT); /* 32 MB */
break;
case 64:
ap_reg |= (0x3f0 << CA_GART_AP_SIZE_SHFT); /* 64 MB */
break;
case 128:
ap_reg |= (0x3e0 << CA_GART_AP_SIZE_SHFT); /* 128 MB */
break;
case 256:
ap_reg |= (0x3c0 << CA_GART_AP_SIZE_SHFT); /* 256 MB */
break;
case 512:
ap_reg |= (0x380 << CA_GART_AP_SIZE_SHFT); /* 512 MB */
break;
case 1024:
ap_reg |= (0x300 << CA_GART_AP_SIZE_SHFT); /* 1GB */
break;
case 2048:
ap_reg |= (0x200 << CA_GART_AP_SIZE_SHFT); /* 2GB */
break;
case 4096:
ap_reg |= (0x000 << CA_GART_AP_SIZE_SHFT); /* 4 GB */
break;
default:
printk(KERN_ERR "%s: Invalid CA_APERATURE_SIZE "
"0x%lx\n", __func__, (ulong) CA_APERATURE_SIZE);
return -1;
}
/*
* Set up other aperture parameters
*/
if (PAGE_SIZE >= 16384) {
tioca_kern->ca_ap_pagesize = 16384;
ap_reg |= CA_GART_PAGE_SIZE;
} else {
tioca_kern->ca_ap_pagesize = 4096;
}
tioca_kern->ca_ap_size = CA_APERATURE_SIZE;
tioca_kern->ca_ap_bus_base = CA_APERATURE_BASE;
tioca_kern->ca_gart_entries =
tioca_kern->ca_ap_size / tioca_kern->ca_ap_pagesize;
ap_reg |= (CA_GART_AP_ENB_AGP | CA_GART_AP_ENB_PCI);
ap_reg |= tioca_kern->ca_ap_bus_base;
/*
* Allocate and set up the GART
*/
tioca_kern->ca_gart_size = tioca_kern->ca_gart_entries * sizeof(u64);
tmp =
alloc_pages_node(tioca_kern->ca_closest_node,
GFP_KERNEL | __GFP_ZERO,
get_order(tioca_kern->ca_gart_size));
if (!tmp) {
printk(KERN_ERR "%s: Could not allocate "
"%llu bytes (order %d) for GART\n",
__func__,
tioca_kern->ca_gart_size,
get_order(tioca_kern->ca_gart_size));
return -ENOMEM;
}
tioca_kern->ca_gart = page_address(tmp);
tioca_kern->ca_gart_coretalk_addr =
PHYS_TO_TIODMA(virt_to_phys(tioca_kern->ca_gart));
/*
* Compute PCI/AGP convenience fields
*/
offset = CA_PCI32_MAPPED_BASE - CA_APERATURE_BASE;
tioca_kern->ca_pciap_base = CA_PCI32_MAPPED_BASE;
tioca_kern->ca_pciap_size = CA_PCI32_MAPPED_SIZE;
tioca_kern->ca_pcigart_start = offset / tioca_kern->ca_ap_pagesize;
tioca_kern->ca_pcigart_base =
tioca_kern->ca_gart_coretalk_addr + offset;
tioca_kern->ca_pcigart =
&tioca_kern->ca_gart[tioca_kern->ca_pcigart_start];
tioca_kern->ca_pcigart_entries =
tioca_kern->ca_pciap_size / tioca_kern->ca_ap_pagesize;
tioca_kern->ca_pcigart_pagemap =
kzalloc(tioca_kern->ca_pcigart_entries / 8, GFP_KERNEL);
if (!tioca_kern->ca_pcigart_pagemap) {
free_pages((unsigned long)tioca_kern->ca_gart,
get_order(tioca_kern->ca_gart_size));
return -1;
}
offset = CA_AGP_MAPPED_BASE - CA_APERATURE_BASE;
tioca_kern->ca_gfxap_base = CA_AGP_MAPPED_BASE;
tioca_kern->ca_gfxap_size = CA_AGP_MAPPED_SIZE;
tioca_kern->ca_gfxgart_start = offset / tioca_kern->ca_ap_pagesize;
tioca_kern->ca_gfxgart_base =
tioca_kern->ca_gart_coretalk_addr + offset;
tioca_kern->ca_gfxgart =
&tioca_kern->ca_gart[tioca_kern->ca_gfxgart_start];
tioca_kern->ca_gfxgart_entries =
tioca_kern->ca_gfxap_size / tioca_kern->ca_ap_pagesize;
/*
* various control settings:
* use agp op-combining
* use GET semantics to fetch memory
* participate in coherency domain
* DISABLE GART PREFETCHING due to hw bug tracked in SGI PV930029
*/
__sn_setq_relaxed(&ca_base->ca_control1,
CA_AGPDMA_OP_ENB_COMBDELAY); /* PV895469 ? */
__sn_clrq_relaxed(&ca_base->ca_control2, CA_GART_MEM_PARAM);
__sn_setq_relaxed(&ca_base->ca_control2,
(0x2ull << CA_GART_MEM_PARAM_SHFT));
tioca_kern->ca_gart_iscoherent = 1;
__sn_clrq_relaxed(&ca_base->ca_control2,
(CA_GART_WR_PREFETCH_ENB | CA_GART_RD_PREFETCH_ENB));
/*
* Unmask GART fetch error interrupts. Clear residual errors first.
*/
writeq(CA_GART_FETCH_ERR, &ca_base->ca_int_status_alias);
writeq(CA_GART_FETCH_ERR, &ca_base->ca_mult_error_alias);
__sn_clrq_relaxed(&ca_base->ca_int_mask, CA_GART_FETCH_ERR);
/*
* Program the aperture and GART registers in TIOCA
*/
writeq(ap_reg, &ca_base->ca_gart_aperature);
writeq(tioca_kern->ca_gart_coretalk_addr|1, &ca_base->ca_gart_ptr_table);
return 0;
}
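/*
 * Editorial note, not part of the original file: the aperture-size
 * switch in tioca_gart_init() above follows a single formula -- the
 * CA_GART_AP_SIZE field is (0x400 - size_in_MB / 4) for every supported
 * power-of-two size (4MB -> 0x3ff, 256MB -> 0x3c0, 4GB -> 0x000).
 * A minimal sketch of the same encoding; the helper is hypothetical:
 */
static inline u64 example_ap_size_field(unsigned long size_mb)
{
	/* only valid for the power-of-two sizes the switch accepts */
	return ((u64)(0x400 - size_mb / 4)) << CA_GART_AP_SIZE_SHFT;
}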
/**
* tioca_fastwrite_enable - enable AGP FW for a tioca and its functions
* @tioca_kern: structure representing the CA
*
* Given a CA, scan all attached functions making sure they all support
* FastWrite. If so, enable FastWrite for all functions and the CA itself.
*/
void
tioca_fastwrite_enable(struct tioca_kernel *tioca_kern)
{
int cap_ptr;
u32 reg;
struct tioca __iomem *tioca_base;
struct pci_dev *pdev;
struct tioca_common *common;
common = tioca_kern->ca_common;
/*
* Scan all vga controllers on this bus making sure they all
* support FW. If not, return.
*/
list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) {
if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8))
continue;
cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
if (!cap_ptr)
return; /* no AGP CAP means no FW */
pci_read_config_dword(pdev, cap_ptr + PCI_AGP_STATUS, &reg);
if (!(reg & PCI_AGP_STATUS_FW))
return; /* function doesn't support FW */
}
/*
* Set FW for all VGA functions
*/
list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) {
if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8))
continue;
cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
pci_read_config_dword(pdev, cap_ptr + PCI_AGP_COMMAND, &reg);
reg |= PCI_AGP_COMMAND_FW;
pci_write_config_dword(pdev, cap_ptr + PCI_AGP_COMMAND, reg);
}
/*
* Set ca's fw to match
*/
tioca_base = (struct tioca __iomem*)common->ca_common.bs_base;
__sn_setq_relaxed(&tioca_base->ca_control1, CA_AGP_FW_ENABLE);
}
EXPORT_SYMBOL(tioca_fastwrite_enable); /* used by agp-sgi */
/**
* tioca_dma_d64 - create a DMA mapping using 64-bit direct mode
* @paddr: system physical address
*
* Map @paddr into 64-bit CA bus space. No device context is necessary.
* Bits 53:0 come from the coretalk address. We just need to mask in the
* following optional bits of the 64-bit pci address:
*
* 63:60 - Coretalk Packet Type: 0x1 for Mem Get/Put (coherent),
*         0x2 for PIO (non-coherent).  We will always use 0x1.
* 55:55 - Swap bytes (currently unused)
*/
static u64
tioca_dma_d64(unsigned long paddr)
{
dma_addr_t bus_addr;
bus_addr = PHYS_TO_TIODMA(paddr);
BUG_ON(!bus_addr);
BUG_ON(bus_addr >> 54);
/* Set upper nibble to Cache Coherent Memory op */
bus_addr |= (1UL << 60);
return bus_addr;
}
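/*
 * Editorial note, not part of the original file: a worked example of the
 * mapping above.  For a coretalk address of, say, 0x0000_00ab_cdef_1234,
 * tioca_dma_d64() simply ORs the coherent Mem Get/Put packet type into
 * bits 63:60, yielding 0x1000_00ab_cdef_1234 -- no per-device state is
 * consulted.
 */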
/**
* tioca_dma_d48 - create a DMA mapping using 48-bit direct mode
* @pdev: linux pci_dev representing the function
* @paddr: system physical address
*
* Map @paddr into 64-bit bus space of the CA associated with @pdev.
*
* The CA agp 48 bit direct address falls out as follows:
*
* When direct mapping AGP addresses, the 48 bit AGP address is
* constructed as follows:
*
* [47:40] - Low 8 bits of the page Node ID extracted from coretalk
* address [47:40]. The upper 8 node bits are fixed
* and come from the xxx register bits [5:0]
* [39:38] - Chiplet ID extracted from coretalk address [39:38]
* [37:00] - node offset extracted from coretalk address [37:00]
*
* Since the node id in general will be non-zero, and the chiplet id
* will always be non-zero, it follows that the device must support
* a dma mask of at least 0xffffffffff (40 bits) to target node 0
* and in general should be 0xffffffffffff (48 bits) to target nodes
* up to 255. Nodes above 255 need the support of the xxx register,
* and so a given CA can only directly target nodes in the range
* xxx - xxx+255.
*/
static u64
tioca_dma_d48(struct pci_dev *pdev, u64 paddr)
{
struct tioca_common *tioca_common;
struct tioca __iomem *ca_base;
u64 ct_addr;
dma_addr_t bus_addr;
u32 node_upper;
u64 agp_dma_extn;
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);
tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
ca_base = (struct tioca __iomem *)tioca_common->ca_common.bs_base;
ct_addr = PHYS_TO_TIODMA(paddr);
if (!ct_addr)
return 0;
bus_addr = (dma_addr_t) (ct_addr & 0xffffffffffffUL);
node_upper = ct_addr >> 48;
if (node_upper > 64) {
printk(KERN_ERR "%s: coretalk addr 0x%p node id out "
"of range\n", __func__, (void *)ct_addr);
return 0;
}
agp_dma_extn = __sn_readq_relaxed(&ca_base->ca_agp_dma_addr_extn);
if (node_upper != (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)) {
printk(KERN_ERR "%s: coretalk upper node (%u) "
"mismatch with ca_agp_dma_addr_extn (%llu)\n",
__func__,
node_upper, (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT));
return 0;
}
return bus_addr;
}
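/*
 * Editorial sketch, not part of the original file: splitting a coretalk
 * address into the fields described in the tioca_dma_d48() comment
 * above.  The helper and output names are illustrative, not taken from
 * the SN2 headers.
 */
static inline void example_split_ct_addr(u64 ct_addr, u32 *node_low,
					 u32 *chiplet, u64 *offset)
{
	*node_low = (ct_addr >> 40) & 0xff;		/* bits [47:40] */
	*chiplet = (ct_addr >> 38) & 0x3;		/* bits [39:38] */
	*offset = ct_addr & ((1ULL << 38) - 1);		/* bits [37:0] */
}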
/**
* tioca_dma_mapped - create a DMA mapping using a CA GART
* @pdev: linux pci_dev representing the function
* @paddr: host physical address to map
* @req_size: len (bytes) to map
*
* Map @paddr into CA address space using the GART mechanism. The mapped
* dma_addr_t is guaranteed to be contiguous in CA bus space.
*/
static dma_addr_t
tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size)
{
int ps, ps_shift, entry, entries, mapsize;
u64 xio_addr, end_xio_addr;
struct tioca_common *tioca_common;
struct tioca_kernel *tioca_kern;
dma_addr_t bus_addr = 0;
struct tioca_dmamap *ca_dmamap;
void *map;
unsigned long flags;
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);
tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private;
xio_addr = PHYS_TO_TIODMA(paddr);
if (!xio_addr)
return 0;
spin_lock_irqsave(&tioca_kern->ca_lock, flags);
/*
* allocate a map struct
*/
ca_dmamap = kzalloc(sizeof(struct tioca_dmamap), GFP_ATOMIC);
if (!ca_dmamap)
goto map_return;
/*
* Locate free entries that can hold req_size. Account for
* unaligned start/length when allocating.
*/
ps = tioca_kern->ca_ap_pagesize; /* will be power of 2 */
ps_shift = ffs(ps) - 1;
end_xio_addr = xio_addr + req_size - 1;
entries = (end_xio_addr >> ps_shift) - (xio_addr >> ps_shift) + 1;
map = tioca_kern->ca_pcigart_pagemap;
mapsize = tioca_kern->ca_pcigart_entries;
entry = bitmap_find_next_zero_area(map, mapsize, 0, entries, 0);
if (entry >= mapsize) {
kfree(ca_dmamap);
goto map_return;
}
bitmap_set(map, entry, entries);
bus_addr = tioca_kern->ca_pciap_base + (entry * ps);
ca_dmamap->cad_dma_addr = bus_addr;
ca_dmamap->cad_gart_size = entries;
ca_dmamap->cad_gart_entry = entry;
list_add(&ca_dmamap->cad_list, &tioca_kern->ca_dmamaps);
if (xio_addr % ps) {
tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr);
bus_addr += xio_addr & (ps - 1);
xio_addr &= ~(ps - 1);
xio_addr += ps;
entry++;
}
while (xio_addr < end_xio_addr) {
tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr);
xio_addr += ps;
entry++;
}
tioca_tlbflush(tioca_kern);
map_return:
spin_unlock_irqrestore(&tioca_kern->ca_lock, flags);
return bus_addr;
}
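/*
 * Editorial note, not part of the original file: the entry count in
 * tioca_dma_mapped() rounds both ends of the request down to GART page
 * boundaries before counting.  Worked example with a 16KB aperture page
 * (ps_shift = 14): a 4-byte request at xio_addr 0x3ffe ends at 0x4001
 * and therefore straddles a page boundary, so
 *     entries = (0x4001 >> 14) - (0x3ffe >> 14) + 1 = 1 - 0 + 1 = 2.
 */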
/**
* tioca_dma_unmap - release CA mapping resources
* @pdev: linux pci_dev representing the function
* @bus_addr: bus address returned by an earlier tioca_dma_map
* @dir: mapping direction (unused)
*
* Locate mapping resources associated with @bus_addr and release them.
* For mappings created using the direct modes (64 or 48) there are no
* resources to release.
*/
static void
tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
{
int i, entry;
struct tioca_common *tioca_common;
struct tioca_kernel *tioca_kern;
struct tioca_dmamap *map;
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);
unsigned long flags;
tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private;
	/* return straight away if this isn't a mapped address */
if (bus_addr < tioca_kern->ca_pciap_base ||
bus_addr >= (tioca_kern->ca_pciap_base + tioca_kern->ca_pciap_size))
return;
spin_lock_irqsave(&tioca_kern->ca_lock, flags);
list_for_each_entry(map, &tioca_kern->ca_dmamaps, cad_list)
if (map->cad_dma_addr == bus_addr)
break;
BUG_ON(map == NULL);
entry = map->cad_gart_entry;
for (i = 0; i < map->cad_gart_size; i++, entry++) {
clear_bit(entry, tioca_kern->ca_pcigart_pagemap);
tioca_kern->ca_pcigart[entry] = 0;
}
tioca_tlbflush(tioca_kern);
list_del(&map->cad_list);
spin_unlock_irqrestore(&tioca_kern->ca_lock, flags);
kfree(map);
}
/**
* tioca_dma_map - map pages for PCI DMA
* @pdev: linux pci_dev representing the function
* @paddr: host physical address to map
* @byte_count: bytes to map
*
* This is the main wrapper for mapping host physical pages to CA PCI space.
* The mapping mode used is based on the device's dma_mask. As a last resort
* use the GART mapped mode.
*/
static u64
tioca_dma_map(struct pci_dev *pdev, unsigned long paddr, size_t byte_count, int dma_flags)
{
u64 mapaddr;
/*
* Not supported for now ...
*/
if (dma_flags & SN_DMA_MSI)
return 0;
/*
* If the card is 64- or 48-bit addressable, use a direct mapping.  32-bit
* direct is so restrictive w.r.t. where the memory resides that we don't
* use it even though the CA has some support.
*/
if (pdev->dma_mask == ~0UL)
mapaddr = tioca_dma_d64(paddr);
else if (pdev->dma_mask == 0xffffffffffffUL)
mapaddr = tioca_dma_d48(pdev, paddr);
else
mapaddr = 0;
/* Last resort ... use PCI portion of CA GART */
if (mapaddr == 0)
mapaddr = tioca_dma_mapped(pdev, paddr, byte_count);
return mapaddr;
}
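/*
 * Editorial sketch, not part of the original file: from a driver's point
 * of view, the dispatch in tioca_dma_map() above is steered entirely by
 * the DMA mask the driver registers.  The helper name is hypothetical;
 * pci_set_dma_mask() and DMA_BIT_MASK() are the standard kernel APIs of
 * this era (DMA_BIT_MASK(48) == 0xffffffffffff, selecting d48 mode).
 */
static int example_request_d48_mapping(struct pci_dev *pdev)
{
	/* a 48-bit mask steers tioca_dma_map() to tioca_dma_d48() */
	return pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
}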
/**
* tioca_error_intr_handler - SGI TIO CA error interrupt handler
* @irq: unused
* @arg: pointer to tioca_common struct for the given CA
*
* Handle a CA error interrupt. Simply a wrapper around a SAL call which
* defers processing to the SGI prom.
*/
static irqreturn_t
tioca_error_intr_handler(int irq, void *arg)
{
struct tioca_common *soft = arg;
struct ia64_sal_retval ret_stuff;
u64 segment;
u64 busnum;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
segment = soft->ca_common.bs_persist_segment;
busnum = soft->ca_common.bs_persist_busnum;
SAL_CALL_NOLOCK(ret_stuff,
(u64) SN_SAL_IOIF_ERROR_INTERRUPT,
segment, busnum, 0, 0, 0, 0, 0);
return IRQ_HANDLED;
}
/**
* tioca_bus_fixup - perform final PCI fixup for a TIO CA bus
* @prom_bussoft: Common prom/kernel struct representing the bus
*
* Replicates the tioca_common pointed to by @prom_bussoft in kernel
* space. Allocates and initializes a kernel-only area for a given CA,
* and sets up an irq for handling CA error interrupts.
*
* On successful setup, returns the kernel version of tioca_common back to
* the caller.
*/
static void *
tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
{
struct tioca_common *tioca_common;
struct tioca_kernel *tioca_kern;
struct pci_bus *bus;
/* sanity check prom rev */
if (is_shub1() && sn_sal_rev() < 0x0406) {
		printk(KERN_ERR "%s: SGI prom rev 4.06 or greater required "
		       "for tioca support\n", __func__);
return NULL;
}
/*
* Allocate kernel bus soft and copy from prom.
*/
tioca_common = kmemdup(prom_bussoft, sizeof(struct tioca_common),
GFP_KERNEL);
if (!tioca_common)
return NULL;
tioca_common->ca_common.bs_base = (unsigned long)
ioremap(REGION_OFFSET(tioca_common->ca_common.bs_base),
sizeof(struct tioca_common));
/* init kernel-private area */
tioca_kern = kzalloc(sizeof(struct tioca_kernel), GFP_KERNEL);
if (!tioca_kern) {
kfree(tioca_common);
return NULL;
}
tioca_kern->ca_common = tioca_common;
spin_lock_init(&tioca_kern->ca_lock);
INIT_LIST_HEAD(&tioca_kern->ca_dmamaps);
tioca_kern->ca_closest_node =
nasid_to_cnodeid(tioca_common->ca_closest_nasid);
tioca_common->ca_kernel_private = (u64) tioca_kern;
bus = pci_find_bus(tioca_common->ca_common.bs_persist_segment,
tioca_common->ca_common.bs_persist_busnum);
BUG_ON(!bus);
tioca_kern->ca_devices = &bus->devices;
/* init GART */
if (tioca_gart_init(tioca_kern) < 0) {
kfree(tioca_kern);
kfree(tioca_common);
return NULL;
}
tioca_gart_found++;
list_add(&tioca_kern->ca_list, &tioca_list);
if (request_irq(SGI_TIOCA_ERROR,
tioca_error_intr_handler,
IRQF_SHARED, "TIOCA error", (void *)tioca_common))
printk(KERN_WARNING
"%s: Unable to get irq %d. "
"Error interrupts won't be routed for TIOCA bus %d\n",
__func__, SGI_TIOCA_ERROR,
(int)tioca_common->ca_common.bs_persist_busnum);
irq_set_handler(SGI_TIOCA_ERROR, handle_level_irq);
sn_set_err_irq_affinity(SGI_TIOCA_ERROR);
/* Setup locality information */
controller->node = tioca_kern->ca_closest_node;
return tioca_common;
}
static struct sn_pcibus_provider tioca_pci_interfaces = {
.dma_map = tioca_dma_map,
.dma_map_consistent = tioca_dma_map,
.dma_unmap = tioca_dma_unmap,
.bus_fixup = tioca_bus_fixup,
.force_interrupt = NULL,
.target_interrupt = NULL
};
/**
* tioca_init_provider - init SN PCI provider ops for TIO CA
*/
int
tioca_init_provider(void)
{
sn_pci_provider[PCIIO_ASIC_TYPE_TIOCA] = &tioca_pci_interfaces;
return 0;
}

File diff suppressed because it is too large

View file

@ -17,11 +17,9 @@
DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);
#ifdef CONFIG_IA64_SGI_UV
int sn_prom_type;
long sn_coherency_id;
EXPORT_SYMBOL_GPL(sn_coherency_id);
#endif
struct redir_addr {
unsigned long redirect;

View file

@ -323,7 +323,7 @@ config ACPI_NUMA
bool "NUMA support"
depends on NUMA
depends on (X86 || IA64 || ARM64)
default y if IA64_GENERIC || IA64_SGI_SN2 || ARM64
default y if IA64_GENERIC || ARM64
config ACPI_CUSTOM_DSDT_FILE
string "Custom DSDT Table file to include"