Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2024-11-01 17:08:10 +00:00)
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] Convert ia64 to use int-ll64.h
  [IA64] Fix build error in paravirt_patchlist.c
  [IA64] ia64 does not need umount2() syscall
  [IA64] hook up new rt_tgsigqueueinfo syscall
  [IA64] msi_ia64.c dmar_msi_type should be static
  [IA64] remove obsolete hw_interrupt_type
  [IA64] remove obsolete irq_desc_t typedef
  [IA64] remove obsolete no_irq_type
  [IA64] unexport fpswa.h
commit 7cc4766219
49 changed files with 238 additions and 231 deletions
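A note on the first item in the shortlog: converting ia64 to <asm-generic/int-ll64.h> changes the kernel-internal u64/s64 types from plain 'long' to 'long long' (userspace keeps int-l64.h, as the types.h hunk below shows). The practical fallout is visible throughout the diff: printk/dev_info format strings for u64 values must become %llx/%llu, and many variables are switched to unsigned long where a %lx format or pointer arithmetic should stay unchanged. The small user-space sketch below is illustrative only (the USE_INT_L64 macro and the sample value are invented for the example) and just shows why the format strings move:

/* Illustrative sketch (not from the diff): how the u64 typedef choice
 * drives the printk format changes seen in the hunks below. */
#include <stdio.h>

#ifdef USE_INT_L64
typedef unsigned long u64;		/* old ia64 choice: 64-bit 'long', printed with %lx  */
#define U64_FMT "%lx"
#else
typedef unsigned long long u64;		/* int-ll64.h choice: 'long long', printed with %llx */
#define U64_FMT "%llx"
#endif

int main(void)
{
	u64 phys_addr = 0xdeadbeefULL;	/* arbitrary sample value */

	/* With the int-ll64.h definition, passing this to "%lx" would
	 * trigger a -Wformat warning, so formats move to "%llx". */
	printf("dropped @ " U64_FMT "\n", phys_addr);
	return 0;
}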
@@ -1787,7 +1787,7 @@ static struct ioc_iommu ioc_iommu_info[] __initdata = {
 };
 
 static struct ioc * __init
-ioc_init(u64 hpa, void *handle)
+ioc_init(unsigned long hpa, void *handle)
 {
 	struct ioc *ioc;
 	struct ioc_iommu *info;
@@ -27,7 +27,7 @@ hpsim_set_affinity_noop(unsigned int a, const struct cpumask *b)
 	return 0;
 }
 
-static struct hw_interrupt_type irq_type_hp_sim = {
+static struct irq_chip irq_type_hp_sim = {
 	.name = "hpsim",
 	.startup = hpsim_irq_startup,
 	.shutdown = hpsim_irq_noop,
@@ -41,12 +41,12 @@ static struct hw_interrupt_type irq_type_hp_sim = {
 void __init
 hpsim_irq_init (void)
 {
-	irq_desc_t *idesc;
+	struct irq_desc *idesc;
 	int i;
 
 	for (i = 0; i < NR_IRQS; ++i) {
 		idesc = irq_desc + i;
-		if (idesc->chip == &no_irq_type)
+		if (idesc->chip == &no_irq_chip)
 			idesc->chip = &irq_type_hp_sim;
 	}
 }
@@ -2,7 +2,6 @@ include include/asm-generic/Kbuild.asm
 
 header-y += break.h
 header-y += fpu.h
-header-y += fpswa.h
 header-y += ia64regs.h
 header-y += intel_intrin.h
 header-y += perfmon_default_smpl.h
@@ -388,7 +388,7 @@ register unsigned long ia64_r13 asm ("r13") __used;
 
 #define ia64_native_thash(addr) \
 ({ \
-	__u64 ia64_intri_res; \
+	unsigned long ia64_intri_res; \
 	asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
 	ia64_intri_res; \
 })
@@ -419,7 +419,7 @@ register unsigned long ia64_r13 asm ("r13") __used;
 
 #define ia64_tpa(addr) \
 ({ \
-	__u64 ia64_pa; \
+	unsigned long ia64_pa; \
 	asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory"); \
 	ia64_pa; \
 })
@@ -444,35 +444,35 @@ register unsigned long ia64_r13 asm ("r13") __used;
 
 #define ia64_native_get_cpuid(index) \
 ({ \
-	__u64 ia64_intri_res; \
+	unsigned long ia64_intri_res; \
 	asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \
 	ia64_intri_res; \
 })
 
 #define __ia64_get_dbr(index) \
 ({ \
-	__u64 ia64_intri_res; \
+	unsigned long ia64_intri_res; \
 	asm volatile ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
 	ia64_intri_res; \
 })
 
 #define ia64_get_ibr(index) \
 ({ \
-	__u64 ia64_intri_res; \
+	unsigned long ia64_intri_res; \
 	asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
 	ia64_intri_res; \
 })
 
 #define ia64_get_pkr(index) \
 ({ \
-	__u64 ia64_intri_res; \
+	unsigned long ia64_intri_res; \
 	asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
 	ia64_intri_res; \
 })
 
 #define ia64_get_pmc(index) \
 ({ \
-	__u64 ia64_intri_res; \
+	unsigned long ia64_intri_res; \
 	asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
 	ia64_intri_res; \
 })
@@ -480,14 +480,14 @@ register unsigned long ia64_r13 asm ("r13") __used;
 
 #define ia64_native_get_pmd(index) \
 ({ \
-	__u64 ia64_intri_res; \
+	unsigned long ia64_intri_res; \
 	asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
 	ia64_intri_res; \
 })
 
 #define ia64_native_get_rr(index) \
 ({ \
-	__u64 ia64_intri_res; \
+	unsigned long ia64_intri_res; \
 	asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index)); \
 	ia64_intri_res; \
 })
@@ -106,7 +106,7 @@ extern struct irq_cfg irq_cfg[NR_IRQS];
 #define irq_to_domain(x) irq_cfg[(x)].domain
 DECLARE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq);
 
-extern struct hw_interrupt_type irq_type_ia64_lsapic; /* CPU-internal interrupt controller */
+extern struct irq_chip irq_type_ia64_lsapic; /* CPU-internal interrupt controller */
 
 #ifdef CONFIG_PARAVIRT_GUEST
 #include <asm/paravirt.h>
@@ -146,7 +146,7 @@ static inline void ia64_native_resend_irq(unsigned int vector)
 * Default implementations for the irq-descriptor API:
 */
 
-extern irq_desc_t irq_desc[NR_IRQS];
+extern struct irq_desc irq_desc[NR_IRQS];
 
 #ifndef CONFIG_IA64_GENERIC
 static inline ia64_vector __ia64_irq_to_vector(int irq)
@@ -72,39 +72,39 @@ typedef struct ia64_mc_info_s {
 struct ia64_sal_os_state {
 
 	/* SAL to OS */
-	u64 os_gp; /* GP of the os registered with the SAL, physical */
-	u64 pal_proc; /* PAL_PROC entry point, physical */
-	u64 sal_proc; /* SAL_PROC entry point, physical */
-	u64 rv_rc; /* MCA - Rendezvous state, INIT - reason code */
-	u64 proc_state_param; /* from R18 */
-	u64 monarch; /* 1 for a monarch event, 0 for a slave */
+	unsigned long os_gp; /* GP of the os registered with the SAL, physical */
+	unsigned long pal_proc; /* PAL_PROC entry point, physical */
+	unsigned long sal_proc; /* SAL_PROC entry point, physical */
+	unsigned long rv_rc; /* MCA - Rendezvous state, INIT - reason code */
+	unsigned long proc_state_param; /* from R18 */
+	unsigned long monarch; /* 1 for a monarch event, 0 for a slave */
 
 	/* common */
-	u64 sal_ra; /* Return address in SAL, physical */
-	u64 sal_gp; /* GP of the SAL - physical */
+	unsigned long sal_ra; /* Return address in SAL, physical */
+	unsigned long sal_gp; /* GP of the SAL - physical */
 	pal_min_state_area_t *pal_min_state; /* from R17. physical in asm, virtual in C */
 	/* Previous values of IA64_KR(CURRENT) and IA64_KR(CURRENT_STACK).
 	 * Note: if the MCA/INIT recovery code wants to resume to a new context
 	 * then it must change these values to reflect the new kernel stack.
 	 */
-	u64 prev_IA64_KR_CURRENT; /* previous value of IA64_KR(CURRENT) */
-	u64 prev_IA64_KR_CURRENT_STACK;
+	unsigned long prev_IA64_KR_CURRENT; /* previous value of IA64_KR(CURRENT) */
+	unsigned long prev_IA64_KR_CURRENT_STACK;
 	struct task_struct *prev_task; /* previous task, NULL if it is not useful */
 	/* Some interrupt registers are not saved in minstate, pt_regs or
 	 * switch_stack. Because MCA/INIT can occur when interrupts are
 	 * disabled, we need to save the additional interrupt registers over
 	 * MCA/INIT and resume.
 	 */
-	u64 isr;
-	u64 ifa;
-	u64 itir;
-	u64 iipa;
-	u64 iim;
-	u64 iha;
+	unsigned long isr;
+	unsigned long ifa;
+	unsigned long itir;
+	unsigned long iipa;
+	unsigned long iim;
+	unsigned long iha;
 
 	/* OS to SAL */
-	u64 os_status; /* OS status to SAL, enum below */
-	u64 context; /* 0 if return to same context
+	unsigned long os_status; /* OS status to SAL, enum below */
+	unsigned long context; /* 0 if return to same context
 			1 if return to new context */
 };
 
@@ -150,7 +150,7 @@ extern void ia64_slave_init_handler(void);
 extern void ia64_mca_cmc_vector_setup(void);
 extern int ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *));
 extern void ia64_unreg_MCA_extension(void);
-extern u64 ia64_get_rnat(u64 *);
+extern unsigned long ia64_get_rnat(unsigned long *);
 extern void ia64_mca_printk(const char * fmt, ...)
 	__attribute__ ((format (printf, 1, 2)));
 
@@ -25,8 +25,8 @@
 #define IA64_MAX_RSVD_REGIONS 9
 
 struct rsvd_region {
-	unsigned long start; /* virtual address of beginning of element */
-	unsigned long end; /* virtual address of end of element + 1 */
+	u64 start; /* virtual address of beginning of element */
+	u64 end; /* virtual address of end of element + 1 */
 };
 
 extern struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
@@ -35,13 +35,13 @@ extern int num_rsvd_regions;
 extern void find_memory (void);
 extern void reserve_memory (void);
 extern void find_initrd (void);
-extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg);
-extern int filter_memory (unsigned long start, unsigned long end, void *arg);
-extern unsigned long efi_memmap_init(unsigned long *s, unsigned long *e);
-extern int find_max_min_low_pfn (unsigned long , unsigned long, void *);
+extern int filter_rsvd_memory (u64 start, u64 end, void *arg);
+extern int filter_memory (u64 start, u64 end, void *arg);
+extern unsigned long efi_memmap_init(u64 *s, u64 *e);
+extern int find_max_min_low_pfn (u64, u64, void *);
 
 extern unsigned long vmcore_find_descriptor_size(unsigned long address);
-extern int reserve_elfcorehdr(unsigned long *start, unsigned long *end);
+extern int reserve_elfcorehdr(u64 *start, u64 *end);
 
 /*
 * For rounding an address to the next IA64_GRANULE_SIZE or order
@@ -63,8 +63,8 @@ extern int register_active_ranges(u64 start, u64 len, int nid);
 # define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */
 extern unsigned long vmalloc_end;
 extern struct page *vmem_map;
-extern int find_largest_hole (u64 start, u64 end, void *arg);
-extern int create_mem_map_page_table (u64 start, u64 end, void *arg);
+extern int find_largest_hole(u64 start, u64 end, void *arg);
+extern int create_mem_map_page_table(u64 start, u64 end, void *arg);
 extern int vmemmap_find_next_valid_pfn(int, int);
 #else
 static inline int vmemmap_find_next_valid_pfn(int node, int i)
@@ -989,8 +989,8 @@ ia64_pal_cache_read (pal_cache_line_id_u_t line_id, u64 physical_addr)
 }
 
 /* Return summary information about the hierarchy of caches controlled by the processor */
-static inline s64
-ia64_pal_cache_summary (u64 *cache_levels, u64 *unique_caches)
+static inline long ia64_pal_cache_summary(unsigned long *cache_levels,
+		unsigned long *unique_caches)
 {
 	struct ia64_pal_retval iprv;
 	PAL_CALL(iprv, PAL_CACHE_SUMMARY, 0, 0, 0);
@@ -1038,8 +1038,8 @@ ia64_pal_copy_pal (u64 target_addr, u64 alloc_size, u64 processor, u64 *pal_proc
 }
 
 /* Return the number of instruction and data debug register pairs */
-static inline s64
-ia64_pal_debug_info (u64 *inst_regs, u64 *data_regs)
+static inline long ia64_pal_debug_info(unsigned long *inst_regs,
+		unsigned long *data_regs)
 {
 	struct ia64_pal_retval iprv;
 	PAL_CALL(iprv, PAL_DEBUG_INFO, 0, 0, 0);
@@ -1074,8 +1074,7 @@ ia64_pal_fixed_addr (u64 *global_unique_addr)
 }
 
 /* Get base frequency of the platform if generated by the processor */
-static inline s64
-ia64_pal_freq_base (u64 *platform_base_freq)
+static inline long ia64_pal_freq_base(unsigned long *platform_base_freq)
 {
 	struct ia64_pal_retval iprv;
 	PAL_CALL(iprv, PAL_FREQ_BASE, 0, 0, 0);
@@ -1437,7 +1436,7 @@ ia64_pal_proc_set_features (u64 feature_select)
 * possible.
 */
 typedef struct ia64_ptce_info_s {
-	u64 base;
+	unsigned long base;
 	u32 count[2];
 	u32 stride[2];
 } ia64_ptce_info_t;
@@ -1478,9 +1477,9 @@ ia64_pal_register_info (u64 info_request, u64 *reg_info_1, u64 *reg_info_2)
 }
 
 typedef union pal_hints_u {
-	u64 ph_data;
+	unsigned long ph_data;
 	struct {
-		u64 si : 1,
+		unsigned long si : 1,
 		    li : 1,
 		    reserved : 62;
 	} pal_hints_s;
@@ -1489,8 +1488,8 @@ typedef union pal_hints_u {
 /* Return information about the register stack and RSE for this processor
 * implementation.
 */
-static inline s64
-ia64_pal_rse_info (u64 *num_phys_stacked, pal_hints_u_t *hints)
+static inline long ia64_pal_rse_info(unsigned long *num_phys_stacked,
+		pal_hints_u_t *hints)
 {
 	struct ia64_pal_retval iprv;
 	PAL_CALL(iprv, PAL_RSE_INFO, 0, 0, 0);
@@ -1608,8 +1607,7 @@ ia64_pal_vm_info (u64 tc_level, u64 tc_type, pal_tc_info_u_t *tc_info, u64 *tc_
 /* Get page size information about the virtual memory characteristics of the processor
 * implementation.
 */
-static inline s64
-ia64_pal_vm_page_size (u64 *tr_pages, u64 *vw_pages)
+static inline s64 ia64_pal_vm_page_size(u64 *tr_pages, u64 *vw_pages)
 {
 	struct ia64_pal_retval iprv;
 	PAL_CALL(iprv, PAL_VM_PAGE_SIZE, 0, 0, 0);
@@ -187,40 +187,40 @@ union ia64_rr {
 * state comes earlier:
 */
 struct cpuinfo_ia64 {
-	__u32 softirq_pending;
-	__u64 itm_delta; /* # of clock cycles between clock ticks */
-	__u64 itm_next; /* interval timer mask value to use for next clock tick */
-	__u64 nsec_per_cyc; /* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
-	__u64 unimpl_va_mask; /* mask of unimplemented virtual address bits (from PAL) */
-	__u64 unimpl_pa_mask; /* mask of unimplemented physical address bits (from PAL) */
-	__u64 itc_freq; /* frequency of ITC counter */
-	__u64 proc_freq; /* frequency of processor */
-	__u64 cyc_per_usec; /* itc_freq/1000000 */
-	__u64 ptce_base;
-	__u32 ptce_count[2];
-	__u32 ptce_stride[2];
+	unsigned int softirq_pending;
+	unsigned long itm_delta; /* # of clock cycles between clock ticks */
+	unsigned long itm_next; /* interval timer mask value to use for next clock tick */
+	unsigned long nsec_per_cyc; /* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
+	unsigned long unimpl_va_mask; /* mask of unimplemented virtual address bits (from PAL) */
+	unsigned long unimpl_pa_mask; /* mask of unimplemented physical address bits (from PAL) */
+	unsigned long itc_freq; /* frequency of ITC counter */
+	unsigned long proc_freq; /* frequency of processor */
+	unsigned long cyc_per_usec; /* itc_freq/1000000 */
+	unsigned long ptce_base;
+	unsigned int ptce_count[2];
+	unsigned int ptce_stride[2];
 	struct task_struct *ksoftirqd; /* kernel softirq daemon for this CPU */
 
 #ifdef CONFIG_SMP
-	__u64 loops_per_jiffy;
+	unsigned long loops_per_jiffy;
 	int cpu;
-	__u32 socket_id; /* physical processor socket id */
-	__u16 core_id; /* core id */
-	__u16 thread_id; /* thread id */
-	__u16 num_log; /* Total number of logical processors on
+	unsigned int socket_id; /* physical processor socket id */
+	unsigned short core_id; /* core id */
+	unsigned short thread_id; /* thread id */
+	unsigned short num_log; /* Total number of logical processors on
 			* this socket that were successfully booted */
-	__u8 cores_per_socket; /* Cores per processor socket */
-	__u8 threads_per_core; /* Threads per core */
+	unsigned char cores_per_socket; /* Cores per processor socket */
+	unsigned char threads_per_core; /* Threads per core */
 #endif
 
 	/* CPUID-derived information: */
-	__u64 ppn;
-	__u64 features;
-	__u8 number;
-	__u8 revision;
-	__u8 model;
-	__u8 family;
-	__u8 archrev;
+	unsigned long ppn;
+	unsigned long features;
+	unsigned char number;
+	unsigned char revision;
+	unsigned char model;
+	unsigned char family;
+	unsigned char archrev;
 	char vendor[16];
 	char *model_name;
 
@@ -329,8 +329,8 @@ struct thread_struct {
 #else
 # define INIT_THREAD_PM
 #endif
-	__u64 dbr[IA64_NUM_DBG_REGS];
-	__u64 ibr[IA64_NUM_DBG_REGS];
+	unsigned long dbr[IA64_NUM_DBG_REGS];
+	unsigned long ibr[IA64_NUM_DBG_REGS];
 	struct ia64_fpreg fph[96]; /* saved/loaded on demand */
 };
 
@@ -106,10 +106,10 @@ struct ia64_sal_retval {
 	 * informational value should be printed (e.g., "reboot for
 	 * change to take effect").
 	 */
-	s64 status;
-	u64 v0;
-	u64 v1;
-	u64 v2;
+	long status;
+	unsigned long v0;
+	unsigned long v1;
+	unsigned long v2;
 };
 
 typedef struct ia64_sal_retval (*ia64_sal_handler) (u64, ...);
@@ -929,7 +929,7 @@ ia64_sn_sysctl_tio_clock_reset(nasid_t nasid)
 /*
 * Get the associated ioboard type for a given nasid.
 */
-static inline s64
+static inline long
 ia64_sn_sysctl_ioboard_get(nasid_t nasid, u16 *ioboard)
 {
 	struct ia64_sal_retval isrv;
@@ -2,10 +2,11 @@
 #define _ASM_IA64_TYPES_H
 
 /*
- * This file is never included by application software unless explicitly requested (e.g.,
- * via linux/types.h) in which case the application is Linux specific so (user-) name
- * space pollution is not a major issue. However, for interoperability, libraries still
- * need to be careful to avoid a name clashes.
+ * This file is never included by application software unless explicitly
+ * requested (e.g., via linux/types.h) in which case the application is
+ * Linux specific so (user-) name space pollution is not a major issue.
+ * However, for interoperability, libraries still need to be careful to
+ * avoid naming clashes.
 *
 * Based on <asm-alpha/types.h>.
 *
@@ -13,7 +14,11 @@
 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
 */
 
+#ifdef __KERNEL__
+#include <asm-generic/int-ll64.h>
+#else
 #include <asm-generic/int-l64.h>
+#endif
 
 #ifdef __ASSEMBLY__
 # define __IA64_UL(x) (x)
@@ -310,11 +310,12 @@
 #define __NR_inotify_init1 1318
 #define __NR_preadv 1319
 #define __NR_pwritev 1320
+#define __NR_rt_tgsigqueueinfo 1321
 
 #ifdef __KERNEL__
 
 
-#define NR_syscalls 297 /* length of syscall table */
+#define NR_syscalls 298 /* length of syscall table */
 
 /*
 * The following defines stop scripts/checksyscalls.sh from complaining about
@@ -328,6 +329,7 @@
 #define __IGNORE_utime /* utimes() */
 #define __IGNORE_getpgrp /* getpgid() */
 #define __IGNORE_vfork /* clone() */
+#define __IGNORE_umount2 /* umount() */
 
 #define __ARCH_WANT_SYS_RT_SIGACTION
 #define __ARCH_WANT_SYS_RT_SIGSUSPEND
@@ -46,7 +46,7 @@ extern efi_status_t efi_call_phys (void *, ...);
 struct efi efi;
 EXPORT_SYMBOL(efi);
 static efi_runtime_services_t *runtime;
-static unsigned long mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL;
+static u64 mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL;
 
 #define efi_call_virt(f, args...) (*(f))(args)
 
@@ -356,7 +356,7 @@ efi_get_pal_addr (void)
 
 		if (++pal_code_count > 1) {
 			printk(KERN_ERR "Too many EFI Pal Code memory ranges, "
-				"dropped @ %lx\n", md->phys_addr);
+				"dropped @ %llx\n", md->phys_addr);
 			continue;
 		}
 		/*
@@ -490,10 +490,10 @@ efi_init (void)
 		}
 	}
 	if (min_addr != 0UL)
-		printk(KERN_INFO "Ignoring memory below %luMB\n",
+		printk(KERN_INFO "Ignoring memory below %lluMB\n",
 			min_addr >> 20);
 	if (max_addr != ~0UL)
-		printk(KERN_INFO "Ignoring memory above %luMB\n",
+		printk(KERN_INFO "Ignoring memory above %lluMB\n",
 			max_addr >> 20);
 
 	efi.systab = __va(ia64_boot_param->efi_systab);
@@ -1066,7 +1066,7 @@ find_memmap_space (void)
 * parts exist, and are WB.
 */
 unsigned long
-efi_memmap_init(unsigned long *s, unsigned long *e)
+efi_memmap_init(u64 *s, u64 *e)
 {
 	struct kern_memdesc *k, *prev = NULL;
 	u64 contig_low=0, contig_high=0;
@@ -1805,6 +1805,7 @@ sys_call_table:
 	data8 sys_inotify_init1
 	data8 sys_preadv
 	data8 sys_pwritev // 1320
+	data8 sys_rt_tgsigqueueinfo
 
 	.org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
 #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
@@ -451,7 +451,7 @@ iosapic_startup_edge_irq (unsigned int irq)
 static void
 iosapic_ack_edge_irq (unsigned int irq)
 {
-	irq_desc_t *idesc = irq_desc + irq;
+	struct irq_desc *idesc = irq_desc + irq;
 
 	irq_complete_move(irq);
 	move_native_irq(irq);
@@ -600,8 +600,8 @@ static int
 register_intr (unsigned int gsi, int irq, unsigned char delivery,
 	       unsigned long polarity, unsigned long trigger)
 {
-	irq_desc_t *idesc;
-	struct hw_interrupt_type *irq_type;
+	struct irq_desc *idesc;
+	struct irq_chip *irq_type;
 	int index;
 	struct iosapic_rte_info *rte;
 
@@ -650,7 +650,7 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
 
 	idesc = irq_desc + irq;
 	if (irq_type != NULL && idesc->chip != irq_type) {
-		if (idesc->chip != &no_irq_type)
+		if (idesc->chip != &no_irq_chip)
 			printk(KERN_WARNING
 			       "%s: changing vector %d from %s to %s\n",
 			       __func__, irq_to_vector(irq),
@@ -828,7 +828,7 @@ iosapic_unregister_intr (unsigned int gsi)
 {
 	unsigned long flags;
 	int irq, index;
-	irq_desc_t *idesc;
+	struct irq_desc *idesc;
 	u32 low32;
 	unsigned long trigger, polarity;
 	unsigned int dest;
@@ -130,7 +130,7 @@ unsigned int vectors_in_migration[NR_IRQS];
 */
 static void migrate_irqs(void)
 {
-	irq_desc_t *desc;
+	struct irq_desc *desc;
 	int irq, new_cpu;
 
 	for (irq=0; irq < NR_IRQS; irq++) {
@@ -630,7 +630,7 @@ static struct irqaction tlb_irqaction = {
 void
 ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action)
 {
-	irq_desc_t *desc;
+	struct irq_desc *desc;
 	unsigned int irq;
 
 	irq = vec;
@@ -33,7 +33,7 @@ static int lsapic_retrigger(unsigned int irq)
 	return 1;
 }
 
-struct hw_interrupt_type irq_type_ia64_lsapic = {
+struct irq_chip irq_type_ia64_lsapic = {
 	.name = "LSAPIC",
 	.startup = lsapic_noop_startup,
 	.shutdown = lsapic_noop,
@@ -850,7 +850,7 @@ EXPORT_SYMBOL(ia64_unreg_MCA_extension);
 
 
 static inline void
-copy_reg(const u64 *fr, u64 fnat, u64 *tr, u64 *tnat)
+copy_reg(const u64 *fr, u64 fnat, unsigned long *tr, unsigned long *tnat)
 {
 	u64 fslot, tslot, nat;
 	*tr = *fr;
@@ -914,9 +914,9 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
 	struct switch_stack *old_sw;
 	unsigned size = sizeof(struct pt_regs) +
 		sizeof(struct switch_stack) + 16;
-	u64 *old_bspstore, *old_bsp;
-	u64 *new_bspstore, *new_bsp;
-	u64 old_unat, old_rnat, new_rnat, nat;
+	unsigned long *old_bspstore, *old_bsp;
+	unsigned long *new_bspstore, *new_bsp;
+	unsigned long old_unat, old_rnat, new_rnat, nat;
 	u64 slots, loadrs = regs->loadrs;
 	u64 r12 = ms->pmsa_gr[12-1], r13 = ms->pmsa_gr[13-1];
 	u64 ar_bspstore = regs->ar_bspstore;
@@ -968,10 +968,10 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
 	 * loadrs for the new stack and save it in the new pt_regs, where
 	 * ia64_old_stack() can get it.
 	 */
-	old_bspstore = (u64 *)ar_bspstore;
-	old_bsp = (u64 *)ar_bsp;
+	old_bspstore = (unsigned long *)ar_bspstore;
+	old_bsp = (unsigned long *)ar_bsp;
 	slots = ia64_rse_num_regs(old_bspstore, old_bsp);
-	new_bspstore = (u64 *)((u64)current + IA64_RBS_OFFSET);
+	new_bspstore = (unsigned long *)((u64)current + IA64_RBS_OFFSET);
 	new_bsp = ia64_rse_skip_regs(new_bspstore, slots);
 	regs->loadrs = (new_bsp - new_bspstore) * 8 << 16;
 
@@ -1917,9 +1917,9 @@ ia64_mca_init(void)
 	ia64_fptr_t *init_hldlr_ptr_slave = (ia64_fptr_t *)ia64_os_init_dispatch_slave;
 	ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
 	int i;
-	s64 rc;
+	long rc;
 	struct ia64_sal_retval isrv;
-	u64 timeout = IA64_MCA_RENDEZ_TIMEOUT; /* platform specific */
+	unsigned long timeout = IA64_MCA_RENDEZ_TIMEOUT; /* platform specific */
 	static struct notifier_block default_init_monarch_nb = {
 		.notifier_call = default_monarch_init_process,
 		.priority = 0/* we need to notified last */
@@ -2092,7 +2092,7 @@ ia64_mca_late_init(void)
 	cpe_poll_timer.function = ia64_mca_cpe_poll;
 
 	{
-		irq_desc_t *desc;
+		struct irq_desc *desc;
 		unsigned int irq;
 
 		if (cpe_vector >= 0) {
@@ -171,7 +171,8 @@ apply_imm60 (struct module *mod, struct insn *insn, uint64_t val)
 		return 0;
 	}
 	if (val + ((uint64_t) 1 << 59) >= (1UL << 60)) {
-		printk(KERN_ERR "%s: value %ld out of IMM60 range\n", mod->name, (int64_t) val);
+		printk(KERN_ERR "%s: value %ld out of IMM60 range\n",
+			mod->name, (long) val);
 		return 0;
 	}
 	ia64_patch_imm60((u64) insn, val);
@@ -182,7 +183,8 @@ static int
 apply_imm22 (struct module *mod, struct insn *insn, uint64_t val)
 {
 	if (val + (1 << 21) >= (1 << 22)) {
-		printk(KERN_ERR "%s: value %li out of IMM22 range\n", mod->name, (int64_t)val);
+		printk(KERN_ERR "%s: value %li out of IMM22 range\n",
+			mod->name, (long)val);
 		return 0;
 	}
 	ia64_patch((u64) insn, 0x01fffcfe000UL, ( ((val & 0x200000UL) << 15) /* bit 21 -> 36 */
@@ -196,7 +198,8 @@ static int
 apply_imm21b (struct module *mod, struct insn *insn, uint64_t val)
 {
 	if (val + (1 << 20) >= (1 << 21)) {
-		printk(KERN_ERR "%s: value %li out of IMM21b range\n", mod->name, (int64_t)val);
+		printk(KERN_ERR "%s: value %li out of IMM21b range\n",
+			mod->name, (long)val);
 		return 0;
 	}
 	ia64_patch((u64) insn, 0x11ffffe000UL, ( ((val & 0x100000UL) << 16) /* bit 20 -> 36 */
@@ -701,8 +704,9 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
 	case RV_PCREL2:
 		if (r_type == R_IA64_PCREL21BI) {
 			if (!is_internal(mod, val)) {
-				printk(KERN_ERR "%s: %s reloc against non-local symbol (%lx)\n",
-					__func__, reloc_name[r_type], val);
+				printk(KERN_ERR "%s: %s reloc against "
+					"non-local symbol (%lx)\n", __func__,
+					reloc_name[r_type], (unsigned long)val);
 				return -ENOEXEC;
 			}
 			format = RF_INSN21B;
@@ -158,7 +158,7 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 }
 #endif /* CONFIG_SMP */
 
-struct irq_chip dmar_msi_type = {
+static struct irq_chip dmar_msi_type = {
 	.name = "DMAR_MSI",
 	.unmask = dmar_msi_unmask,
 	.mask = dmar_msi_mask,
@@ -218,10 +218,10 @@ static int
 cache_info(char *page)
 {
 	char *p = page;
-	u64 i, levels, unique_caches;
+	unsigned long i, levels, unique_caches;
 	pal_cache_config_info_t cci;
 	int j, k;
-	s64 status;
+	long status;
 
 	if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
 		printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
@@ -303,7 +303,7 @@ vm_info(char *page)
 	ia64_ptce_info_t ptce;
 	const char *sep;
 	int i, j;
-	s64 status;
+	long status;
 
 	if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) {
 		printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
@@ -431,9 +431,9 @@ register_info(char *page)
 	char *p = page;
 	u64 reg_info[2];
 	u64 info;
-	u64 phys_stacked;
+	unsigned long phys_stacked;
 	pal_hints_u_t hints;
-	u64 iregs, dregs;
+	unsigned long iregs, dregs;
 	char *info_type[]={
 		"Implemented AR(s)",
 		"AR(s) with read side-effects",
@@ -530,8 +530,8 @@ static char **proc_features[]={
 	NULL, NULL, NULL, NULL,
 };
 
-static char *
-feature_set_info(char *page, u64 avail, u64 status, u64 control, u64 set)
+static char * feature_set_info(char *page, u64 avail, u64 status, u64 control,
+		unsigned long set)
 {
 	char *p = page;
 	char **vf, **v;
@@ -714,7 +714,7 @@ frequency_info(char *page)
 {
 	char *p = page;
 	struct pal_freq_ratio proc, itc, bus;
-	u64 base;
+	unsigned long base;
 
 	if (ia64_pal_freq_base(&base) == -1)
 		p += sprintf(p, "Output clock : not implemented\n");
@@ -736,43 +736,43 @@ static int
 tr_info(char *page)
 {
 	char *p = page;
-	s64 status;
+	long status;
 	pal_tr_valid_u_t tr_valid;
 	u64 tr_buffer[4];
 	pal_vm_info_1_u_t vm_info_1;
 	pal_vm_info_2_u_t vm_info_2;
-	u64 i, j;
-	u64 max[3], pgm;
+	unsigned long i, j;
+	unsigned long max[3], pgm;
 	struct ifa_reg {
-		u64 valid:1;
-		u64 ig:11;
-		u64 vpn:52;
+		unsigned long valid:1;
+		unsigned long ig:11;
+		unsigned long vpn:52;
 	} *ifa_reg;
 	struct itir_reg {
-		u64 rv1:2;
-		u64 ps:6;
-		u64 key:24;
-		u64 rv2:32;
+		unsigned long rv1:2;
+		unsigned long ps:6;
+		unsigned long key:24;
+		unsigned long rv2:32;
 	} *itir_reg;
 	struct gr_reg {
-		u64 p:1;
-		u64 rv1:1;
-		u64 ma:3;
-		u64 a:1;
-		u64 d:1;
-		u64 pl:2;
-		u64 ar:3;
-		u64 ppn:38;
-		u64 rv2:2;
-		u64 ed:1;
-		u64 ig:11;
+		unsigned long p:1;
+		unsigned long rv1:1;
+		unsigned long ma:3;
+		unsigned long a:1;
+		unsigned long d:1;
+		unsigned long pl:2;
+		unsigned long ar:3;
+		unsigned long ppn:38;
+		unsigned long rv2:2;
+		unsigned long ed:1;
+		unsigned long ig:11;
 	} *gr_reg;
 	struct rid_reg {
-		u64 ig1:1;
-		u64 rv1:1;
-		u64 ig2:6;
-		u64 rid:24;
-		u64 rv2:32;
+		unsigned long ig1:1;
+		unsigned long rv1:1;
+		unsigned long ig2:6;
+		unsigned long rid:24;
+		unsigned long rv2:32;
 	} *rid_reg;
 
 	if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) {
@@ -19,6 +19,8 @@
 */
 
 #include <linux/bug.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
 #include <asm/paravirt.h>
 
 #define DECLARE(name) \
@@ -91,7 +91,7 @@ int iommu_dma_supported(struct device *dev, u64 mask)
 	   type. Normally this doesn't make any difference, but gives
 	   more gentle handling of IOMMU overflow. */
 	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
-		dev_info(dev, "Force SAC with mask %lx\n", mask);
+		dev_info(dev, "Force SAC with mask %llx\n", mask);
 		return 0;
 	}
 
@@ -312,7 +312,7 @@ typedef struct pfm_context {
 	unsigned long th_pmcs[PFM_NUM_PMC_REGS]; /* PMC thread save state */
 	unsigned long th_pmds[PFM_NUM_PMD_REGS]; /* PMD thread save state */
 
-	u64 ctx_saved_psr_up; /* only contains psr.up value */
+	unsigned long ctx_saved_psr_up; /* only contains psr.up value */
 
 	unsigned long ctx_last_activation; /* context last activation number for last_cpu */
 	unsigned int ctx_last_cpu; /* CPU id of current or last CPU used (SMP only) */
@@ -5213,8 +5213,8 @@ pfm_end_notify_user(pfm_context_t *ctx)
 * main overflow processing routine.
 * it can be called from the interrupt path or explicitly during the context switch code
 */
-static void
-pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs)
+static void pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx,
+		unsigned long pmc0, struct pt_regs *regs)
 {
 	pfm_ovfl_arg_t *ovfl_arg;
 	unsigned long mask;
@@ -151,9 +151,9 @@ int num_rsvd_regions __initdata;
 * This routine does not assume the incoming segments are sorted.
 */
 int __init
-filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
+filter_rsvd_memory (u64 start, u64 end, void *arg)
 {
-	unsigned long range_start, range_end, prev_start;
+	u64 range_start, range_end, prev_start;
 	void (*func)(unsigned long, unsigned long, int);
 	int i;
 
@@ -191,7 +191,7 @@ filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
 * are not filtered out.
 */
 int __init
-filter_memory(unsigned long start, unsigned long end, void *arg)
+filter_memory(u64 start, u64 end, void *arg)
 {
 	void (*func)(unsigned long, unsigned long, int);
 
@@ -397,7 +397,7 @@ find_initrd (void)
 		initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
 		initrd_end = initrd_start+ia64_boot_param->initrd_size;
 
-		printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
+		printk(KERN_INFO "Initial ramdisk at: 0x%lx (%llu bytes)\n",
 		       initrd_start, ia64_boot_param->initrd_size);
 	}
 #endif
@@ -505,9 +505,9 @@ static int __init parse_elfcorehdr(char *arg)
 }
 early_param("elfcorehdr", parse_elfcorehdr);
 
-int __init reserve_elfcorehdr(unsigned long *start, unsigned long *end)
+int __init reserve_elfcorehdr(u64 *start, u64 *end)
 {
-	unsigned long length;
+	u64 length;
 
 	/* We get the address using the kernel command line,
 	 * but the size is extracted from the EFI tables.
@@ -588,7 +588,7 @@ setup_arch (char **cmdline_p)
 	ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
 #else
 	{
-		u64 num_phys_stacked;
+		unsigned long num_phys_stacked;
 
 		if (ia64_pal_rse_info(&num_phys_stacked, 0) == 0 && num_phys_stacked > 96)
 			ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
@@ -872,9 +872,9 @@ static void __cpuinit
 get_cache_info(void)
 {
 	unsigned long line_size, max = 1;
-	u64 l, levels, unique_caches;
+	unsigned long l, levels, unique_caches;
 	pal_cache_config_info_t cci;
-	s64 status;
+	long status;
 
 	status = ia64_pal_cache_summary(&levels, &unique_caches);
 	if (status != 0) {
@@ -892,9 +892,9 @@ get_cache_info(void)
 		/* cache_type (data_or_unified)=2 */
 		status = ia64_pal_cache_config_info(l, 2, &cci);
 		if (status != 0) {
-			printk(KERN_ERR
-			       "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n",
+			printk(KERN_ERR "%s: ia64_pal_cache_config_info"
+			       "(l=%lu, 2) failed (status=%ld)\n",
 			       __func__, l, status);
 			max = SMP_CACHE_BYTES;
 			/* The safest setup for "flush_icache_range()" */
 			cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
@@ -914,10 +914,10 @@ get_cache_info(void)
 		/* cache_type (instruction)=1*/
 		status = ia64_pal_cache_config_info(l, 1, &cci);
 		if (status != 0) {
-			printk(KERN_ERR
-			       "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n",
+			printk(KERN_ERR "%s: ia64_pal_cache_config_info"
+			       "(l=%lu, 1) failed (status=%ld)\n",
 			       __func__, l, status);
-			/* The safest setup for "flush_icache_range()" */
+			/* The safest setup for flush_icache_range() */
 			cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
 		}
 	}
@@ -66,7 +66,7 @@ static DEFINE_PER_CPU(unsigned short, shadow_flush_counts[NR_CPUS]) ____cachelin
 #define IPI_KDUMP_CPU_STOP 3
 
 /* This needs to be cacheline aligned because it is written to by *other* CPUs. */
-static DEFINE_PER_CPU_SHARED_ALIGNED(u64, ipi_operation);
+static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, ipi_operation);
 
 extern void cpu_halt (void);
 
@@ -678,7 +678,7 @@ extern void fixup_irqs(void);
 int migrate_platform_irqs(unsigned int cpu)
 {
 	int new_cpei_cpu;
-	irq_desc_t *desc = NULL;
+	struct irq_desc *desc = NULL;
 	const struct cpumask *mask;
 	int retval = 0;
 
@@ -865,7 +865,7 @@ init_smp_config(void)
 void __devinit
 identify_siblings(struct cpuinfo_ia64 *c)
 {
-	s64 status;
+	long status;
 	u16 pltid;
 	pal_logical_to_physical_t info;
 
@@ -385,7 +385,7 @@ ia64_init_itm (void)
 
 static cycle_t itc_get_cycles(struct clocksource *cs)
 {
-	u64 lcycle, now, ret;
+	unsigned long lcycle, now, ret;
 
 	if (!itc_jitter_data.itc_jitter)
 		return get_cycles();
@@ -306,10 +306,10 @@ static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
 
 static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
 {
-	u64 i, levels, unique_caches;
+	unsigned long i, levels, unique_caches;
 	pal_cache_config_info_t cci;
 	int j;
-	s64 status;
+	long status;
 	struct cache_info *this_cache;
 	int num_cache_leaves = 0;
 
@@ -250,8 +250,7 @@ EXPORT_SYMBOL(uncached_free_page);
 * Called at boot time to build a map of pages that can be used for
 * memory special operations.
 */
-static int __init uncached_build_memmap(unsigned long uc_start,
-					unsigned long uc_end, void *arg)
+static int __init uncached_build_memmap(u64 uc_start, u64 uc_end, void *arg)
 {
 	int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET);
 	struct gen_pool *pool = uncached_pools[nid].pool;
@@ -107,10 +107,10 @@ unsigned long bootmap_start;
 * bootmap_start. This address must be page-aligned.
 */
 static int __init
-find_bootmap_location (unsigned long start, unsigned long end, void *arg)
+find_bootmap_location (u64 start, u64 end, void *arg)
 {
-	unsigned long needed = *(unsigned long *)arg;
-	unsigned long range_start, range_end, free_start;
+	u64 needed = *(unsigned long *)arg;
+	u64 range_start, range_end, free_start;
 	int i;
 
 #if IGNORE_PFN0
@@ -229,8 +229,7 @@ find_memory (void)
 	alloc_per_cpu_data();
 }
 
-static int
-count_pages (u64 start, u64 end, void *arg)
+static int count_pages(u64 start, u64 end, void *arg)
 {
 	unsigned long *count = arg;
 
@@ -422,8 +422,7 @@ int vmemmap_find_next_valid_pfn(int node, int i)
 	return hole_next_pfn - pgdat->node_start_pfn;
 }
 
-int __init
-create_mem_map_page_table (u64 start, u64 end, void *arg)
+int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
 {
 	unsigned long address, start_page, end_page;
 	struct page *map_start, *map_end;
@@ -469,7 +468,7 @@ struct memmap_init_callback_data {
 };
 
 static int __meminit
-virtual_memmap_init (u64 start, u64 end, void *arg)
+virtual_memmap_init(u64 start, u64 end, void *arg)
 {
 	struct memmap_init_callback_data *args;
 	struct page *map_start, *map_end;
@@ -531,8 +530,7 @@ ia64_pfn_valid (unsigned long pfn)
 }
 EXPORT_SYMBOL(ia64_pfn_valid);
 
-int __init
-find_largest_hole (u64 start, u64 end, void *arg)
+int __init find_largest_hole(u64 start, u64 end, void *arg)
 {
 	u64 *max_gap = arg;
 
@@ -548,8 +546,7 @@ find_largest_hole (u64 start, u64 end, void *arg)
 
 #endif /* CONFIG_VIRTUAL_MEM_MAP */
 
-int __init
-register_active_ranges(u64 start, u64 len, int nid)
+int __init register_active_ranges(u64 start, u64 len, int nid)
 {
 	u64 end = start + len;
 
@@ -567,7 +564,7 @@ register_active_ranges(u64 start, u64 len, int nid)
 }
 
 static int __init
-count_reserved_pages (u64 start, u64 end, void *arg)
+count_reserved_pages(u64 start, u64 end, void *arg)
 {
 	unsigned long num_reserved = 0;
 	unsigned long *count = arg;
@@ -580,7 +577,7 @@ count_reserved_pages (u64 start, u64 end, void *arg)
 }
 
 int
-find_max_min_low_pfn (unsigned long start, unsigned long end, void *arg)
+find_max_min_low_pfn (u64 start, u64 end, void *arg)
 {
 	unsigned long pfn_start, pfn_end;
 #ifdef CONFIG_FLATMEM
@@ -34,7 +34,7 @@
 #include <asm/tlb.h>
 
 static struct {
-	unsigned long mask;	/* mask of supported purge page-sizes */
+	u64 mask;		/* mask of supported purge page-sizes */
 	unsigned long max_bits;	/* log2 of largest supported purge page-size */
 } purge;
 
@@ -328,7 +328,7 @@ void __devinit
 ia64_tlb_init (void)
 {
 	ia64_ptce_info_t uninitialized_var(ptce_info); /* GCC be quiet */
-	unsigned long tr_pgbits;
+	u64 tr_pgbits;
 	long status;
 	pal_vm_info_1_u_t vm_info_1;
 	pal_vm_info_2_u_t vm_info_2;
@@ -163,7 +163,7 @@ add_io_space (struct pci_root_info *info, struct acpi_resource_address64 *addr)
 {
 	struct resource *resource;
 	char *name;
-	u64 base, min, max, base_port;
+	unsigned long base, min, max, base_port;
 	unsigned int sparse = 0, space_nr, len;
 
 	resource = kzalloc(sizeof(*resource), GFP_KERNEL);
@@ -292,7 +292,7 @@ static __devinit acpi_status add_window(struct acpi_resource *res, void *data)
 	window->offset = offset;
 
 	if (insert_resource(root, &window->resource)) {
-		printk(KERN_ERR "alloc 0x%lx-0x%lx from %s for %s failed\n",
+		printk(KERN_ERR "alloc 0x%llx-0x%llx from %s for %s failed\n",
 			window->resource.start, window->resource.end,
 			root->name, info->name);
 	}
@@ -314,8 +314,8 @@ pcibios_setup_root_windows(struct pci_bus *bus, struct pci_controller *ctrl)
 		    (res->end - res->start < 16))
 			continue;
 		if (j >= PCI_BUS_NUM_RESOURCES) {
-			printk("Ignoring range [%lx-%lx] (%lx)\n", res->start,
-					res->end, res->flags);
+			printk("Ignoring range [%#llx-%#llx] (%lx)\n",
+					res->start, res->end, res->flags);
 			continue;
 		}
 		bus->resource[j++] = res;
@@ -728,8 +728,8 @@ extern u8 pci_cache_line_size;
  */
 static void __init set_pci_cacheline_size(void)
 {
-	u64 levels, unique_caches;
-	s64 status;
+	unsigned long levels, unique_caches;
+	long status;
 	pal_cache_config_info_t cci;
 
 	status = ia64_pal_cache_summary(&levels, &unique_caches);
@@ -40,7 +40,7 @@ struct sn_pcidev_match {
 /*
  * Perform the early IO init in PROM.
  */
-static s64
+static long
 sal_ioif_init(u64 *result)
 {
 	struct ia64_sal_retval isrv = {0,0,0,0};
@@ -492,7 +492,7 @@ void __init
 sn_io_acpi_init(void)
 {
 	u64 result;
-	s64 status;
+	long status;
 
 	/* SN Altix does not follow the IOSAPIC IRQ routing model */
 	acpi_irq_model = ACPI_IRQ_MODEL_PLATFORM;
@@ -342,7 +342,7 @@ sn_common_bus_fixup(struct pci_bus *bus,
 		struct pcibus_bussoft *b = SN_PCIBUS_BUSSOFT(bus);
 
 		printk(KERN_WARNING "Device ASIC=%u XID=%u PBUSNUM=%u "
-		       "L_IO=%lx L_MEM=%lx BASE=%lx\n",
+		       "L_IO=%llx L_MEM=%llx BASE=%llx\n",
 		       b->bs_asic_type, b->bs_xid, b->bs_persist_busnum,
 		       b->bs_legacy_io, b->bs_legacy_mem, b->bs_base);
 		printk(KERN_WARNING "on node %d but only %d nodes online."
@@ -295,13 +295,13 @@ unsigned int sn_local_vector_to_irq(u8 vector)
 void sn_irq_init(void)
 {
 	int i;
-	irq_desc_t *base_desc = irq_desc;
+	struct irq_desc *base_desc = irq_desc;
 
 	ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR;
 	ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR;
 
 	for (i = 0; i < NR_IRQS; i++) {
-		if (base_desc[i].chip == &no_irq_type) {
+		if (base_desc[i].chip == &no_irq_chip) {
 			base_desc[i].chip = &irq_type_sn;
 		}
 	}
@@ -377,7 +377,7 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
 	int cpu = nasid_slice_to_cpuid(nasid, slice);
 #ifdef CONFIG_SMP
 	int cpuphys;
-	irq_desc_t *desc;
+	struct irq_desc *desc;
 #endif
 
 	pci_dev_get(pci_dev);
@@ -414,7 +414,7 @@ static int sn_topology_show(struct seq_file *s, void *d)
 		}
 		seq_printf(s, "partition %u %s local "
 			"shubtype %s, "
-			"nasid_mask 0x%016lx, "
+			"nasid_mask 0x%016llx, "
 			"nasid_bits %d:%d, "
 			"system_size %d, "
 			"sharing_size %d, "
@@ -683,7 +683,7 @@ static int sn_hwperf_map_err(int hwperf_err)
  * ioctl for "sn_hwperf" misc device
  */
 static int
-sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, u64 arg)
+sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, unsigned long arg)
 {
 	struct sn_hwperf_ioctl_args a;
 	struct cpuinfo_ia64 *cdata;
@@ -36,7 +36,7 @@ static int system_serial_number_open(struct inode *inode, struct file *file)
 
 static int licenseID_show(struct seq_file *s, void *p)
 {
-	seq_printf(s, "0x%lx\n", sn_partition_serial_number_val());
+	seq_printf(s, "0x%llx\n", sn_partition_serial_number_val());
 	return 0;
 }
 
@@ -368,7 +368,7 @@ static void tio_corelet_reset(nasid_t nasid, int corelet)
 static int is_fpga_tio(int nasid, int *bt)
 {
 	u16 uninitialized_var(ioboard_type); /* GCC be quiet */
-	s64 rc;
+	long rc;
 
 	rc = ia64_sn_sysctl_ioboard_get(nasid, &ioboard_type);
 	if (rc) {
@@ -79,7 +79,7 @@ static int sal_pcibr_error_interrupt(struct pcibus_info *soft)
 
 u16 sn_ioboard_to_pci_bus(struct pci_bus *pci_bus)
 {
-	s64 rc;
+	long rc;
 	u16 uninitialized_var(ioboard); /* GCC be quiet */
 	nasid_t nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base);
 
@@ -123,7 +123,7 @@ tioca_gart_init(struct tioca_kernel *tioca_kern)
 
 	if (!tmp) {
 		printk(KERN_ERR "%s: Could not allocate "
-		       "%lu bytes (order %d) for GART\n",
+		       "%llu bytes (order %d) for GART\n",
 		       __func__,
 		       tioca_kern->ca_gart_size,
 		       get_order(tioca_kern->ca_gart_size));
@@ -348,7 +348,7 @@ tioca_dma_d48(struct pci_dev *pdev, u64 paddr)
 	agp_dma_extn = __sn_readq_relaxed(&ca_base->ca_agp_dma_addr_extn);
 	if (node_upper != (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)) {
 		printk(KERN_ERR "%s: coretalk upper node (%u) "
-		       "mismatch with ca_agp_dma_addr_extn (%lu)\n",
+		       "mismatch with ca_agp_dma_addr_extn (%llu)\n",
 		       __func__,
 		       node_upper, (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT));
 		return 0;
@@ -367,7 +367,7 @@ tioca_dma_d48(struct pci_dev *pdev, u64 paddr)
  * dma_addr_t is guaranteed to be contiguous in CA bus space.
  */
 static dma_addr_t
-tioca_dma_mapped(struct pci_dev *pdev, u64 paddr, size_t req_size)
+tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size)
 {
 	int i, ps, ps_shift, entry, entries, mapsize, last_entry;
 	u64 xio_addr, end_xio_addr;
@@ -493,7 +493,7 @@ tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
 
 	if (&map->ce_dmamap_list == &ce_kern->ce_dmamap_list) {
 		printk(KERN_WARNING
-		       "%s: %s - no map found for bus_addr 0x%lx\n",
+		       "%s: %s - no map found for bus_addr 0x%llx\n",
 		       __func__, pci_name(pdev), bus_addr);
 	} else if (--map->refcnt == 0) {
 		for (i = 0; i < map->ate_count; i++) {
@@ -642,7 +642,7 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
  * in the address.
  */
 static u64
-tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
+tioce_dma(struct pci_dev *pdev, unsigned long paddr, size_t byte_count, int dma_flags)
 {
 	return tioce_do_dma_map(pdev, paddr, byte_count, 0, dma_flags);
 }
@@ -657,7 +657,7 @@ tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
  * in the address.
  */
 static u64
-tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
+tioce_dma_consistent(struct pci_dev *pdev, unsigned long paddr, size_t byte_count, int dma_flags)
 {
 	return tioce_do_dma_map(pdev, paddr, byte_count, 1, dma_flags);
 }
@@ -138,7 +138,7 @@ static void
 __xen_register_percpu_irq(unsigned int cpu, unsigned int vec,
 			  struct irqaction *action, int save)
 {
-	irq_desc_t *desc;
+	struct irq_desc *desc;
 	int irq = 0;
 
 	if (xen_slab_ready) {
@@ -518,8 +518,9 @@ zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret)
 	if (hp_zx1_setup(sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa))
 		return AE_OK;
 
-	printk(KERN_INFO PFX "Detected HP ZX1 %s AGP chipset (ioc=%lx, lba=%lx)\n",
-		(char *) context, sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa);
+	printk(KERN_INFO PFX "Detected HP ZX1 %s AGP chipset "
+		"(ioc=%llx, lba=%llx)\n", (char *)context,
+		sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa);
 
 	hp_zx1_gart_found = 1;
 	return AE_CTRL_TERMINATE; /* we only support one bridge; quit looking */
@@ -28,10 +28,10 @@ setup_serial_console(struct pcdp_uart *uart)
 	char parity;
 
 	mmio = (uart->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY);
-	p += sprintf(p, "uart8250,%s,0x%lx",
+	p += sprintf(p, "uart8250,%s,0x%llx",
 		mmio ? "mmio" : "io", uart->addr.address);
 	if (uart->baud) {
-		p += sprintf(p, ",%lu", uart->baud);
+		p += sprintf(p, ",%llu", uart->baud);
 		if (uart->bits) {
 			switch (uart->parity) {
 			    case 0x2: parity = 'e'; break;
@@ -101,7 +101,7 @@ typedef struct {
 	u64 attribute;
 } efi_memory_desc_t;
 
-typedef int (*efi_freemem_callback_t) (unsigned long start, unsigned long end, void *arg);
+typedef int (*efi_freemem_callback_t) (u64 start, u64 end, void *arg);
 
 /*
  * Types and defines for Time Services