Merge branch 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cleanups from Ingo Molnar:
 "Misc smaller cleanups"

* 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/lib: Fix spelling, put space between a numeral and its units
  x86/lib: Fix spelling in the comments
  x86, quirks: Shut-up a long-standing gcc warning
  x86, msr: Unify variable names
  x86-64, docs, mm: Add vsyscall range to virtual address space layout
  x86: Drop KERNEL_IMAGE_START
  x86_64: Use __BOOT_DS instead of __KERNEL_DS for safety
commit 874f6d1be7
Author: Linus Torvalds
Date:   2013-04-30 08:34:07 -07:00

10 changed files with 33 additions and 28 deletions


@@ -13,7 +13,9 @@ ffffe90000000000 - ffffe9ffffffffff (=40 bits) hole
 ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB)
 ... unused hole ...
 ffffffff80000000 - ffffffffa0000000 (=512 MB)  kernel text mapping, from phys 0
-ffffffffa0000000 - fffffffffff00000 (=1536 MB) module mapping space
+ffffffffa0000000 - ffffffffff5fffff (=1525 MB) module mapping space
+ffffffffff600000 - ffffffffffdfffff (=8 MB)    vsyscalls
+ffffffffffe00000 - ffffffffffffffff (=2 MB)    unused hole
 
 The direct mapping covers all memory in the system up to the highest
 memory address (this means in some cases it can also include PCI memory
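The two added lines document the fixed-address vsyscall page. For orientation, here is a purely illustrative C sketch of how legacy userspace reached it; the address comes from the table above, while the helper name and the function-pointer cast are this sketch's assumptions (modern kernels emulate or disable the page):

	#include <sys/time.h>

	/* gettimeofday sits at the first slot of the vsyscall page. */
	#define VSYSCALL_ADDR	0xffffffffff600000UL

	typedef int (*vgtod_t)(struct timeval *tv, struct timezone *tz);

	static int legacy_gettimeofday(struct timeval *tv)
	{
		vgtod_t vgtod = (vgtod_t)VSYSCALL_ADDR;	/* fixed ABI address */
		return vgtod(tv, NULL);			/* call into the page */
	}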


@@ -52,7 +52,7 @@ ENTRY(startup_32)
 	jnz	1f
 
 	cli
-	movl	$(__KERNEL_DS), %eax
+	movl	$(__BOOT_DS), %eax
 	movl	%eax, %ds
 	movl	%eax, %es
 	movl	%eax, %ss
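Note that __BOOT_DS and __KERNEL_DS happen to share the numeric value 0x18; the point of the change is that at this stage the CPU is still running on the decompressor's boot GDT, so __BOOT_DS is the name guaranteed to match it. A hedged sketch of the selector arithmetic, with values as they stood in asm/segment.h at the time:

	/* Illustrative, not the authoritative header: a selector is
	 * (GDT index << 3) | RPL, here with RPL 0.
	 */
	#define GDT_ENTRY_BOOT_CS	2
	#define GDT_ENTRY_BOOT_DS	(GDT_ENTRY_BOOT_CS + 1)
	#define __BOOT_DS		(GDT_ENTRY_BOOT_DS * 8)	/* = 0x18 */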


@@ -137,11 +137,11 @@ static inline unsigned long long native_read_pmc(int counter)
  * pointer indirection), this allows gcc to optimize better
  */
-#define rdmsr(msr, val1, val2)					\
+#define rdmsr(msr, low, high)					\
 do {								\
 	u64 __val = native_read_msr((msr));			\
-	(void)((val1) = (u32)__val);				\
-	(void)((val2) = (u32)(__val >> 32));			\
+	(void)((low) = (u32)__val);				\
+	(void)((high) = (u32)(__val >> 32));			\
 } while (0)
 
 static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
@@ -162,12 +162,12 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
 }
 
 /* rdmsr with exception handling */
-#define rdmsr_safe(msr, p1, p2)					\
+#define rdmsr_safe(msr, low, high)				\
 ({								\
 	int __err;						\
 	u64 __val = native_read_msr_safe((msr), &__err);	\
-	(*p1) = (u32)__val;					\
-	(*p2) = (u32)(__val >> 32);				\
+	(*low) = (u32)__val;					\
+	(*high) = (u32)(__val >> 32);				\
 	__err;							\
 })
@@ -208,7 +208,7 @@ do {								\
 #define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val),	\
 					     (u32)((val) >> 32))
 
-#define write_tsc(val1, val2) wrmsr(MSR_IA32_TSC, (val1), (val2))
+#define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high))
 
 #define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)
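A short usage sketch of the renamed interface, assuming normal kernel context: an MSR is a 64-bit register that these macros hand back as two 32-bit halves, which is exactly what the low/high names convey (MSR_IA32_TSC is used only as a familiar example; the _safe variant takes pointers, per the macro above):

	static u64 read_tsc_msr(void)
	{
		u32 low, high;

		if (rdmsr_safe(MSR_IA32_TSC, &low, &high))	/* non-zero if the access faulted */
			return 0;

		return ((u64)high << 32) | low;		/* reassemble the 64-bit value */
	}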


@@ -48,6 +48,5 @@
  * arch/x86/kernel/head_64.S), and it is mapped here:
  */
 #define KERNEL_IMAGE_SIZE	(512 * 1024 * 1024)
-#define KERNEL_IMAGE_START	_AC(0xffffffff80000000, UL)
 
 #endif /* _ASM_X86_PAGE_64_DEFS_H */
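The dropped constant was redundant: __START_KERNEL_map, defined in the same set of headers, already names 0xffffffff80000000, which is what the next hunk switches the users over to. An illustrative one-liner in the kernel's own idiom (not part of the patch):

	static inline void start_kernel_map_check(void)
	{
		/* The removed KERNEL_IMAGE_START and the surviving
		 * __START_KERNEL_map named the same address.
		 */
		BUILD_BUG_ON(__START_KERNEL_map != 0xffffffff80000000UL);
	}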


@@ -144,10 +144,10 @@ void __init x86_64_start_kernel(char * real_mode_data)
 	 * Build-time sanity checks on the kernel image and module
 	 * area mappings. (these are purely build-time and produce no code)
 	 */
-	BUILD_BUG_ON(MODULES_VADDR < KERNEL_IMAGE_START);
-	BUILD_BUG_ON(MODULES_VADDR-KERNEL_IMAGE_START < KERNEL_IMAGE_SIZE);
+	BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
+	BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
 	BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
-	BUILD_BUG_ON((KERNEL_IMAGE_START & ~PMD_MASK) != 0);
+	BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
 	BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
 	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
 	BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
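BUILD_BUG_ON() costs nothing at run time: a true condition must fail to compile. A sketch of the classic negative-array-size trick behind it (the kernel's real definition in <linux/bug.h> is more robust than this):

	#define BUILD_BUG_ON_SKETCH(cond)	((void)sizeof(char[1 - 2 * !!(cond)]))

	static void compile_time_checks(void)
	{
		BUILD_BUG_ON_SKETCH(sizeof(void *) != 8);	/* trips on a non-64-bit build */
	}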


@@ -354,18 +354,22 @@ static void ati_force_hpet_resume(void)
 static u32 ati_ixp4x0_rev(struct pci_dev *dev)
 {
-	u32 d;
-	u8  b;
+	int err = 0;
+	u32 d = 0;
+	u8  b = 0;
 
-	pci_read_config_byte(dev, 0xac, &b);
+	err = pci_read_config_byte(dev, 0xac, &b);
 	b &= ~(1<<5);
-	pci_write_config_byte(dev, 0xac, b);
-	pci_read_config_dword(dev, 0x70, &d);
+	err |= pci_write_config_byte(dev, 0xac, b);
+
+	err |= pci_read_config_dword(dev, 0x70, &d);
 	d |= 1<<8;
-	pci_write_config_dword(dev, 0x70, d);
-	pci_read_config_dword(dev, 0x8, &d);
+	err |= pci_write_config_dword(dev, 0x70, d);
+
+	err |= pci_read_config_dword(dev, 0x8, &d);
 	d &= 0xff;
 	dev_printk(KERN_DEBUG, &dev->dev, "SB4X0 revision 0x%x\n", d);
+
+	WARN_ON_ONCE(err);
+
 	return d;
 }
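The long-standing warning was gcc's "may be used uninitialized" false positive: gcc cannot prove that pci_read_config_byte() always writes *b before the value is used. A minimal standalone reproduction of the pattern, with a hypothetical helper standing in for the PCI accessors:

	int maybe_read(unsigned int *out);	/* hypothetical: may fail, leaving *out untouched */

	unsigned int old_style(void)
	{
		unsigned int v;			/* gcc: 'v' may be used uninitialized */

		maybe_read(&v);
		return v & 0xff;
	}

	unsigned int new_style(void)
	{
		unsigned int v = 0;		/* initialized, and ... */
		int err = maybe_read(&v);

		return err ? 0 : (v & 0xff);	/* ... the error stays visible */
	}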


@@ -61,7 +61,7 @@ ENTRY(csum_partial)
 	testl $3, %esi		# Check alignment.
 	jz 2f			# Jump if alignment is ok.
 	testl $1, %esi		# Check alignment.
-	jz 10f			# Jump if alignment is boundary of 2bytes.
+	jz 10f			# Jump if alignment is boundary of 2 bytes.
 
 	# buf is odd
 	dec %ecx


@@ -26,7 +26,7 @@ void *memmove(void *dest, const void *src, size_t n)
 	char *ret = dest;
 
 	__asm__ __volatile__(
-		/* Handle more 16bytes in loop */
+		/* Handle more 16 bytes in loop */
 		"cmp $0x10, %0\n\t"
 		"jb	1f\n\t"
@@ -51,7 +51,7 @@ void *memmove(void *dest, const void *src, size_t n)
 		"sub $0x10, %0\n\t"
 
 		/*
-		 * We gobble 16byts forward in each loop.
+		 * We gobble 16 bytes forward in each loop.
 		 */
 		"3:\n\t"
 		"sub $0x10, %0\n\t"
@@ -117,7 +117,7 @@ void *memmove(void *dest, const void *src, size_t n)
 		"sub $0x10, %0\n\t"
 
 		/*
-		 * We gobble 16byts backward in each loop.
+		 * We gobble 16 bytes backward in each loop.
 		 */
 		"7:\n\t"
 		"sub $0x10, %0\n\t"


@@ -98,7 +98,7 @@ ENTRY(memcpy)
 	subq $0x20,	%rdx
 	/*
 	 * At most 3 ALU operations in one cycle,
-	 * so append NOPS in the same 16bytes trunk.
+	 * so append NOPS in the same 16 bytes trunk.
 	 */
 	.p2align 4
 .Lcopy_backward_loop:


@@ -27,7 +27,7 @@
 ENTRY(memmove)
 	CFI_STARTPROC
 
-	/* Handle more 32bytes in loop */
+	/* Handle more 32 bytes in loop */
 	mov %rdi, %rax
 	cmp $0x20, %rdx
 	jb	1f
@@ -56,7 +56,7 @@ ENTRY(memmove)
 3:
 	sub $0x20, %rdx
 	/*
-	 * We gobble 32byts forward in each loop.
+	 * We gobble 32 bytes forward in each loop.
 	 */
 5:
 	sub $0x20, %rdx
@@ -122,7 +122,7 @@ ENTRY(memmove)
 	addq %rdx, %rdi
 	subq $0x20, %rdx
 	/*
-	 * We gobble 32byts backward in each loop.
+	 * We gobble 32 bytes backward in each loop.
 	 */
 8:
 	subq $0x20, %rdx
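For context, the forward/backward split these comments describe is the standard overlap-safe memmove structure; a byte-at-a-time C sketch of the idea (the kernel versions above do the same in 16- or 32-byte chunks):

	#include <stddef.h>

	void *memmove_sketch(void *dest, const void *src, size_t n)
	{
		unsigned char *d = dest;
		const unsigned char *s = src;

		if (d < s)
			while (n--)
				*d++ = *s++;	/* copy forward */
		else
			while (n--)
				d[n] = s[n];	/* copy backward */

		return dest;
	}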