x86: move exports to actual definitions

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Author: Al Viro
Date:   2016-01-11 11:04:34 -05:00
parent 22823ab419
commit 784d5699ed
27 changed files with 68 additions and 150 deletions
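
Every hunk below follows the same pattern: the assembly source gains a #include <asm/export.h> and an EXPORT_SYMBOL() placed right after the END()/ENDPROC() of the routine it defines, while the old per-architecture collector files (i386_ksyms_32.c, x8664_ksyms_64.c and um's ksyms.c), which exported those symbols through C prototypes, are removed. A condensed before/after sketch of the change, based on the cmpxchg8b_emu hunks in this diff (trimmed for illustration, not a verbatim quote):

    /* before: arch/x86/kernel/i386_ksyms_32.c exports the assembly routine
       through a prototype that exists only to name the symbol */
    extern void cmpxchg8b_emu(void);
    EXPORT_SYMBOL(cmpxchg8b_emu);

    /* after: arch/x86/lib/cmpxchg8b_emu.S exports it at the definition */
    #include <asm/export.h>
        ...
    ENDPROC(cmpxchg8b_emu)
    EXPORT_SYMBOL(cmpxchg8b_emu)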

arch/x86/entry/entry_32.S

@ -44,6 +44,7 @@
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/export.h>
.section .entry.text, "ax"
@ -955,6 +956,7 @@ trace:
jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
EXPORT_SYMBOL(mcount)
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

arch/x86/entry/entry_64.S

@ -35,6 +35,7 @@
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <asm/export.h>
#include <linux/err.h>
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
@ -785,6 +786,7 @@ ENTRY(native_load_gs_index)
popfq
ret
END(native_load_gs_index)
EXPORT_SYMBOL(native_load_gs_index)
_ASM_EXTABLE(.Lgs_change, bad_gs)
.section .fixup, "ax"

arch/x86/entry/thunk_32.S

@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/export.h>
/* put return address in eax (arg1) */
.macro THUNK name, func, put_ret_addr_in_eax=0
@ -36,5 +37,7 @@
#ifdef CONFIG_PREEMPT
THUNK ___preempt_schedule, preempt_schedule
THUNK ___preempt_schedule_notrace, preempt_schedule_notrace
EXPORT_SYMBOL(___preempt_schedule)
EXPORT_SYMBOL(___preempt_schedule_notrace)
#endif

arch/x86/entry/thunk_64.S

@ -8,6 +8,7 @@
#include <linux/linkage.h>
#include "calling.h"
#include <asm/asm.h>
#include <asm/export.h>
/* rdi: arg1 ... normal C conventions. rax is saved/restored. */
.macro THUNK name, func, put_ret_addr_in_rdi=0
@ -49,6 +50,8 @@
#ifdef CONFIG_PREEMPT
THUNK ___preempt_schedule, preempt_schedule
THUNK ___preempt_schedule_notrace, preempt_schedule_notrace
EXPORT_SYMBOL(___preempt_schedule)
EXPORT_SYMBOL(___preempt_schedule_notrace)
#endif
#if defined(CONFIG_TRACE_IRQFLAGS) \

arch/x86/include/asm/export.h (new file)

@ -0,0 +1,4 @@
#ifdef CONFIG_64BIT
#define KSYM_ALIGN 16
#endif
#include <asm-generic/export.h>
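
The new arch/x86/include/asm/export.h is only a thin wrapper: it raises the ksymtab entry alignment on 64-bit and pulls in the generic assembler-side macros added by the parent commit. As a rough orientation (a sketch, not part of the patch): EXPORT_SYMBOL() used from a .S file emits the same kind of __ksymtab record that <linux/export.h> generates from C, i.e. one entry with the layout below plus the name string in __ksymtab_strings, with KSYM_ALIGN used as the .balign of each entry.

    /* illustrative only -- the struct kernel_symbol of this era */
    struct kernel_symbol {
        unsigned long value;    /* address of the exported symbol */
        const char *name;       /* points into __ksymtab_strings  */
    };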

arch/x86/kernel/Makefile

@ -46,9 +46,7 @@ obj-$(CONFIG_MODIFY_LDT_SYSCALL) += ldt.o
obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
obj-$(CONFIG_IRQ_WORK) += irq_work.o
obj-y += probe_roms.o
-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
-obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
-obj-$(CONFIG_X86_64) += mcount_64.o
+obj-$(CONFIG_X86_64) += sys_x86_64.o mcount_64.o
obj-$(CONFIG_X86_ESPFIX64) += espfix_64.o
obj-$(CONFIG_SYSFS) += ksysfs.o
obj-y += bootflag.o e820.o

arch/x86/kernel/head_32.S

@ -23,6 +23,7 @@
#include <asm/percpu.h>
#include <asm/nops.h>
#include <asm/bootparam.h>
#include <asm/export.h>
/* Physical address */
#define pa(X) ((X) - __PAGE_OFFSET)
@ -673,6 +674,7 @@ ENTRY(empty_zero_page)
.fill 4096,1,0
ENTRY(swapper_pg_dir)
.fill 1024,4,0
EXPORT_SYMBOL(empty_zero_page)
/*
* This starts the data section.

arch/x86/kernel/head_64.S

@ -21,6 +21,7 @@
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/export.h>
#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
@ -488,10 +489,12 @@ early_gdt_descr_base:
ENTRY(phys_base)
/* This must match the first entry in level2_kernel_pgt */
.quad 0x0000000000000000
EXPORT_SYMBOL(phys_base)
#include "../../x86/xen/xen-head.S"
__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
.skip PAGE_SIZE
EXPORT_SYMBOL(empty_zero_page)

arch/x86/kernel/i386_ksyms_32.c (deleted)

@ -1,47 +0,0 @@
#include <linux/export.h>
#include <linux/spinlock_types.h>
#include <asm/checksum.h>
#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/ftrace.h>
#ifdef CONFIG_FUNCTION_TRACER
/* mcount is defined in assembly */
EXPORT_SYMBOL(mcount);
#endif
/*
* Note, this is a prototype to get at the symbol for
* the export, but dont use it from C code, it is used
* by assembly code and is not using C calling convention!
*/
#ifndef CONFIG_X86_CMPXCHG64
extern void cmpxchg8b_emu(void);
EXPORT_SYMBOL(cmpxchg8b_emu);
#endif
/* Networking helper routines. */
EXPORT_SYMBOL(csum_partial_copy_generic);
EXPORT_SYMBOL(__get_user_1);
EXPORT_SYMBOL(__get_user_2);
EXPORT_SYMBOL(__get_user_4);
EXPORT_SYMBOL(__get_user_8);
EXPORT_SYMBOL(__put_user_1);
EXPORT_SYMBOL(__put_user_2);
EXPORT_SYMBOL(__put_user_4);
EXPORT_SYMBOL(__put_user_8);
EXPORT_SYMBOL(strstr);
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(empty_zero_page);
#ifdef CONFIG_PREEMPT
EXPORT_SYMBOL(___preempt_schedule);
EXPORT_SYMBOL(___preempt_schedule_notrace);
#endif
EXPORT_SYMBOL(__sw_hweight32);

arch/x86/kernel/mcount_64.S

@ -7,6 +7,7 @@
#include <linux/linkage.h>
#include <asm/ptrace.h>
#include <asm/ftrace.h>
#include <asm/export.h>
.code64
@ -294,6 +295,7 @@ trace:
jmp fgraph_trace
END(function_hook)
#endif /* CONFIG_DYNAMIC_FTRACE */
EXPORT_SYMBOL(function_hook)
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

arch/x86/kernel/x8664_ksyms_64.c (deleted)

@ -1,85 +0,0 @@
/* Exports for assembly files.
All C exports should go in the respective C files. */
#include <linux/export.h>
#include <linux/spinlock_types.h>
#include <linux/smp.h>
#include <net/checksum.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/desc.h>
#include <asm/ftrace.h>
#ifdef CONFIG_FUNCTION_TRACER
/* mcount and __fentry__ are defined in assembly */
#ifdef CC_USING_FENTRY
EXPORT_SYMBOL(__fentry__);
#else
EXPORT_SYMBOL(mcount);
#endif
#endif
EXPORT_SYMBOL(__get_user_1);
EXPORT_SYMBOL(__get_user_2);
EXPORT_SYMBOL(__get_user_4);
EXPORT_SYMBOL(__get_user_8);
EXPORT_SYMBOL(__put_user_1);
EXPORT_SYMBOL(__put_user_2);
EXPORT_SYMBOL(__put_user_4);
EXPORT_SYMBOL(__put_user_8);
EXPORT_SYMBOL(copy_user_generic_string);
EXPORT_SYMBOL(copy_user_generic_unrolled);
EXPORT_SYMBOL(copy_user_enhanced_fast_string);
EXPORT_SYMBOL(__copy_user_nocache);
EXPORT_SYMBOL(_copy_from_user);
EXPORT_SYMBOL(_copy_to_user);
EXPORT_SYMBOL_GPL(memcpy_mcsafe);
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(clear_page);
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(__sw_hweight32);
EXPORT_SYMBOL(__sw_hweight64);
/*
* Export string functions. We normally rely on gcc builtin for most of these,
* but gcc sometimes decides not to inline them.
*/
#undef memcpy
#undef memset
#undef memmove
extern void *__memset(void *, int, __kernel_size_t);
extern void *__memcpy(void *, const void *, __kernel_size_t);
extern void *__memmove(void *, const void *, __kernel_size_t);
extern void *memset(void *, int, __kernel_size_t);
extern void *memcpy(void *, const void *, __kernel_size_t);
extern void *memmove(void *, const void *, __kernel_size_t);
EXPORT_SYMBOL(__memset);
EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(__memmove);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
#ifndef CONFIG_DEBUG_VIRTUAL
EXPORT_SYMBOL(phys_base);
#endif
EXPORT_SYMBOL(empty_zero_page);
#ifndef CONFIG_PARAVIRT
EXPORT_SYMBOL(native_load_gs_index);
#endif
#ifdef CONFIG_PREEMPT
EXPORT_SYMBOL(___preempt_schedule);
EXPORT_SYMBOL(___preempt_schedule_notrace);
#endif

arch/x86/lib/checksum_32.S

@ -28,6 +28,7 @@
#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/asm.h>
#include <asm/export.h>
/*
* computes a partial checksum, e.g. for TCP/UDP fragments
@ -251,6 +252,7 @@ ENTRY(csum_partial)
ENDPROC(csum_partial)
#endif
EXPORT_SYMBOL(csum_partial)
/*
unsigned int csum_partial_copy_generic (const char *src, char *dst,
@ -490,3 +492,4 @@ ENDPROC(csum_partial_copy_generic)
#undef ROUND1
#endif
EXPORT_SYMBOL(csum_partial_copy_generic)

arch/x86/lib/clear_page_64.S

@ -1,6 +1,7 @@
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/export.h>
/*
* Most CPUs support enhanced REP MOVSB/STOSB instructions. It is
@ -23,6 +24,7 @@ ENTRY(clear_page)
rep stosq
ret
ENDPROC(clear_page)
EXPORT_SYMBOL(clear_page)
ENTRY(clear_page_orig)

arch/x86/lib/cmpxchg8b_emu.S

@ -7,6 +7,7 @@
*/
#include <linux/linkage.h>
#include <asm/export.h>
.text
@ -48,3 +49,4 @@ ENTRY(cmpxchg8b_emu)
ret
ENDPROC(cmpxchg8b_emu)
EXPORT_SYMBOL(cmpxchg8b_emu)

arch/x86/lib/copy_page_64.S

@ -3,6 +3,7 @@
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/export.h>
/*
* Some CPUs run faster using the string copy instructions (sane microcode).
@ -17,6 +18,7 @@ ENTRY(copy_page)
rep movsq
ret
ENDPROC(copy_page)
EXPORT_SYMBOL(copy_page)
ENTRY(copy_page_regs)
subq $2*8, %rsp

arch/x86/lib/copy_user_64.S

@ -14,6 +14,7 @@
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/export.h>
/* Standard copy_to_user with segment limit checking */
ENTRY(_copy_to_user)
@ -29,6 +30,7 @@ ENTRY(_copy_to_user)
"jmp copy_user_enhanced_fast_string", \
X86_FEATURE_ERMS
ENDPROC(_copy_to_user)
EXPORT_SYMBOL(_copy_to_user)
/* Standard copy_from_user with segment limit checking */
ENTRY(_copy_from_user)
@ -44,6 +46,8 @@ ENTRY(_copy_from_user)
"jmp copy_user_enhanced_fast_string", \
X86_FEATURE_ERMS
ENDPROC(_copy_from_user)
EXPORT_SYMBOL(_copy_from_user)
.section .fixup,"ax"
/* must zero dest */
@ -155,6 +159,7 @@ ENTRY(copy_user_generic_unrolled)
_ASM_EXTABLE(21b,50b)
_ASM_EXTABLE(22b,50b)
ENDPROC(copy_user_generic_unrolled)
EXPORT_SYMBOL(copy_user_generic_unrolled)
/* Some CPUs run faster using the string copy instructions.
* This is also a lot simpler. Use them when possible.
@ -200,6 +205,7 @@ ENTRY(copy_user_generic_string)
_ASM_EXTABLE(1b,11b)
_ASM_EXTABLE(3b,12b)
ENDPROC(copy_user_generic_string)
EXPORT_SYMBOL(copy_user_generic_string)
/*
* Some CPUs are adding enhanced REP MOVSB/STOSB instructions.
@ -229,6 +235,7 @@ ENTRY(copy_user_enhanced_fast_string)
_ASM_EXTABLE(1b,12b)
ENDPROC(copy_user_enhanced_fast_string)
EXPORT_SYMBOL(copy_user_enhanced_fast_string)
/*
* copy_user_nocache - Uncached memory copy with exception handling
@ -379,3 +386,4 @@ ENTRY(__copy_user_nocache)
_ASM_EXTABLE(40b,.L_fixup_1b_copy)
_ASM_EXTABLE(41b,.L_fixup_1b_copy)
ENDPROC(__copy_user_nocache)
EXPORT_SYMBOL(__copy_user_nocache)

arch/x86/lib/csum-partial_64.c

@ -135,6 +135,7 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
return (__force __wsum)add32_with_carry(do_csum(buff, len),
(__force u32)sum);
}
EXPORT_SYMBOL(csum_partial);
/*
* this routine is used for miscellaneous IP-like checksums, mainly

arch/x86/lib/getuser.S

@ -32,6 +32,7 @@
#include <asm/thread_info.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/export.h>
.text
ENTRY(__get_user_1)
@ -44,6 +45,7 @@ ENTRY(__get_user_1)
ASM_CLAC
ret
ENDPROC(__get_user_1)
EXPORT_SYMBOL(__get_user_1)
ENTRY(__get_user_2)
add $1,%_ASM_AX
@ -57,6 +59,7 @@ ENTRY(__get_user_2)
ASM_CLAC
ret
ENDPROC(__get_user_2)
EXPORT_SYMBOL(__get_user_2)
ENTRY(__get_user_4)
add $3,%_ASM_AX
@ -70,6 +73,7 @@ ENTRY(__get_user_4)
ASM_CLAC
ret
ENDPROC(__get_user_4)
EXPORT_SYMBOL(__get_user_4)
ENTRY(__get_user_8)
#ifdef CONFIG_X86_64
@ -97,6 +101,7 @@ ENTRY(__get_user_8)
ret
#endif
ENDPROC(__get_user_8)
EXPORT_SYMBOL(__get_user_8)
bad_get_user:

arch/x86/lib/hweight.S

@ -1,4 +1,5 @@
#include <linux/linkage.h>
#include <asm/export.h>
#include <asm/asm.h>
@ -32,6 +33,7 @@ ENTRY(__sw_hweight32)
__ASM_SIZE(pop,) %__ASM_REG(dx)
ret
ENDPROC(__sw_hweight32)
EXPORT_SYMBOL(__sw_hweight32)
ENTRY(__sw_hweight64)
#ifdef CONFIG_X86_64
@ -75,3 +77,4 @@ ENTRY(__sw_hweight64)
ret
#endif
ENDPROC(__sw_hweight64)
EXPORT_SYMBOL(__sw_hweight64)

arch/x86/lib/memcpy_64.S

@ -4,6 +4,7 @@
#include <asm/errno.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/export.h>
/*
* We build a jump to memcpy_orig by default which gets NOPped out on
@ -40,6 +41,8 @@ ENTRY(memcpy)
ret
ENDPROC(memcpy)
ENDPROC(__memcpy)
EXPORT_SYMBOL(memcpy)
EXPORT_SYMBOL(__memcpy)
/*
* memcpy_erms() - enhanced fast string memcpy. This is faster and
@ -274,6 +277,7 @@ ENTRY(memcpy_mcsafe)
xorq %rax, %rax
ret
ENDPROC(memcpy_mcsafe)
EXPORT_SYMBOL_GPL(memcpy_mcsafe)
.section .fixup, "ax"
/* Return -EFAULT for any failure */

arch/x86/lib/memmove_64.S

@ -8,6 +8,7 @@
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/export.h>
#undef memmove
@ -207,3 +208,5 @@ ENTRY(__memmove)
retq
ENDPROC(__memmove)
ENDPROC(memmove)
EXPORT_SYMBOL(__memmove)
EXPORT_SYMBOL(memmove)

arch/x86/lib/memset_64.S

@ -3,6 +3,7 @@
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/export.h>
.weak memset
@ -43,6 +44,8 @@ ENTRY(__memset)
ret
ENDPROC(memset)
ENDPROC(__memset)
EXPORT_SYMBOL(memset)
EXPORT_SYMBOL(__memset)
/*
* ISO C memset - set a memory block to a byte value. This function uses

arch/x86/lib/putuser.S

@ -15,6 +15,7 @@
#include <asm/errno.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/export.h>
/*
@ -43,6 +44,7 @@ ENTRY(__put_user_1)
xor %eax,%eax
EXIT
ENDPROC(__put_user_1)
EXPORT_SYMBOL(__put_user_1)
ENTRY(__put_user_2)
ENTER
@ -55,6 +57,7 @@ ENTRY(__put_user_2)
xor %eax,%eax
EXIT
ENDPROC(__put_user_2)
EXPORT_SYMBOL(__put_user_2)
ENTRY(__put_user_4)
ENTER
@ -67,6 +70,7 @@ ENTRY(__put_user_4)
xor %eax,%eax
EXIT
ENDPROC(__put_user_4)
EXPORT_SYMBOL(__put_user_4)
ENTRY(__put_user_8)
ENTER
@ -82,6 +86,7 @@ ENTRY(__put_user_8)
xor %eax,%eax
EXIT
ENDPROC(__put_user_8)
EXPORT_SYMBOL(__put_user_8)
bad_put_user:
movl $-EFAULT,%eax

arch/x86/lib/strstr_32.c

@ -1,4 +1,5 @@
#include <linux/string.h>
#include <linux/export.h>
char *strstr(const char *cs, const char *ct)
{
@ -28,4 +29,4 @@ __asm__ __volatile__(
: "dx", "di");
return __res;
}
EXPORT_SYMBOL(strstr);

arch/x86/um/Makefile

@ -8,7 +8,7 @@ else
BITS := 64
endif
-obj-y = bug.o bugs_$(BITS).o delay.o fault.o ksyms.o ldt.o \
+obj-y = bug.o bugs_$(BITS).o delay.o fault.o ldt.o \
ptrace_$(BITS).o ptrace_user.o setjmp_$(BITS).o signal.o \
stub_$(BITS).o stub_segv.o \
sys_call_table_$(BITS).o sysrq_$(BITS).o tls_$(BITS).o \

arch/x86/um/checksum_32.S

@ -27,6 +27,7 @@
#include <asm/errno.h>
#include <asm/asm.h>
#include <asm/export.h>
/*
* computes a partial checksum, e.g. for TCP/UDP fragments
@ -214,3 +215,4 @@ csum_partial:
ret
#endif
EXPORT_SYMBOL(csum_partial)

arch/x86/um/ksyms.c (deleted)

@ -1,13 +0,0 @@
#include <linux/module.h>
#include <asm/string.h>
#include <asm/checksum.h>
#ifndef CONFIG_X86_32
/*XXX: we need them because they would be exported by x86_64 */
#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
EXPORT_SYMBOL(memcpy);
#else
EXPORT_SYMBOL(__memcpy);
#endif
#endif
EXPORT_SYMBOL(csum_partial);