linux-stable/arch/arm/kernel/vmlinux.lds.S
David Brown 9973290ce2 ARM: 7428/1: Prevent KALLSYM size mismatch on ARM.
ARM builds seem to be plagued by an occasional build error:

    Inconsistent kallsyms data
    This is a bug - please report about it
    Try "make KALLSYMS_EXTRA_PASS=1" as a workaround

The problem has to do with alignment of some sections by the linker.
The kallsyms data is built in two passes: the kernel is first linked
without it, and then linked again with the symbols included.  Normally
this just shifts the symbols without changing their order, so the
compression used by kallsyms gives the same result.
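
As a sketch of why this is normally safe (toy Python, not the actual
kbuild logic; the addresses and the helpers link() and kallsyms_size()
are made up for illustration):

    # Pretend link step: returns the final (address, name) symbol
    # table.  Including the kallsyms blob shifts the symbols by its
    # size without reordering them.
    def link(kallsyms_blob_size):
        base = [(0x8000, "stext"), (0xc000, "_etext"), (0xc040, "_sdata")]
        return [(addr + kallsyms_blob_size, name) for addr, name in base]

    # Pretend compression: the result depends on the names and their
    # order, never on the addresses, so uniformly shifting every
    # symbol leaves it unchanged.
    def kallsyms_size(symtab):
        h = 0
        for _, name in symtab:
            for ch in name:
                h = (h * 31 + ord(ch)) % 997
        return 256 + h

    pass1 = link(0)                       # first link, without kallsyms
    size1 = kallsyms_size(pass1)          # blob built from pass-1 symbols
    pass2 = link(size1)                   # relink with the blob included
    assert kallsyms_size(pass2) == size1  # must agree or the build fails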

On non-SMP builds, the per-CPU data is empty.  Depending on where the
alignment ends up, the layout can come out as either:

   +-------------------+
   | last text segment |
   +-------------------+
   /* padding */
   +-------------------+     <- L1_CACHE_BYTES alignment
   | per cpu (empty)   |
   +-------------------+
__per_cpu_end:
   /* padding */
__data_loc:
   +-------------------+     <- THREAD_SIZE alignment
   | data              |
   +-------------------+

or

   +-------------------+
   | last text segment |
   +-------------------+
   /* padding */
   +-------------------+     <- L1_CACHE_BYTES alignment
   | per cpu (empty)   |
   +-------------------+
__per_cpu_end:
   /* no padding */
__data_loc:
   +-------------------+     <- THREAD_SIZE alignment
   | data              |
   +-------------------+

if the alignment happens to satisfy both.  Because 'nm -n' orders
symbols that share an address by name, the second case emits the
symbols in a different order than the first.  That changes the
compression, and with it the size of the kallsyms data, causing the
build failure.

The KALLSYMS_EXTRA_PASS=1 workaround usually works, but it is still
possible for the alignment to change between the second and third
passes.  It is probably even possible for the process never to reach a
fixed point.
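
The effect can be modelled in a few lines of toy Python (a stand-in
for 'nm -n' and for the compression in scripts/kallsyms.c; all
addresses and sizes here are hypothetical):

    # 'nm -n' sorts by address and breaks ties by name.
    def nm_sort(symtab):
        return sorted(symtab, key=lambda sym: (sym[0], sym[1]))

    # __data_loc is __per_cpu_end rounded up to THREAD_SIZE alignment;
    # with "lucky" padding the two coincide, the name-order fallback
    # kicks in, and the emitted symbol order flips.
    def layout(kallsyms_size, thread_size=8192):
        per_cpu_end = 0xc000 + kallsyms_size
        data_loc = (per_cpu_end + thread_size - 1) & ~(thread_size - 1)
        return nm_sort([(per_cpu_end, "__per_cpu_end"),
                        (data_loc, "__data_loc")])

    # Order-sensitive stand-in for the kallsyms table compression.
    def compressed_size(symtab):
        h = 0
        for _, name in symtab:
            for ch in name:
                h = (h * 31 + ord(ch)) % 997
        return 256 + h

    # Each pass feeds the previous size back into the layout; if the
    # symbol order keeps flipping, the passes chase their own tail.
    size = 0
    for n in range(1, 5):
        new_size = compressed_size(layout(size))
        print(f"pass {n}: kallsyms size {new_size}")
        if new_size == size:
            break               # fixed point reached; the build succeeds
        size = new_size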

The problem only occurs on non-SMP builds, when the per-CPU data is
empty and the data segment has extra alignment (and immediately
follows the text segments).  Fix this by including the per_cpu section
only on SMP builds, where it is not empty.
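
The guard can be seen at the PERCPU_SECTION site in the linker script
below:

    #ifdef CONFIG_SMP
            PERCPU_SECTION(L1_CACHE_BYTES)
    #endif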

Signed-off-by: David Brown <davidb@codeaurora.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2012-06-22 22:54:18 +01:00

/* ld script to make ARM Linux kernel
 * taken from the i386 version by Russell King
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */

#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>

#define PROC_INFO                                               \
        . = ALIGN(4);                                           \
        VMLINUX_SYMBOL(__proc_info_begin) = .;                  \
        *(.proc.info.init)                                      \
        VMLINUX_SYMBOL(__proc_info_end) = .;

#define IDMAP_TEXT                                              \
        ALIGN_FUNCTION();                                       \
        VMLINUX_SYMBOL(__idmap_text_start) = .;                 \
        *(.idmap.text)                                          \
        VMLINUX_SYMBOL(__idmap_text_end) = .;

#ifdef CONFIG_HOTPLUG_CPU
#define ARM_CPU_DISCARD(x)
#define ARM_CPU_KEEP(x)         x
#else
#define ARM_CPU_DISCARD(x)      x
#define ARM_CPU_KEEP(x)
#endif

#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
        defined(CONFIG_GENERIC_BUG)
#define ARM_EXIT_KEEP(x)        x
#define ARM_EXIT_DISCARD(x)
#else
#define ARM_EXIT_KEEP(x)
#define ARM_EXIT_DISCARD(x)     x
#endif

OUTPUT_ARCH(arm)
ENTRY(stext)
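
/*
 * jiffies aliases the low 32 bits of the 64-bit jiffies_64 counter;
 * on little-endian the low word is at offset 0, while on big-endian
 * (__ARMEB__) it sits 4 bytes in.
 */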
#ifndef __ARMEB__
jiffies = jiffies_64;
#else
jiffies = jiffies_64 + 4;
#endif
SECTIONS
{
        /*
         * XXX: The linker does not define how output sections are
         * assigned to input sections when there are multiple statements
         * matching the same input section name.  There is no documented
         * order of matching.
         *
         * unwind exit sections must be discarded before the rest of the
         * unwind sections get included.
         */
        /DISCARD/ : {
                *(.ARM.exidx.exit.text)
                *(.ARM.extab.exit.text)
                ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
                ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
                ARM_EXIT_DISCARD(EXIT_TEXT)
                ARM_EXIT_DISCARD(EXIT_DATA)
                EXIT_CALL
#ifndef CONFIG_HOTPLUG
                *(.ARM.exidx.devexit.text)
                *(.ARM.extab.devexit.text)
#endif
#ifndef CONFIG_MMU
                *(.fixup)
                *(__ex_table)
#endif
#ifndef CONFIG_SMP_ON_UP
                *(.alt.smp.init)
#endif
                *(.discard)
                *(.discard.*)
        }

#ifdef CONFIG_XIP_KERNEL
        . = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
#else
        . = PAGE_OFFSET + TEXT_OFFSET;
#endif

        .head.text : {
                _text = .;
                HEAD_TEXT
        }

        .text : {                       /* Real text segment */
                _stext = .;             /* Text and read-only data */
                __exception_text_start = .;
                *(.exception.text)
                __exception_text_end = .;
                IRQENTRY_TEXT
                TEXT_TEXT
                SCHED_TEXT
                LOCK_TEXT
                KPROBES_TEXT
                IDMAP_TEXT
#ifdef CONFIG_MMU
                *(.fixup)
#endif
                *(.gnu.warning)
                *(.glue_7)
                *(.glue_7t)
                . = ALIGN(4);
                *(.got)                 /* Global offset table */
                ARM_CPU_KEEP(PROC_INFO)
        }

        RO_DATA(PAGE_SIZE)

#ifdef CONFIG_ARM_UNWIND
        /*
         * Stack unwinding tables
         */
        . = ALIGN(8);
        .ARM.unwind_idx : {
                __start_unwind_idx = .;
                *(.ARM.exidx*)
                __stop_unwind_idx = .;
        }
        .ARM.unwind_tab : {
                __start_unwind_tab = .;
                *(.ARM.extab*)
                __stop_unwind_tab = .;
        }
#endif

        _etext = .;                     /* End of text and rodata section */

#ifndef CONFIG_XIP_KERNEL
        . = ALIGN(PAGE_SIZE);
        __init_begin = .;
#endif

        INIT_TEXT_SECTION(8)
        .exit.text : {
                ARM_EXIT_KEEP(EXIT_TEXT)
        }
        .init.proc.info : {
                ARM_CPU_DISCARD(PROC_INFO)
        }
        .init.arch.info : {
                __arch_info_begin = .;
                *(.arch.info.init)
                __arch_info_end = .;
        }
        .init.tagtable : {
                __tagtable_begin = .;
                *(.taglist.init)
                __tagtable_end = .;
        }
#ifdef CONFIG_SMP_ON_UP
        .init.smpalt : {
                __smpalt_begin = .;
                *(.alt.smp.init)
                __smpalt_end = .;
        }
#endif
        .init.pv_table : {
                __pv_table_begin = .;
                *(.pv_table)
                __pv_table_end = .;
        }
        .init.data : {
#ifndef CONFIG_XIP_KERNEL
                INIT_DATA
#endif
                INIT_SETUP(16)
                INIT_CALLS
                CON_INITCALL
                SECURITY_INITCALL
                INIT_RAM_FS
        }
#ifndef CONFIG_XIP_KERNEL
        .exit.data : {
                ARM_EXIT_KEEP(EXIT_DATA)
        }
#endif
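
        /*
         * The per-CPU area is emitted only on SMP builds: on UP it
         * would be empty, and the alignment padding of an empty
         * section can flip the symbol order around __per_cpu_end and
         * break kallsyms (see the commit log above).
         */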
#ifdef CONFIG_SMP
        PERCPU_SECTION(L1_CACHE_BYTES)
#endif

#ifdef CONFIG_XIP_KERNEL
        __data_loc = ALIGN(4);          /* location in binary */
        . = PAGE_OFFSET + TEXT_OFFSET;
#else
        __init_end = .;
        . = ALIGN(THREAD_SIZE);
        __data_loc = .;
#endif

        .data : AT(__data_loc) {
                _data = .;              /* address in memory */
                _sdata = .;

                /*
                 * first, the init task union, aligned
                 * to an 8192 byte boundary.
                 */
                INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_XIP_KERNEL
                . = ALIGN(PAGE_SIZE);
                __init_begin = .;
                INIT_DATA
                ARM_EXIT_KEEP(EXIT_DATA)
                . = ALIGN(PAGE_SIZE);
                __init_end = .;
#endif

                NOSAVE_DATA
                CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
                READ_MOSTLY_DATA(L1_CACHE_BYTES)

                /*
                 * The exception fixup table (might need resorting at runtime)
                 */
                . = ALIGN(4);
                __start___ex_table = .;
#ifdef CONFIG_MMU
                *(__ex_table)
#endif
                __stop___ex_table = .;

                /*
                 * and the usual data section
                 */
                DATA_DATA
                CONSTRUCTORS

                _edata = .;
        }
        _edata_loc = __data_loc + SIZEOF(.data);

#ifdef CONFIG_HAVE_TCM
        /*
         * We align everything to a page boundary so we can
         * free it after init has commenced and TCM contents have
         * been copied to its destination.
         */
        .tcm_start : {
                . = ALIGN(PAGE_SIZE);
                __tcm_start = .;
                __itcm_start = .;
        }

        /*
         * Link these to the ITCM RAM
         * Put VMA to the TCM address and LMA to the common RAM
         * and we'll upload the contents from RAM to TCM and free
         * the used RAM after that.
         */
        .text_itcm ITCM_OFFSET : AT(__itcm_start)
        {
                __sitcm_text = .;
                *(.tcm.text)
                *(.tcm.rodata)
                . = ALIGN(4);
                __eitcm_text = .;
        }

        /*
         * Reset the dot pointer, this is needed to create the
         * relative __dtcm_start below (to be used as extern in code).
         */
        . = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_itcm);

        .dtcm_start : {
                __dtcm_start = .;
        }

        /* TODO: add remainder of ITCM as well, that can be used for data! */
        .data_dtcm DTCM_OFFSET : AT(__dtcm_start)
        {
                . = ALIGN(4);
                __sdtcm_data = .;
                *(.tcm.data)
                . = ALIGN(4);
                __edtcm_data = .;
        }

        /* Reset the dot pointer or the linker gets confused */
        . = ADDR(.dtcm_start) + SIZEOF(.data_dtcm);

        /* End marker for freeing TCM copy in linked object */
        .tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_dtcm)){
                . = ALIGN(PAGE_SIZE);
                __tcm_end = .;
        }
#endif

        NOTES

        BSS_SECTION(0, 0, 0)
        _end = .;

        STABS_DEBUG
        .comment 0 : { *(.comment) }
}

/*
 * These must never be empty
 * If you have to comment these two assert statements out, your
 * binutils is too old (for other reasons as well)
 */
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")