linux-stable/scripts/sorttable.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * sorttable.c: Sort the kernel's exception and ORC unwind tables
*
* Added ORC unwind tables sort support and other updates:
* Copyright (C) 1999-2019 Alibaba Group Holding Limited. by:
* Shile Zhang <shile.zhang@linux.alibaba.com>
*
* Copyright 2011 - 2012 Cavium, Inc.
*
 * Based on code taken from recordmcount.c, which is:
*
* Copyright 2009 John F. Reiser <jreiser@BitWagon.com>. All rights reserved.
*
* Restructured to fit Linux format, as well as other updates:
* Copyright 2010 Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
*/
/*
* Strategy: alter the vmlinux file in-place.
*/
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <getopt.h>
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <tools/be_byteshift.h>
#include <tools/le_byteshift.h>
#ifndef EM_ARCOMPACT
#define EM_ARCOMPACT 93
#endif
#ifndef EM_XTENSA
#define EM_XTENSA 94
#endif
#ifndef EM_AARCH64
#define EM_AARCH64 183
#endif
#ifndef EM_MICROBLAZE
#define EM_MICROBLAZE 189
#endif
#ifndef EM_ARCV2
#define EM_ARCV2 195
#endif
#ifndef EM_RISCV
#define EM_RISCV 243
#endif
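/*
 * Unaligned read/write helpers. do_file() binds these to the big- or
 * little-endian implementations below, according to the ELF header's
 * EI_DATA field, so every table access matches the target byte order.
 */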
static uint32_t (*r)(const uint32_t *);
static uint16_t (*r2)(const uint16_t *);
static uint64_t (*r8)(const uint64_t *);
static void (*w)(uint32_t, uint32_t *);
static void (*w2)(uint16_t, uint16_t *);
static void (*w8)(uint64_t, uint64_t *);
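/*
 * Optional architecture-specific sort routine for the exception table;
 * when set, it is called with the mapped table image and its size in
 * bytes instead of the plain qsort of raw entries.
 */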
typedef void (*table_sort_t)(char *, int);
/*
 * Map the whole file as a programming convenience, avoiding a
 * malloc+lseek+read+free of many pieces. The file is opened read-write
 * and mapped MAP_SHARED so the tables can be sorted in place; on any
 * failure NULL is returned and the file is left unmodified.
 */
static void *mmap_file(char const *fname, size_t *size)
{
int fd;
struct stat sb;
void *addr = NULL;
fd = open(fname, O_RDWR);
if (fd < 0) {
perror(fname);
return NULL;
}
if (fstat(fd, &sb) < 0) {
perror(fname);
goto out;
}
if (!S_ISREG(sb.st_mode)) {
fprintf(stderr, "not a regular file: %s\n", fname);
goto out;
}
addr = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
if (addr == MAP_FAILED) {
fprintf(stderr, "Could not mmap file: %s\n", fname);
goto out;
}
*size = sb.st_size;
out:
close(fd);
return addr;
}
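/*
 * Concrete big- and little-endian accessors that the r*()/w*() pointers
 * above are bound to.
 */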
static uint32_t rbe(const uint32_t *x)
{
return get_unaligned_be32(x);
}
static uint16_t r2be(const uint16_t *x)
{
return get_unaligned_be16(x);
}
static uint64_t r8be(const uint64_t *x)
{
return get_unaligned_be64(x);
}
static uint32_t rle(const uint32_t *x)
{
return get_unaligned_le32(x);
}
static uint16_t r2le(const uint16_t *x)
{
return get_unaligned_le16(x);
}
static uint64_t r8le(const uint64_t *x)
{
return get_unaligned_le64(x);
}
static void wbe(uint32_t val, uint32_t *x)
{
put_unaligned_be32(val, x);
}
static void w2be(uint16_t val, uint16_t *x)
{
put_unaligned_be16(val, x);
}
static void w8be(uint64_t val, uint64_t *x)
{
put_unaligned_be64(val, x);
}
static void wle(uint32_t val, uint32_t *x)
{
put_unaligned_le32(val, x);
}
static void w2le(uint16_t val, uint16_t *x)
{
put_unaligned_le16(val, x);
}
static void w8le(uint64_t val, uint64_t *x)
{
put_unaligned_le64(val, x);
}
/*
* Move reserved section indices SHN_LORESERVE..SHN_HIRESERVE out of
* the way to -256..-1, to avoid conflicting with real section
* indices.
*/
#define SPECIAL(i) ((i) - (SHN_HIRESERVE + 1))
static inline int is_shndx_special(unsigned int i)
{
return i != SHN_XINDEX && i >= SHN_LORESERVE && i <= SHN_HIRESERVE;
}
/* Accessor for sym->st_shndx, hides ugliness of "64k sections" */
static inline unsigned int get_secindex(unsigned int shndx,
unsigned int sym_offs,
const Elf32_Word *symtab_shndx_start)
{
if (is_shndx_special(shndx))
return SPECIAL(shndx);
if (shndx != SHN_XINDEX)
return shndx;
return r(&symtab_shndx_start[sym_offs]);
}
/* 32 bit and 64 bit are very similar */
#include "sorttable.h"
#define SORTTABLE_64
#include "sorttable.h"
static int compare_relative_table(const void *a, const void *b)
{
int32_t av = (int32_t)r(a);
int32_t bv = (int32_t)r(b);
if (av < bv)
return -1;
if (av > bv)
return 1;
return 0;
}
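/*
 * Generic sort for 8-byte entries holding two self-relative 32-bit
 * offsets (insn, fixup), as used by parisc and powerpc.
 */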
static void sort_relative_table(char *extab_image, int image_size)
{
int i = 0;
/*
* Do the same thing the runtime sort does, first normalize to
* being relative to the start of the section.
*/
while (i < image_size) {
uint32_t *loc = (uint32_t *)(extab_image + i);
w(r(loc) + i, loc);
i += 4;
}
qsort(extab_image, image_size / 8, 8, compare_relative_table);
/* Now denormalize. */
i = 0;
while (i < image_size) {
uint32_t *loc = (uint32_t *)(extab_image + i);
w(r(loc) - i, loc);
i += 4;
}
}
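/*
 * arm64 entries are 12 bytes: two self-relative 32-bit offsets (insn,
 * fixup) followed by a type/data word that is not an address and must
 * be left untouched. Roughly (a sketch only; the authoritative layout
 * lives in the arch's asm/extable.h):
 *
 *    struct exception_table_entry {
 *            int insn, fixup;    // self-relative offsets
 *            short type, data;   // left as-is by the sort
 *    };
 */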
static void arm64_sort_relative_table(char *extab_image, int image_size)
{
int i = 0;
while (i < image_size) {
uint32_t *loc = (uint32_t *)(extab_image + i);
w(r(loc) + i, loc);
w(r(loc + 1) + i + 4, loc + 1);
/* Don't touch the fixup type or data */
i += sizeof(uint32_t) * 3;
}
qsort(extab_image, image_size / 12, 12, compare_relative_table);
i = 0;
while (i < image_size) {
uint32_t *loc = (uint32_t *)(extab_image + i);
w(r(loc) - i, loc);
w(r(loc + 1) - (i + 4), loc + 1);
/* Don't touch the fixup type or data */
i += sizeof(uint32_t) * 3;
}
}
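/*
 * x86 uses the same 12-byte layout of two self-relative offsets; the
 * third word encodes the fixup type and is not address-relative, so it
 * is not adjusted either.
 */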
static void x86_sort_relative_table(char *extab_image, int image_size)
{
int i = 0;
while (i < image_size) {
uint32_t *loc = (uint32_t *)(extab_image + i);
w(r(loc) + i, loc);
w(r(loc + 1) + i + 4, loc + 1);
/* Don't touch the fixup type */
i += sizeof(uint32_t) * 3;
}
qsort(extab_image, image_size / 12, 12, compare_relative_table);
i = 0;
while (i < image_size) {
uint32_t *loc = (uint32_t *)(extab_image + i);
w(r(loc) - i, loc);
w(r(loc + 1) - (i + 4), loc + 1);
/* Don't touch the fixup type */
i += sizeof(uint32_t) * 3;
}
}
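/*
 * s390 entries are 16 bytes: two 32-bit self-relative offsets plus a
 * 64-bit self-relative handler, which stays relative across the sort
 * and is rebased only when non-zero (see the comment below).
 */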
static void s390_sort_relative_table(char *extab_image, int image_size)
{
int i;
for (i = 0; i < image_size; i += 16) {
char *loc = extab_image + i;
uint64_t handler;
w(r((uint32_t *)loc) + i, (uint32_t *)loc);
w(r((uint32_t *)(loc + 4)) + (i + 4), (uint32_t *)(loc + 4));
/*
* 0 is a special self-relative handler value, which means that
* handler should be ignored. It is safe, because it means that
* handler field points to itself, which should never happen.
* When creating extable-relative values, keep it as 0, since
* this should never occur either: it would mean that handler
* field points to the first extable entry.
*/
handler = r8((uint64_t *)(loc + 8));
if (handler)
handler += i + 8;
w8(handler, (uint64_t *)(loc + 8));
}
qsort(extab_image, image_size / 16, 16, compare_relative_table);
for (i = 0; i < image_size; i += 16) {
char *loc = extab_image + i;
uint64_t handler;
w(r((uint32_t *)loc) - i, (uint32_t *)loc);
w(r((uint32_t *)(loc + 4)) - (i + 4), (uint32_t *)(loc + 4));
handler = r8((uint64_t *)(loc + 8));
if (handler)
handler -= i + 8;
w8(handler, (uint64_t *)(loc + 8));
}
}
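/*
 * Validate the ELF header, bind the endianness accessors, pick the
 * architecture-specific sort routine (if any) and hand the image to
 * do_sort_32()/do_sort_64(), which are generated by the two inclusions
 * of sorttable.h above.
 */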
static int do_file(char const *const fname, void *addr)
{
int rc = -1;
Elf32_Ehdr *ehdr = addr;
table_sort_t custom_sort = NULL;
switch (ehdr->e_ident[EI_DATA]) {
case ELFDATA2LSB:
r = rle;
r2 = r2le;
r8 = r8le;
w = wle;
w2 = w2le;
w8 = w8le;
break;
case ELFDATA2MSB:
r = rbe;
r2 = r2be;
r8 = r8be;
w = wbe;
w2 = w2be;
w8 = w8be;
break;
default:
fprintf(stderr, "unrecognized ELF data encoding %d: %s\n",
ehdr->e_ident[EI_DATA], fname);
return -1;
}
if (memcmp(ELFMAG, ehdr->e_ident, SELFMAG) != 0 ||
(r2(&ehdr->e_type) != ET_EXEC && r2(&ehdr->e_type) != ET_DYN) ||
ehdr->e_ident[EI_VERSION] != EV_CURRENT) {
fprintf(stderr, "unrecognized ET_EXEC/ET_DYN file %s\n", fname);
return -1;
}
switch (r2(&ehdr->e_machine)) {
case EM_386:
case EM_X86_64:
custom_sort = x86_sort_relative_table;
break;
case EM_S390:
custom_sort = s390_sort_relative_table;
break;
case EM_AARCH64:
custom_sort = arm64_sort_relative_table;
break;
case EM_PARISC:
case EM_PPC:
case EM_PPC64:
custom_sort = sort_relative_table;
break;
case EM_ARCOMPACT:
case EM_ARCV2:
case EM_ARM:
case EM_MICROBLAZE:
case EM_MIPS:
case EM_RISCV:
case EM_XTENSA:
break;
default:
fprintf(stderr, "unrecognized e_machine %d %s\n",
r2(&ehdr->e_machine), fname);
return -1;
}
switch (ehdr->e_ident[EI_CLASS]) {
case ELFCLASS32:
if (r2(&ehdr->e_ehsize) != sizeof(Elf32_Ehdr) ||
r2(&ehdr->e_shentsize) != sizeof(Elf32_Shdr)) {
fprintf(stderr,
"unrecognized ET_EXEC/ET_DYN file: %s\n", fname);
break;
}
rc = do_sort_32(ehdr, fname, custom_sort);
break;
case ELFCLASS64:
{
Elf64_Ehdr *const ghdr = (Elf64_Ehdr *)ehdr;
if (r2(&ghdr->e_ehsize) != sizeof(Elf64_Ehdr) ||
r2(&ghdr->e_shentsize) != sizeof(Elf64_Shdr)) {
fprintf(stderr,
"unrecognized ET_EXEC/ET_DYN file: %s\n",
fname);
break;
}
rc = do_sort_64(ghdr, fname, custom_sort);
}
break;
default:
fprintf(stderr, "unrecognized ELF class %d %s\n",
ehdr->e_ident[EI_CLASS], fname);
break;
}
return rc;
}
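/*
 * Sort each file named on the command line in place. The exit status
 * is 0 when every file was processed successfully (or when no file was
 * given, after printing the usage message) and 1 otherwise.
 */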
int main(int argc, char *argv[])
{
int i, n_error = 0; /* gcc-4.3.0 false positive complaint */
size_t size = 0;
void *addr = NULL;
if (argc < 2) {
fprintf(stderr, "usage: sorttable vmlinux...\n");
return 0;
}
/* Process each file in turn, allowing deep failure. */
for (i = 1; i < argc; i++) {
addr = mmap_file(argv[i], &size);
if (!addr) {
++n_error;
continue;
}
if (do_file(argv[i], addr))
++n_error;
munmap(addr, size);
}
return !!n_error;
}