Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile

Pull tile arch changes from Chris Metcalf:
 "These are some minor new feature work and other changes that didn't
  merit getting pushed up after the 3.9 merge window closed.

  There should be a lot more activity in the 3.11 merge window"

* git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile:
  arch/tile: Fix syscall return value passed to tracepoint
  tile: comment assumption about __insn_mtspr for <asm/irqflags.h>
  tile: ns2cycles should use __raw_get_cpu_var
  arch: remove KCORE_ELF again [tile]
  tile: remove two outdated Kconfig entries
  tile: support atomic64_dec_if_positive()
  tile: support TIF_SYSCALL_TRACEPOINT; select HAVE_SYSCALL_TRACEPOINTS
  tile: Add definition of NR_syscalls
  tile: move declaration of sys_call_table to <asm/syscall.h>
  arch/tile: Enable HAVE_ARCH_TRACEHOOK
  arch/tile: Call tracehook_report_syscall_{entry,exit} in syscall trace
commit 8a72f3820c
Linus Torvalds, 2013-05-01 10:48:34 -07:00
11 changed files with 95 additions and 51 deletions

arch/tile/Kconfig

@@ -22,6 +22,9 @@ config TILE
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_CLOCKEVENTS
select MODULES_USE_ELF_RELA
select HAVE_ARCH_TRACEHOOK
select HAVE_SYSCALL_TRACEPOINTS
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
# FIXME: investigate whether we need/want these options.
# select HAVE_IOREMAP_PROT
@@ -40,9 +43,6 @@ config MMU
config GENERIC_CSUM
def_bool y
config SEMAPHORE_SLEEPERS
def_bool y
config HAVE_ARCH_ALLOC_REMAP
def_bool y
@@ -67,12 +67,6 @@ config HUGETLB_SUPER_PAGES
config RWSEM_GENERIC_SPINLOCK
def_bool y
# We have a very flat architecture from a migration point of view,
# so save boot time by presetting this (particularly useful on tile-sim).
config DEFAULT_MIGRATION_COST
int
default "10000000"
# We only support gcc 4.4 and above, so this should work.
config ARCH_SUPPORTS_OPTIMIZED_INLINING
def_bool y
@@ -413,11 +407,6 @@ endmenu
menu "Executable file formats"
# only elf supported
config KCORE_ELF
def_bool y
depends on PROC_FS
source "fs/Kconfig.binfmt"
endmenu

arch/tile/include/asm/atomic.h

@@ -131,4 +131,25 @@ static inline int atomic_read(const atomic_t *v)
#include <asm/atomic_64.h>
#endif
#ifndef __ASSEMBLY__
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
long long c, old, dec;
c = atomic64_read(v);
for (;;) {
dec = c - 1;
if (unlikely(dec < 0))
break;
old = atomic64_cmpxchg((v), c, dec);
if (likely(old == c))
break;
c = old;
}
return dec;
}
#endif /* __ASSEMBLY__ */
#endif /* _ASM_TILE_ATOMIC_H */
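
For context, a minimal sketch of how the new atomic64_dec_if_positive() helper is typically used (the counter and function names here are illustrative, not part of the patch): it returns the decremented value, and returns a negative number without touching the counter when the value is already at or below zero.

#include <linux/types.h>
#include <linux/atomic.h>

/* Hypothetical 64-bit credit counter that must never go below zero. */
static atomic64_t credits = ATOMIC64_INIT(16);

static bool take_credit(void)
{
	/* A result >= 0 means one credit was successfully consumed. */
	return atomic64_dec_if_positive(&credits) >= 0;
}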

arch/tile/include/asm/ptrace.h

@@ -44,7 +44,8 @@ typedef unsigned long pt_reg_t;
struct pt_regs *get_pt_regs(struct pt_regs *);
/* Trace the current syscall. */
extern void do_syscall_trace(void);
extern int do_syscall_trace_enter(struct pt_regs *regs);
extern void do_syscall_trace_exit(struct pt_regs *regs);
#define arch_has_single_step() (1)

arch/tile/include/asm/syscall.h

@@ -22,6 +22,12 @@
#include <linux/err.h>
#include <arch/abi.h>
/* The array of function pointers for syscalls. */
extern void *sys_call_table[];
#ifdef CONFIG_COMPAT
extern void *compat_sys_call_table[];
#endif
/*
* Only the low 32 bits of orig_r0 are meaningful, so we return int.
* This importantly ignores the high bits on 64-bit, so comparisons
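
Making sys_call_table[] visible from <asm/syscall.h> is what lets the generic syscall-tracepoint code resolve syscall handlers. As a hedged sketch (assuming the 3.10-era kernel/trace/trace_syscalls.c), the generic weak fallback that consumes this declaration looks roughly like:

/* Weak default: map a syscall number to its handler's address. */
unsigned long __init __weak arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr];
}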

arch/tile/include/asm/syscalls.h

@@ -24,12 +24,6 @@
#include <linux/types.h>
#include <linux/compat.h>
/* The array of function pointers for syscalls. */
extern void *sys_call_table[];
#ifdef CONFIG_COMPAT
extern void *compat_sys_call_table[];
#endif
/*
* Note that by convention, any syscall which requires the current
* register set takes an additional "struct pt_regs *" pointer; a

arch/tile/include/asm/thread_info.h

@@ -124,6 +124,7 @@ extern void _cpu_idle(void);
#define TIF_SECCOMP 6 /* secure computing */
#define TIF_MEMDIE 7 /* OOM killer at work */
#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
#define TIF_SYSCALL_TRACEPOINT 9 /* syscall tracepoint instrumentation */
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
@@ -134,12 +135,19 @@ extern void _cpu_idle(void);
#define _TIF_SECCOMP (1<<TIF_SECCOMP)
#define _TIF_MEMDIE (1<<TIF_MEMDIE)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
/* Work to do on any return to user space. */
#define _TIF_ALLWORK_MASK \
(_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SINGLESTEP|\
_TIF_ASYNC_TLB|_TIF_NOTIFY_RESUME)
/* Work to do at syscall entry. */
#define _TIF_SYSCALL_ENTRY_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT)
/* Work to do at syscall exit. */
#define _TIF_SYSCALL_EXIT_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT)
/*
* Thread-synchronous status.
*
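
The point of the new _TIF_SYSCALL_ENTRY_WORK/_TIF_SYSCALL_EXIT_WORK masks is that the syscall path can check for ptrace-style tracing and syscall tracepoints with a single flags load and mask; the real test is done in the assembly hunks below. A rough, illustrative C equivalent of that check:

#include <linux/types.h>
#include <linux/thread_info.h>

static inline bool syscall_entry_work_pending(void)
{
	return (current_thread_info()->flags & _TIF_SYSCALL_ENTRY_WORK) != 0;
}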

arch/tile/include/asm/unistd.h

@@ -20,6 +20,8 @@
/* Use the standard ABI for syscalls. */
#include <asm-generic/unistd.h>
#define NR_syscalls __NR_syscalls
/* Additional Tilera-specific syscalls. */
#define __NR_cacheflush (__NR_arch_specific_syscall + 1)
__SYSCALL(__NR_cacheflush, sys_cacheflush)
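
Defining NR_syscalls is a prerequisite for HAVE_SYSCALL_TRACEPOINTS: generic code uses it to size per-syscall metadata, and any table lookup can bound-check against it. A small illustrative sketch (names other than NR_syscalls and sys_call_table are hypothetical):

#include <asm/unistd.h>
#include <asm/syscall.h>

static void *lookup_syscall_handler(unsigned int nr)
{
	if (nr >= NR_syscalls)
		return NULL;	/* out of range: not a valid syscall number */
	return sys_call_table[nr];
}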

arch/tile/kernel/intvec_32.S

@@ -1201,7 +1201,10 @@ handle_syscall:
lw r30, r31
andi r30, r30, _TIF_SYSCALL_TRACE
bzt r30, .Lrestore_syscall_regs
jal do_syscall_trace
{
PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
jal do_syscall_trace_enter
}
FEEDBACK_REENTER(handle_syscall)
/*
@@ -1252,7 +1255,10 @@ handle_syscall:
lw r30, r31
andi r30, r30, _TIF_SYSCALL_TRACE
bzt r30, 1f
jal do_syscall_trace
{
PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
jal do_syscall_trace_exit
}
FEEDBACK_REENTER(handle_syscall)
1: {
movei r30, 0 /* not an NMI */

arch/tile/kernel/intvec_64.S

@@ -1000,13 +1000,19 @@ handle_syscall:
/* Trace syscalls, if requested. */
addi r31, r31, THREAD_INFO_FLAGS_OFFSET
ld r30, r31
andi r30, r30, _TIF_SYSCALL_TRACE
{
ld r30, r31
moveli r32, _TIF_SYSCALL_ENTRY_WORK
}
and r30, r30, r32
{
addi r30, r31, THREAD_INFO_STATUS_OFFSET - THREAD_INFO_FLAGS_OFFSET
beqzt r30, .Lrestore_syscall_regs
}
jal do_syscall_trace
{
PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
jal do_syscall_trace_enter
}
FEEDBACK_REENTER(handle_syscall)
/*
@@ -1071,13 +1077,19 @@ handle_syscall:
FEEDBACK_REENTER(handle_syscall)
/* Do syscall trace again, if requested. */
ld r30, r31
andi r0, r30, _TIF_SYSCALL_TRACE
{
ld r30, r31
moveli r32, _TIF_SYSCALL_EXIT_WORK
}
and r0, r30, r32
{
andi r0, r30, _TIF_SINGLESTEP
beqzt r0, 1f
}
jal do_syscall_trace
{
PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
jal do_syscall_trace_exit
}
FEEDBACK_REENTER(handle_syscall)
andi r0, r30, _TIF_SINGLESTEP

arch/tile/kernel/ptrace.c

@@ -21,9 +21,13 @@
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/tracehook.h>
#include <asm/traps.h>
#include <arch/chip.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
void user_enable_single_step(struct task_struct *child)
{
set_tsk_thread_flag(child, TIF_SINGLESTEP);
@@ -246,29 +250,26 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
}
#endif
void do_syscall_trace(void)
int do_syscall_trace_enter(struct pt_regs *regs)
{
if (!test_thread_flag(TIF_SYSCALL_TRACE))
return;
if (!(current->ptrace & PT_PTRACED))
return;
/*
* The 0x80 provides a way for the tracing parent to distinguish
* between a syscall stop and SIGTRAP delivery
*/
ptrace_notify(SIGTRAP|((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
/*
* this isn't the same as continuing with a signal, but it will do
* for normal use. strace only continues with a signal if the
* stopping signal is not SIGTRAP. -brl
*/
if (current->exit_code) {
send_sig(current->exit_code, current, 1);
current->exit_code = 0;
if (test_thread_flag(TIF_SYSCALL_TRACE)) {
if (tracehook_report_syscall_entry(regs))
regs->regs[TREG_SYSCALL_NR] = -1;
}
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
trace_sys_enter(regs, regs->regs[TREG_SYSCALL_NR]);
return regs->regs[TREG_SYSCALL_NR];
}
void do_syscall_trace_exit(struct pt_regs *regs)
{
if (test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, 0);
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
trace_sys_exit(regs, regs->regs[0]);
}
void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
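
With tracehook_report_syscall_entry/exit in place, a PTRACE_SYSCALL-based tracer works on tile the same way it does on other architectures: the child stops once on entry and once on exit of every syscall, and a nonzero return from the entry hook makes do_syscall_trace_enter() hand back -1 so the syscall can be cancelled. A minimal userspace sketch of such a tracer (illustrative only, error handling trimmed):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		execlp("true", "true", (char *)NULL);
		_exit(127);
	}

	waitpid(child, NULL, 0);	/* initial stop after execve */
	ptrace(PTRACE_SETOPTIONS, child, NULL, (void *)PTRACE_O_TRACESYSGOOD);

	for (;;) {
		int status;

		ptrace(PTRACE_SYSCALL, child, NULL, NULL);
		if (waitpid(child, &status, 0) < 0 || WIFEXITED(status))
			break;
		/* Each stop here is a syscall-entry or syscall-exit stop. */
	}
	return 0;
}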

arch/tile/kernel/time.c

@@ -230,6 +230,10 @@ int setup_profiling_timer(unsigned int multiplier)
*/
cycles_t ns2cycles(unsigned long nsecs)
{
struct clock_event_device *dev = &__get_cpu_var(tile_timer);
/*
* We do not have to disable preemption here as each core has the same
* clock frequency.
*/
struct clock_event_device *dev = &__raw_get_cpu_var(tile_timer);
return ((u64)nsecs * dev->mult) >> dev->shift;
}
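
For reference, the mult/shift pair in the clock_event_device encodes the cycles-per-nanosecond ratio as a fixed-point number, so ns2cycles() is just a multiply and a shift. A self-contained illustration with made-up example values (not taken from tile hardware):

#include <stdint.h>
#include <stdio.h>

/* Mirror of the ns2cycles() arithmetic: cycles = (ns * mult) >> shift. */
static uint64_t example_ns2cycles(uint64_t nsecs, uint32_t mult, uint32_t shift)
{
	return (nsecs * mult) >> shift;
}

int main(void)
{
	/* ~1.2 GHz: mult/2^shift ~= 1.2 cycles per ns. */
	uint32_t mult = 0x4ccccccd, shift = 30;

	/* Prints 1200, i.e. ~1200 cycles for 1000 ns. */
	printf("%llu\n", (unsigned long long)example_ns2cycles(1000, mult, shift));
	return 0;
}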