From 25d3584797a39f57b69cd835722ac7c41113fb9a Mon Sep 17 00:00:00 2001
From: Will Deacon <will.deacon@arm.com>
Date: Mon, 16 Aug 2010 15:15:14 +0100
Subject: ARM: 6330/1: perf: reword comments relating to perf_event_do_pending

This is purely a cosmetic change to the ARM perf backend because the
current comments about the relationship between NMIs, interrupt context
and perf_event_do_pending are misleading.

This patch updates the comments so that they reflect what the code
actually does (which is in line with other architectures).

Acked-by: Jamie Iles <jamie.iles@picochip.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 arch/arm/kernel/perf_event.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 417c392ddf1c..e2139256524e 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -1041,8 +1041,8 @@ armv6pmu_handle_irq(int irq_num,
 	/*
 	 * Handle the pending perf events.
 	 *
-	 * Note: this call *must* be run with interrupts enabled. For
-	 * platforms that can have the PMU interrupts raised as a PMI, this
+	 * Note: this call *must* be run with interrupts disabled. For
+	 * platforms that can have the PMU interrupts raised as an NMI, this
 	 * will not work.
 	 */
 	perf_event_do_pending();
@@ -2017,8 +2017,8 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 	/*
 	 * Handle the pending perf events.
 	 *
-	 * Note: this call *must* be run with interrupts enabled. For
-	 * platforms that can have the PMU interrupts raised as a PMI, this
+	 * Note: this call *must* be run with interrupts disabled. For
+	 * platforms that can have the PMU interrupts raised as an NMI, this
 	 * will not work.
 	 */
 	perf_event_do_pending();
--
cgit v1.2.3

From 418cf646c9999f2f7d216b1a693c2c575ab8094c Mon Sep 17 00:00:00 2001
From: Mikael Pettersson <mikpe@it.uu.se>
Date: Wed, 25 Aug 2010 20:49:01 +0100
Subject: ARM: 6343/1: wire up fanotify and prlimit64 syscalls on ARM

The 2.6.36-rc kernel added three new system calls: fanotify_init,
fanotify_mark, and prlimit64. This patch wires them up on ARM.

The only non-trivial issue here is the u64 argument to
sys_fanotify_mark(), but it is the 3rd argument and thus passed in
r2/r3 in both kernel and user space, so it causes no problems.

Tested with a 2.6.36-rc2 EABI kernel on an ixp4xx machine.

Tested-by: Anand Gadiyar <gadiyar@ti.com>
Signed-off-by: Mikael Pettersson <mikpe@it.uu.se>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 arch/arm/kernel/calls.S | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index afeb71fa72cb..5c26eccef998 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -376,6 +376,9 @@
 		CALL(sys_perf_event_open)
 /* 365 */	CALL(sys_recvmmsg)
 		CALL(sys_accept4)
+		CALL(sys_fanotify_init)
+		CALL(sys_fanotify_mark)
+		CALL(sys_prlimit64)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
--
cgit v1.2.3
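As a user-space illustration of the r2/r3 point in the commit message above:
until libc grows wrappers, the new calls can be reached through syscall(2) by
splitting the 64-bit mask into two explicit words. A minimal sketch, assuming
a little-endian EABI build; the syscall numbers (367/368) are inferred from
the table position after sys_accept4 rather than stated in the patch, and the
FAN_* constants are taken from later fanotify headers:

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <stdint.h>
	#include <unistd.h>
	#include <fcntl.h>		/* AT_FDCWD */
	#include <sys/syscall.h>

	/* Assumed EABI numbers: accept4 is 366 in calls.S, so... */
	#define __NR_fanotify_init	367
	#define __NR_fanotify_mark	368

	/* Values from later fanotify headers; assumptions here. */
	#define FAN_CLASS_NOTIF		0x00
	#define FAN_MARK_ADD		0x01
	#define FAN_ACCESS		0x01

	int main(void)
	{
		int fd = syscall(__NR_fanotify_init, FAN_CLASS_NOTIF, 0);
		if (fd < 0) {
			perror("fanotify_init");	/* needs CAP_SYS_ADMIN */
			return 1;
		}

		/* Pass the u64 mask as two word-sized arguments so they
		 * land in r2/r3, matching the in-kernel argument layout
		 * described above (low word first on little-endian). */
		uint64_t mask = FAN_ACCESS;
		if (syscall(__NR_fanotify_mark, fd, FAN_MARK_ADD,
			    (uint32_t)mask, (uint32_t)(mask >> 32),
			    AT_FDCWD, "/tmp") < 0)
			perror("fanotify_mark");
		return 0;
	}

Splitting the mask by hand sidesteps the vararg alignment rules for 64-bit
values, which is the usual way to drive such syscalls before a proper libc
wrapper exists.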
From 65b4711ff513767341aa1915c822de6ec0de65cb Mon Sep 17 00:00:00 2001
From: Will Deacon <will.deacon@arm.com>
Date: Thu, 2 Sep 2010 09:32:08 +0100
Subject: ARM: 6352/1: perf: fix event validation

The validate_event function in the ARM perf events backend has the
following problems:

1.) Events that are disabled count towards the cost.
2.) Events associated with other PMUs [for example, software events or
    breakpoints] do not count towards the cost, but do fail validation,
    causing the group to fail.

This patch changes validate_event so that it ignores events in the
PERF_EVENT_STATE_OFF state or that are scheduled for other PMUs.

Reported-by: Pawel Moll <pawel.moll@arm.com>
Acked-by: Jamie Iles <jamie.iles@picochip.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 arch/arm/kernel/perf_event.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index e2139256524e..ecbb0288e5dd 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -319,8 +319,8 @@ validate_event(struct cpu_hw_events *cpuc,
 {
 	struct hw_perf_event fake_event = event->hw;
 
-	if (event->pmu && event->pmu != &pmu)
-		return 0;
+	if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
+		return 1;
 
 	return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
 }
--
cgit v1.2.3
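The reason the "ignore" path must now return 1 rather than 0 is that the
caller folds the validate_event() results for the group leader and every
sibling together, and treats any 0 as "the group cannot be scheduled". A
stand-alone model of that walk (invented types and constants; it mirrors the
roles of validate_group()/validate_event() rather than quoting kernel
source):

	#include <stdio.h>

	#define STATE_OFF	-1	/* models PERF_EVENT_STATE_OFF */
	#define STATE_ACTIVE	0

	struct event {
		int is_cpu_pmu;		/* models event->pmu == &pmu */
		int state;
		int counters_needed;
	};

	static int validate_event(int *free_counters, const struct event *e)
	{
		/* The fixed logic: events belonging to other PMUs and
		 * disabled events are ignored, i.e. report "fits". */
		if (!e->is_cpu_pmu || e->state <= STATE_OFF)
			return 1;
		if (*free_counters < e->counters_needed)
			return 0;
		*free_counters -= e->counters_needed;
		return 1;
	}

	static int validate_group(const struct event *group, int n)
	{
		int free_counters = 4;	/* e.g. a 4-counter PMU */

		/* One zero return rejects the whole group, which is why
		 * "ignore this event" must be 1, never 0. */
		for (int i = 0; i < n; i++)
			if (!validate_event(&free_counters, &group[i]))
				return -1;	/* models -ENOSPC */
		return 0;
	}

	int main(void)
	{
		/* A hardware event grouped with a software event and a
		 * disabled event: previously the non-CPU-PMU event
		 * returned 0 and sank the group; now both are skipped. */
		struct event group[] = {
			{ .is_cpu_pmu = 1, .state = STATE_ACTIVE,
			  .counters_needed = 1 },
			{ .is_cpu_pmu = 0, .state = STATE_ACTIVE },
			{ .is_cpu_pmu = 1, .state = STATE_OFF },
		};

		printf("group %s\n",
		       validate_group(group, 3) ? "rejected" : "accepted");
		return 0;
	}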
From b2b163bb82b12bae2504a5b31399c37d099ad3cc Mon Sep 17 00:00:00 2001
From: Russell King <rmk+kernel@arm.linux.org.uk>
Date: Fri, 17 Sep 2010 14:56:16 +0100
Subject: ARM: prevent multiple syscall restarts

Al Viro reports that calling

	sys_sigsuspend(-ERESTARTNOHAND, 0, 0)

with two signals coming and being handled in kernel space results in
the syscall restart being done twice.

Avoid this by clearing the 'why' flag when we call the signal handling
code to prevent further syscall restarts after the first.

Acked-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 arch/arm/kernel/entry-common.S | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index f05a35a59694..4a560d30793d 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -48,6 +48,8 @@ work_pending:
 	beq	no_work_pending
 	mov	r0, sp				@ 'regs'
 	mov	r2, why				@ 'syscall'
+	tst	r1, #_TIF_SIGPENDING		@ delivering a signal?
+	movne	why, #0				@ prevent further restarts
 	bl	do_notify_resume
 	b	ret_slow_syscall		@ Check work again
--
cgit v1.2.3

From 653d48b22166db2d8b1515ebe6f9f0f7c95dfc86 Mon Sep 17 00:00:00 2001
From: Al Viro <viro@zeniv.linux.org.uk>
Date: Fri, 17 Sep 2010 14:34:39 +0100
Subject: arm: fix really nasty sigreturn bug

If a signal hits us outside of a syscall and another gets delivered
when we are in sigreturn (e.g. because it had been in sa_mask for the
first one and got sent to us while we'd been in the first handler), we
have a chance of returning from the second handler to location one insn
prior to where we ought to return.  If r0 happens to contain -513
(-ERESTARTNOINTR), sigreturn will get confused into doing restart
syscall song and dance.

Incredible joy to debug, since it manifests as random, infrequent and
very hard to reproduce double execution of instructions in userland
code...

The fix is simple - mark it "don't bother with restarts" in wrapper,
i.e. set r8 to 0 in sys_sigreturn and sys_rt_sigreturn wrappers,
suppressing the syscall restart handling on return from these guys.
They can't legitimately return a restart-worthy error anyway.

Testcase:

	#include <unistd.h>
	#include <signal.h>
	#include <stdlib.h>
	#include <sys/time.h>
	#include <errno.h>

	void f(int n)
	{
		__asm__ __volatile__(
			"ldr r0, [%0]\n"
			"b 1f\n"
			"b 2f\n"
			"1:b .\n"
			"2:\n" : : "r"(&n));
	}

	void handler1(int sig) { }
	void handler2(int sig) { raise(1); }
	void handler3(int sig) { exit(0); }

	main()
	{
		struct sigaction s = {.sa_handler = handler2};
		struct itimerval t1 = { .it_value = {1} };
		struct itimerval t2 = { .it_value = {2} };

		signal(1, handler1);

		sigemptyset(&s.sa_mask);
		sigaddset(&s.sa_mask, 1);
		sigaction(SIGALRM, &s, NULL);
		signal(SIGVTALRM, handler3);

		setitimer(ITIMER_REAL, &t1, NULL);
		setitimer(ITIMER_VIRTUAL, &t2, NULL);

		f(-513); /* -ERESTARTNOINTR */

		write(1, "buggered\n", 9);
		return 1;
	}

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Acked-by: Russell King <rmk+kernel@arm.linux.org.uk>
Cc: stable@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
---
 arch/arm/kernel/entry-common.S | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index f05a35a59694..1b560825e1cf 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -418,11 +418,13 @@ ENDPROC(sys_clone_wrapper)
 
 sys_sigreturn_wrapper:
 		add	r0, sp, #S_OFF
+		mov	why, #0		@ prevent syscall restart handling
 		b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)
 
 sys_rt_sigreturn_wrapper:
 		add	r0, sp, #S_OFF
+		mov	why, #0		@ prevent syscall restart handling
 		b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
--
cgit v1.2.3
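Both restart fixes above work by zeroing 'why' (the entry-code alias for r8),
the flag recording "we entered the kernel via a syscall". A self-contained C
model of the decision the return path makes (illustrative only; the struct
and names are invented to mirror the roles of r0, the user pc and 'why', not
kernel source):

	#include <stdio.h>

	#define ERESTARTNOINTR	513

	struct fake_regs {
		long r0;		/* syscall return value / scratch */
		unsigned long pc;	/* user return address */
		long why;		/* non-zero: entered via a syscall */
	};

	/* Restart happens only when BOTH conditions hold, so clearing
	 * 'why' in the sigreturn wrappers makes a stale -513 in r0
	 * harmless. */
	static void return_to_user(struct fake_regs *regs)
	{
		if (regs->why && regs->r0 == -ERESTARTNOINTR) {
			regs->pc -= 4;	/* back up to re-issue the SWI */
			printf("restart: resuming at %#lx\n", regs->pc);
		} else {
			printf("normal return to %#lx\n", regs->pc);
		}
	}

	int main(void)
	{
		/* A signal interrupted ordinary code while r0 happened
		 * to hold -513, as in the testcase above. */
		struct fake_regs regs = {
			.r0 = -ERESTARTNOINTR, .pc = 0x8000
		};

		regs.why = 1;	/* before the fix: bogus restart */
		return_to_user(&regs);

		regs.why = 0;	/* after the fix: wrappers clear 'why' */
		return_to_user(&regs);
		return 0;
	}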