From edf2ed153bcae52de70db00a98b0e81a5668e563 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 23 Mar 2011 10:37:00 +0100 Subject: ptrace: Kill tracehook_notify_jctl() tracehook_notify_jctl() aids in determining whether and what to report to the parent when a task is stopped or continued. The function also adds an extra requirement that siglock may be released across it, which is currently unused and quite difficult to satisfy in a well-defined manner. As job control and the notifications are about to receive a major overhaul, remove the tracehook and open code it. If ever necessary, let's factor it out after the overhaul. * Oleg spotted incorrect CLD_CONTINUED/STOPPED selection when ptraced. Fixed. Signed-off-by: Tejun Heo Cc: Oleg Nesterov Cc: Roland McGrath --- include/linux/tracehook.h | 27 --------------------------- 1 file changed, 27 deletions(-) (limited to 'include/linux') diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index 3a2e66d88a32..b073f3c8adc3 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -468,33 +468,6 @@ static inline int tracehook_get_signal(struct task_struct *task, return 0; } -/** - * tracehook_notify_jctl - report about job control stop/continue - * @notify: zero, %CLD_STOPPED or %CLD_CONTINUED - * @why: %CLD_STOPPED or %CLD_CONTINUED - * - * This is called when we might call do_notify_parent_cldstop(). - * - * @notify is zero if we would not ordinarily send a %SIGCHLD, - * or is the %CLD_STOPPED or %CLD_CONTINUED .si_code for %SIGCHLD. - * - * @why is %CLD_STOPPED when about to stop for job control; - * we are already in %TASK_STOPPED state, about to call schedule(). - * It might also be that we have just exited (check %PF_EXITING), - * but need to report that a group-wide stop is complete. - * - * @why is %CLD_CONTINUED when waking up after job control stop and - * ready to make a delayed @notify report. - * - * Return the %CLD_* value for %SIGCHLD, or zero to generate no signal. - * - * Called with the siglock held. - */ -static inline int tracehook_notify_jctl(int notify, int why) -{ - return notify ?: (current->ptrace & PT_PTRACED) ? why : 0; -} - /** * tracehook_finish_jctl - report about return from job control stop * -- cgit v1.2.3 From e5c1902e9260a0075ea52cb5ef627a8d9aaede89 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 23 Mar 2011 10:37:00 +0100 Subject: signal: Fix premature completion of group stop when interfered by ptrace task->signal->group_stop_count is used to track the progress of group stop. It's initialized to the number of tasks which need to stop for group stop to finish, and each stopping or trapping task decrements it. However, each task doesn't keep track of whether it decremented the counter or not, and if woken up before the group stop is complete and stops again, it can decrement the counter multiple times. Please consider the following example code. static void *worker(void *arg) { while (1) ; return NULL; } int main(void) { pthread_t thread; pid_t pid; int i; pid = fork(); if (!pid) { for (i = 0; i < 5; i++) pthread_create(&thread, NULL, worker, NULL); while (1) ; return 0; } ptrace(PTRACE_ATTACH, pid, NULL, NULL); while (1) { waitid(P_PID, pid, NULL, WSTOPPED); ptrace(PTRACE_SINGLESTEP, pid, NULL, (void *)(long)SIGSTOP); } return 0; } The child creates five threads and the parent continuously traps the first thread, and whenever the child gets a signal, SIGSTOP is delivered. If an external process sends SIGSTOP to the child, all other threads in the process should reliably stop.
However, due to the above bug, the first thread will often end up consuming group_stop_count multiple times, and SIGSTOP often ends up stopping none or only some of the other four threads. To fix this bug, this patch adds a new field, task->group_stop, which is protected by siglock, and uses the GROUP_STOP_CONSUME flag in it to track which tasks still have to consume group_stop_count. task_clear_group_stop_pending() and task_participate_group_stop() are added to help manipulate group stop states. As ptrace_stop() now also uses task_participate_group_stop(), it will set SIGNAL_STOP_STOPPED if it completes a group stop. There are still many issues regarding the interaction between group stop and ptrace. Patches to address them will follow. - Oleg spotted duplicate GROUP_STOP_CONSUME. Dropped. Signed-off-by: Tejun Heo Acked-by: Oleg Nesterov Cc: Roland McGrath --- include/linux/sched.h | 6 +++++ kernel/signal.c | 62 ++++++++++++++++++++++++++++++++++++++++++++------- 2 files changed, 60 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index 4b601be3dace..85f51042c2b8 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1260,6 +1260,7 @@ struct task_struct { int exit_state; int exit_code, exit_signal; int pdeath_signal; /* The signal sent when the parent dies */ + unsigned int group_stop; /* GROUP_STOP_*, siglock protected */ /* ??? */ unsigned int personality; unsigned did_exec:1; @@ -1771,6 +1772,11 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) #define used_math() tsk_used_math(current) +/* + * task->group_stop flags + */ +#define GROUP_STOP_CONSUME (1 << 17) /* consume group stop count */ + #ifdef CONFIG_PREEMPT_RCU #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */ diff --git a/kernel/signal.c b/kernel/signal.c index 95ac42dc3bcb..ecb20089eaff 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -223,6 +223,52 @@ static inline void print_dropped_signal(int sig) current->comm, current->pid, sig); } +/** + * task_clear_group_stop_pending - clear pending group stop + * @task: target task + * + * Clear group stop states for @task. + * + * CONTEXT: + * Must be called with @task->sighand->siglock held. + */ +static void task_clear_group_stop_pending(struct task_struct *task) +{ + task->group_stop &= ~GROUP_STOP_CONSUME; +} + +/** + * task_participate_group_stop - participate in a group stop + * @task: task participating in a group stop + * + * @task is participating in a group stop. Group stop states are cleared + * and the group stop count is consumed if %GROUP_STOP_CONSUME was set. If + * the consumption completes the group stop, the appropriate %SIGNAL_* + * flags are set. + * + * CONTEXT: + * Must be called with @task->sighand->siglock held.
+ */ +static bool task_participate_group_stop(struct task_struct *task) +{ + struct signal_struct *sig = task->signal; + bool consume = task->group_stop & GROUP_STOP_CONSUME; + + task_clear_group_stop_pending(task); + + if (!consume) + return false; + + if (!WARN_ON_ONCE(sig->group_stop_count == 0)) + sig->group_stop_count--; + + if (!sig->group_stop_count) { + sig->flags = SIGNAL_STOP_STOPPED; + return true; + } + return false; +} + /* * allocate a new signal queue record * - this may be called without locks if and only if t == current, otherwise an @@ -1645,7 +1691,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) * we must participate in the bookkeeping. */ if (current->signal->group_stop_count > 0) - --current->signal->group_stop_count; + task_participate_group_stop(current); current->last_siginfo = info; current->exit_code = exit_code; @@ -1730,6 +1776,7 @@ static int do_signal_stop(int signr) int notify = 0; if (!sig->group_stop_count) { + unsigned int gstop = GROUP_STOP_CONSUME; struct task_struct *t; if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) || @@ -1741,6 +1788,7 @@ static int do_signal_stop(int signr) */ sig->group_exit_code = signr; + current->group_stop = gstop; sig->group_stop_count = 1; for (t = next_thread(current); t != current; t = next_thread(t)) /* @@ -1750,19 +1798,19 @@ */ if (!(t->flags & PF_EXITING) && !task_is_stopped_or_traced(t)) { + t->group_stop = gstop; sig->group_stop_count++; signal_wake_up(t, 0); - } + } else + task_clear_group_stop_pending(t); } /* * If there are no other threads in the group, or if there is * a group stop in progress and we are the last to stop, report * to the parent. When ptraced, every thread reports itself. */ - if (!--sig->group_stop_count) { - sig->flags = SIGNAL_STOP_STOPPED; + if (task_participate_group_stop(current)) notify = CLD_STOPPED; - } if (task_ptrace(current)) notify = CLD_STOPPED; @@ -2026,10 +2074,8 @@ void exit_signals(struct task_struct *tsk) recalc_sigpending_and_wake(t); if (unlikely(tsk->signal->group_stop_count) && - !--tsk->signal->group_stop_count) { - tsk->signal->flags = SIGNAL_STOP_STOPPED; + task_participate_group_stop(tsk)) group_stop = CLD_STOPPED; - } out: spin_unlock_irq(&tsk->sighand->siglock); -- cgit v1.2.3 From 39efa3ef3a376a4e53de2f82fc91182459d34200 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 23 Mar 2011 10:37:00 +0100 Subject: signal: Use GROUP_STOP_PENDING to stop once for a single group stop Currently task->signal->group_stop_count is used to decide whether to stop for group stop. However, if there is a task in the group which is taking a long time to stop, other tasks which are continued by ptrace would repeatedly stop for the same group stop until the group stop is complete. Conversely, if a ptraced task is in TASK_TRACED state, the debugger won't get notified of group stops, which is inconsistent with the behavior of a ptraced task in any other state. This patch introduces GROUP_STOP_PENDING which tracks whether a task is yet to stop for the group stop in progress. The flag is set when a group stop starts and cleared when the task stops the first time for the group stop, and consulted whenever it needs to be determined whether the task should participate in a group stop. Note that now tasks in TASK_TRACED also participate in group stop. This results in the following behavior changes. * For a single group stop, a ptracer would see at most one stop reported.
* A ptracee in TASK_TRACED now also participates in group stop and the tracer would get the notification. However, as a ptraced task could be in TASK_STOPPED state or any ptrace trap could consume group stop, the notification may still be missing. These will be addressed with further patches. * A ptracee may start a group stop while one is still in progress if the tracer let it continue with stop signal delivery. Group stop code handles this correctly. Oleg: * Spotted that a task might skip signal check even when its GROUP_STOP_PENDING is set. Fixed by updating recalc_sigpending_tsk() to check GROUP_STOP_PENDING instead of group_stop_count. * Pointed out that task->group_stop should be cleared whenever task->signal->group_stop_count is cleared. Fixed accordingly. * Pointed out the behavior inconsistency between TASK_TRACED and RUNNING and the last behavior change. Signed-off-by: Tejun Heo Acked-by: Oleg Nesterov Cc: Roland McGrath --- fs/exec.c | 1 + include/linux/sched.h | 3 +++ kernel/signal.c | 36 +++++++++++++++++++++--------------- 3 files changed, 25 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/fs/exec.c b/fs/exec.c index 5e62d26a4fec..8328beb9016f 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1659,6 +1659,7 @@ static int zap_process(struct task_struct *start, int exit_code) t = start; do { + task_clear_group_stop_pending(t); if (t != current && t->mm) { sigaddset(&t->pending.signal, SIGKILL); signal_wake_up(t, 1); diff --git a/include/linux/sched.h b/include/linux/sched.h index 85f51042c2b8..b2a17dfbdbad 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1775,8 +1775,11 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * /* * task->group_stop flags */ +#define GROUP_STOP_PENDING (1 << 16) /* task should stop for group stop */ #define GROUP_STOP_CONSUME (1 << 17) /* consume group stop count */ +extern void task_clear_group_stop_pending(struct task_struct *task); + #ifdef CONFIG_PREEMPT_RCU #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */ diff --git a/kernel/signal.c b/kernel/signal.c index ecb20089eaff..a2e7a6527d24 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -124,7 +124,7 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked) static int recalc_sigpending_tsk(struct task_struct *t) { - if (t->signal->group_stop_count > 0 || + if ((t->group_stop & GROUP_STOP_PENDING) || PENDING(&t->pending, &t->blocked) || PENDING(&t->signal->shared_pending, &t->blocked)) { set_tsk_thread_flag(t, TIF_SIGPENDING); @@ -232,19 +232,19 @@ static inline void print_dropped_signal(int sig) * CONTEXT: * Must be called with @task->sighand->siglock held. */ -static void task_clear_group_stop_pending(struct task_struct *task) +void task_clear_group_stop_pending(struct task_struct *task) { - task->group_stop &= ~GROUP_STOP_CONSUME; + task->group_stop &= ~(GROUP_STOP_PENDING | GROUP_STOP_CONSUME); } /** * task_participate_group_stop - participate in a group stop * @task: task participating in a group stop * - * @task is participating in a group stop. Group stop states are cleared - * and the group stop count is consumed if %GROUP_STOP_CONSUME was set. If - * the consumption completes the group stop, the appropriate %SIGNAL_* - * flags are set. + * @task has GROUP_STOP_PENDING set and is participating in a group stop. + * Group stop states are cleared and the group stop count is consumed if + * %GROUP_STOP_CONSUME was set. 
If the consumption completes the group + * stop, the appropriate %SIGNAL_* flags are set. * * CONTEXT: * Must be called with @task->sighand->siglock held. @@ -254,6 +254,8 @@ static bool task_participate_group_stop(struct task_struct *task) struct signal_struct *sig = task->signal; bool consume = task->group_stop & GROUP_STOP_CONSUME; + WARN_ON_ONCE(!(task->group_stop & GROUP_STOP_PENDING)); + task_clear_group_stop_pending(task); if (!consume) @@ -765,6 +767,9 @@ static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns) t = p; do { unsigned int state; + + task_clear_group_stop_pending(t); + rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending); /* * If there is a handler for SIGCONT, we must make @@ -906,6 +911,7 @@ static void complete_signal(int sig, struct task_struct *p, int group) signal->group_stop_count = 0; t = p; do { + task_clear_group_stop_pending(t); sigaddset(&t->pending.signal, SIGKILL); signal_wake_up(t, 1); } while_each_thread(p, t); @@ -1139,6 +1145,7 @@ int zap_other_threads(struct task_struct *p) p->signal->group_stop_count = 0; while_each_thread(p, t) { + task_clear_group_stop_pending(t); count++; /* Don't bother with already dead threads */ @@ -1690,7 +1697,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) * If there is a group stop in progress, * we must participate in the bookkeeping. */ - if (current->signal->group_stop_count > 0) + if (current->group_stop & GROUP_STOP_PENDING) task_participate_group_stop(current); current->last_siginfo = info; @@ -1775,8 +1782,8 @@ static int do_signal_stop(int signr) struct signal_struct *sig = current->signal; int notify = 0; - if (!sig->group_stop_count) { - unsigned int gstop = GROUP_STOP_CONSUME; + if (!(current->group_stop & GROUP_STOP_PENDING)) { + unsigned int gstop = GROUP_STOP_PENDING | GROUP_STOP_CONSUME; struct task_struct *t; if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) || @@ -1796,8 +1803,7 @@ static int do_signal_stop(int signr) * stop is always done with the siglock held, * so this check has no races. */ - if (!(t->flags & PF_EXITING) && - !task_is_stopped_or_traced(t)) { + if (!(t->flags & PF_EXITING) && !task_is_stopped(t)) { t->group_stop = gstop; sig->group_stop_count++; signal_wake_up(t, 0); @@ -1926,8 +1932,8 @@ relock: if (unlikely(signr != 0)) ka = return_ka; else { - if (unlikely(signal->group_stop_count > 0) && - do_signal_stop(0)) + if (unlikely(current->group_stop & + GROUP_STOP_PENDING) && do_signal_stop(0)) goto relock; signr = dequeue_signal(current, &current->blocked, @@ -2073,7 +2079,7 @@ void exit_signals(struct task_struct *tsk) if (!signal_pending(t) && !(t->flags & PF_EXITING)) recalc_sigpending_and_wake(t); - if (unlikely(tsk->signal->group_stop_count) && + if (unlikely(tsk->group_stop & GROUP_STOP_PENDING) && task_participate_group_stop(tsk)) group_stop = CLD_STOPPED; out: -- cgit v1.2.3 From d79fdd6d96f46fabb779d86332e3677c6f5c2a4f Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 23 Mar 2011 10:37:00 +0100 Subject: ptrace: Clean transitions between TASK_STOPPED and TRACED Currently, if the task is STOPPED on ptrace attach, it's left alone and the state is silently changed to TRACED on the next ptrace call. The behavior breaks the assumption that arch_ptrace_stop() is called before any task is poked by ptrace and is ugly in that a task manipulates the state of another task directly. With GROUP_STOP_PENDING, the transitions between TASK_STOPPED and TRACED can be made clean.
The tracer can use the flag to tell the tracee to retry stop on attach and detach. On retry, the tracee will enter the desired state in the correct way. The lower 16 bits of task->group_stop are used to remember the signal number which caused the last group stop. This is used while retrying for ptrace attach as the original group_exit_code could have been consumed with wait(2) by then. As the real parent may wait(2) and consume the group_exit_code at any time, the group_exit_code needs to be saved separately so that it can be used when switching from regular sleep to ptrace_stop(); this is what the lower 16 bits of task->group_stop record. If a task is already stopped and there's no intervening SIGCONT, a ptrace request immediately following a successful PTRACE_ATTACH should always succeed even if the tracer doesn't wait(2) for attach completion; however, with this change, the tracee might still be TASK_RUNNING trying to enter TASK_TRACED, which would cause the following request to fail with -ESRCH. This intermediate state is hidden from the ptracer by setting GROUP_STOP_TRAPPING on attach and making ptrace_check_attach() wait for it to clear on its signal->wait_chldexit. Completing the transition or getting killed clears TRAPPING and wakes up the tracer. Note that the STOPPED -> RUNNING -> TRACED transition is still visible to other threads which are in the same group as the ptracer and the reverse transition is visible to all. Please read the comments for details; a userland sketch of the attach guarantee follows the notes below. Oleg: * Spotted a race condition where a task may retry group stop without proper bookkeeping. Fixed by redoing bookkeeping on retry. * Spotted that the transition is visible to userland in several different ways. Most are fixed with GROUP_STOP_TRAPPING. The unhandled corner case is documented. * Pointed out that not setting GROUP_STOP_SIGMASK on an already stopped task would result in more consistent behavior. * Pointed out that calling ptrace_stop() from do_signal_stop() in TASK_STOPPED can race with group stop start logic and then confuse the TRAPPING wait in ptrace_check_attach(). ptrace_stop() is now called with TASK_RUNNING. * Suggested using signal->wait_chldexit instead of bit wait. * Spotted a race condition between TRACED transition and clearing of TRAPPING.
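The guarantee can be exercised from userland roughly as follows. This is an illustrative sketch, not part of the patch: the one-second sleep and the choice of PTRACE_DETACH as the immediately following request are ad-hoc, and error handling is minimal.

#include <stdio.h>
#include <unistd.h>
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>

int main(void)
{
	pid_t pid = fork();

	if (!pid) {
		/* child: just sit there so SIGSTOP puts it into group stop */
		while (1)
			pause();
		return 0;
	}

	kill(pid, SIGSTOP);
	sleep(1);	/* crude: give the group stop time to complete */

	ptrace(PTRACE_ATTACH, pid, NULL, NULL);

	/*
	 * No wait(2) in between.  Without GROUP_STOP_TRAPPING the tracee
	 * could still be in the transient RUNNING state here and the
	 * request would fail with ESRCH; with it, ptrace_check_attach()
	 * waits for the STOPPED -> TRACED transition to finish first.
	 */
	if (ptrace(PTRACE_DETACH, pid, NULL, NULL))
		perror("PTRACE_DETACH");

	kill(pid, SIGKILL);
	return 0;
}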
Signed-off-by: Tejun Heo Acked-by: Oleg Nesterov Cc: Roland McGrath Cc: Jan Kratochvil --- include/linux/sched.h | 2 ++ kernel/ptrace.c | 49 ++++++++++++++++++++++++++++---- kernel/signal.c | 79 ++++++++++++++++++++++++++++++++++++++++++--------- 3 files changed, 112 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index b2a17dfbdbad..456d80ed3b78 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1775,8 +1775,10 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * /* * task->group_stop flags */ +#define GROUP_STOP_SIGMASK 0xffff /* signr of the last group stop */ #define GROUP_STOP_PENDING (1 << 16) /* task should stop for group stop */ #define GROUP_STOP_CONSUME (1 << 17) /* consume group stop count */ +#define GROUP_STOP_TRAPPING (1 << 18) /* switching from STOPPED to TRACED */ extern void task_clear_group_stop_pending(struct task_struct *task); diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 6acf8954017c..745fc2dd00c5 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -49,14 +49,22 @@ static void ptrace_untrace(struct task_struct *child) spin_lock(&child->sighand->siglock); if (task_is_traced(child)) { /* - * If the group stop is completed or in progress, - * this thread was already counted as stopped. + * If group stop is completed or in progress, it should + * participate in the group stop. Set GROUP_STOP_PENDING + * before kicking it. + * + * This involves TRACED -> RUNNING -> STOPPED transition + * which is similar to but in the opposite direction of + * what happens while attaching to a stopped task. + * However, in this direction, the intermediate RUNNING + * state is not hidden even from the current ptracer and if + * it immediately re-attaches and performs a WNOHANG + * wait(2), it may fail. */ if (child->signal->flags & SIGNAL_STOP_STOPPED || child->signal->group_stop_count) - __set_task_state(child, TASK_STOPPED); - else - signal_wake_up(child, 1); + child->group_stop |= GROUP_STOP_PENDING; + signal_wake_up(child, 1); } spin_unlock(&child->sighand->siglock); } @@ -165,6 +173,7 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode) static int ptrace_attach(struct task_struct *task) { + bool wait_trap = false; int retval; audit_ptrace(task); @@ -204,12 +213,42 @@ static int ptrace_attach(struct task_struct *task) __ptrace_link(task, current); send_sig_info(SIGSTOP, SEND_SIG_FORCED, task); + spin_lock(&task->sighand->siglock); + + /* + * If the task is already STOPPED, set GROUP_STOP_PENDING and + * TRAPPING, and kick it so that it transits to TRACED. TRAPPING + * will be cleared if the child completes the transition or any + * event which clears the group stop states happens. We'll wait + * for the transition to complete before returning from this + * function. + * + * This hides STOPPED -> RUNNING -> TRACED transition from the + * attaching thread but a different thread in the same group can + * still observe the transient RUNNING state. IOW, if another + * thread's WNOHANG wait(2) on the stopped tracee races against + * ATTACH, the wait(2) may fail due to the transient RUNNING. + * + * The following task_is_stopped() test is safe as both transitions + * in and out of STOPPED are protected by siglock. 
+ */ + if (task_is_stopped(task)) { + task->group_stop |= GROUP_STOP_PENDING | GROUP_STOP_TRAPPING; + signal_wake_up(task, 1); + wait_trap = true; + } + + spin_unlock(&task->sighand->siglock); + retval = 0; unlock_tasklist: write_unlock_irq(&tasklist_lock); unlock_creds: mutex_unlock(&task->signal->cred_guard_mutex); out: + if (wait_trap) + wait_event(current->signal->wait_chldexit, + !(task->group_stop & GROUP_STOP_TRAPPING)); return retval; } diff --git a/kernel/signal.c b/kernel/signal.c index 418776c41d24..1e199919ae09 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -223,6 +223,26 @@ static inline void print_dropped_signal(int sig) current->comm, current->pid, sig); } +/** + * task_clear_group_stop_trapping - clear group stop trapping bit + * @task: target task + * + * If GROUP_STOP_TRAPPING is set, a ptracer is waiting for us. Clear it + * and wake up the ptracer. Note that we don't need any further locking. + * @task->siglock guarantees that @task->parent points to the ptracer. + * + * CONTEXT: + * Must be called with @task->sighand->siglock held. + */ +static void task_clear_group_stop_trapping(struct task_struct *task) +{ + if (unlikely(task->group_stop & GROUP_STOP_TRAPPING)) { + task->group_stop &= ~GROUP_STOP_TRAPPING; + __wake_up_sync(&task->parent->signal->wait_chldexit, + TASK_UNINTERRUPTIBLE, 1); + } +} + /** * task_clear_group_stop_pending - clear pending group stop * @task: target task @@ -1706,8 +1726,20 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) current->last_siginfo = info; current->exit_code = exit_code; - /* Let the debugger run. */ - __set_current_state(TASK_TRACED); + /* + * TRACED should be visible before TRAPPING is cleared; otherwise, + * the tracer might fail do_wait(). + */ + set_current_state(TASK_TRACED); + + /* + * We're committing to trapping. Clearing GROUP_STOP_TRAPPING and + * transition to TASK_TRACED should be atomic with respect to + * siglock. This should be done after the arch hook as siglock is + * released and regrabbed across it. + */ + task_clear_group_stop_trapping(current); + spin_unlock_irq(&current->sighand->siglock); read_lock(&tasklist_lock); if (may_ptrace_stop()) { @@ -1788,6 +1820,9 @@ static int do_signal_stop(int signr) unsigned int gstop = GROUP_STOP_PENDING | GROUP_STOP_CONSUME; struct task_struct *t; + /* signr will be recorded in task->group_stop for retries */ + WARN_ON_ONCE(signr & ~GROUP_STOP_SIGMASK); + if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) || unlikely(signal_group_exit(sig))) return 0; @@ -1797,25 +1832,27 @@ static int do_signal_stop(int signr) */ sig->group_exit_code = signr; - current->group_stop = gstop; + current->group_stop &= ~GROUP_STOP_SIGMASK; + current->group_stop |= signr | gstop; sig->group_stop_count = 1; - for (t = next_thread(current); t != current; t = next_thread(t)) + for (t = next_thread(current); t != current; + t = next_thread(t)) { + t->group_stop &= ~GROUP_STOP_SIGMASK; /* * Setting state to TASK_STOPPED for a group * stop is always done with the siglock held, * so this check has no races.
*/ if (!(t->flags & PF_EXITING) && !task_is_stopped(t)) { - t->group_stop = gstop; + t->group_stop |= signr | gstop; sig->group_stop_count++; signal_wake_up(t, 0); - } else + } else { task_clear_group_stop_pending(t); + } + } } - - current->exit_code = sig->group_exit_code; - __set_current_state(TASK_STOPPED); - +retry: if (likely(!task_ptrace(current))) { int notify = 0; @@ -1827,6 +1864,7 @@ static int do_signal_stop(int signr) if (task_participate_group_stop(current)) notify = CLD_STOPPED; + __set_current_state(TASK_STOPPED); spin_unlock_irq(&current->sighand->siglock); if (notify) { @@ -1839,13 +1877,28 @@ static int do_signal_stop(int signr) schedule(); spin_lock_irq(&current->sighand->siglock); - } else - ptrace_stop(current->exit_code, CLD_STOPPED, 0, NULL); + } else { + ptrace_stop(current->group_stop & GROUP_STOP_SIGMASK, + CLD_STOPPED, 0, NULL); + current->exit_code = 0; + } + + /* + * GROUP_STOP_PENDING could be set if another group stop has + * started since being woken up or ptrace wants us to transit + * between TASK_STOPPED and TRACED. Retry group stop. + */ + if (current->group_stop & GROUP_STOP_PENDING) { + WARN_ON_ONCE(!(current->group_stop & GROUP_STOP_SIGMASK)); + goto retry; + } + + /* PTRACE_ATTACH might have raced with task killing, clear trapping */ + task_clear_group_stop_trapping(current); spin_unlock_irq(&current->sighand->siglock); tracehook_finish_jctl(); - current->exit_code = 0; return 1; } -- cgit v1.2.3 From ee77f075921730b2b465880f9fd4367003bdab39 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 1 Apr 2011 20:12:38 +0200 Subject: signal: Turn SIGNAL_STOP_DEQUEUED into GROUP_STOP_DEQUEUED This patch moves SIGNAL_STOP_DEQUEUED from signal_struct->flags to task_struct->group_stop, and thus makes it per-thread. Like SIGNAL_STOP_DEQUEUED, GROUP_STOP_DEQUEUED can be false-positive after return from get_signal_to_deliver(); this is fine. The only purpose of this bit is: we can drop ->siglock after __dequeue_signal() returns the sig_kernel_stop() signal and before we call do_signal_stop(); in this case we must not miss SIGCONT if it comes in between. But, unlike SIGNAL_STOP_DEQUEUED, GROUP_STOP_DEQUEUED can not be false-positive in do_signal_stop() if multiple threads dequeue the sig_kernel_stop() signal at the same time. Consider two threads T1 and T2; SIGTTIN has a handler. - T1 dequeues SIGTSTP and sets SIGNAL_STOP_DEQUEUED, then it drops ->siglock - SIGCONT comes and clears SIGNAL_STOP_DEQUEUED; SIGTSTP should be cancelled. - T2 dequeues SIGTTIN and sets SIGNAL_STOP_DEQUEUED again. Since we have a handler we should not stop, T2 returns to usermode to run the handler. - T1 continues, calls do_signal_stop() and wrongly starts the group stop because SIGNAL_STOP_DEQUEUED was restored in between. With or without this change: - we need to do something with ptrace_signal() which can return SIGSTOP, but this needs another discussion - SIGSTOP can be lost if it races with the mt exec; this will be fixed later. Signed-off-by: Oleg Nesterov Signed-off-by: Tejun Heo --- include/linux/sched.h | 6 +++--- kernel/signal.c | 14 ++++---------- 2 files changed, 7 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index 456d80ed3b78..8cef82d4cf77 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -652,9 +652,8 @@ struct signal_struct { * Bits in flags field of signal_struct.
*/ #define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */ -#define SIGNAL_STOP_DEQUEUED 0x00000002 /* stop signal dequeued */ -#define SIGNAL_STOP_CONTINUED 0x00000004 /* SIGCONT since WCONTINUED reap */ -#define SIGNAL_GROUP_EXIT 0x00000008 /* group exit in progress */ +#define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */ +#define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */ /* * Pending notifications to parent. */ @@ -1779,6 +1778,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * #define GROUP_STOP_PENDING (1 << 16) /* task should stop for group stop */ #define GROUP_STOP_CONSUME (1 << 17) /* consume group stop count */ #define GROUP_STOP_TRAPPING (1 << 18) /* switching from STOPPED to TRACED */ +#define GROUP_STOP_DEQUEUED (1 << 19) /* stop signal dequeued */ extern void task_clear_group_stop_pending(struct task_struct *task); diff --git a/kernel/signal.c b/kernel/signal.c index e9abc69dc0d8..4f7312b49b2d 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -254,7 +254,8 @@ static void task_clear_group_stop_trapping(struct task_struct *task) */ void task_clear_group_stop_pending(struct task_struct *task) { - task->group_stop &= ~(GROUP_STOP_PENDING | GROUP_STOP_CONSUME); + task->group_stop &= ~(GROUP_STOP_PENDING | GROUP_STOP_CONSUME | + GROUP_STOP_DEQUEUED); } /** @@ -602,7 +603,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) * is to alert stop-signal processing code when another * processor has come along and cleared the flag. */ - tsk->signal->flags |= SIGNAL_STOP_DEQUEUED; + current->group_stop |= GROUP_STOP_DEQUEUED; } if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) { /* @@ -821,13 +822,6 @@ static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns) signal->flags = why | SIGNAL_STOP_CONTINUED; signal->group_stop_count = 0; signal->group_exit_code = 0; - } else { - /* - * We are not stopped, but there could be a stop - * signal in the middle of being processed after - * being removed from the queue. Clear that too. - */ - signal->flags &= ~SIGNAL_STOP_DEQUEUED; } } @@ -1855,7 +1849,7 @@ static int do_signal_stop(int signr) /* signr will be recorded in task->group_stop for retries */ WARN_ON_ONCE(signr & ~GROUP_STOP_SIGMASK); - if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) || + if (!likely(current->group_stop & GROUP_STOP_DEQUEUED) || unlikely(signal_group_exit(sig))) return 0; /* -- cgit v1.2.3 From e6fa16ab9c1e9b344428e6fea4d29e3cc4b28fb0 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 27 Apr 2011 20:59:41 +0200 Subject: signal: sigprocmask() should do retarget_shared_pending() In short, almost any change to current->blocked is wrong, or at least can lead to unexpected results. For example: two threads T1 and T2, where T1 sleeps in sigtimedwait/pause/etc. kill(tgid, SIG) can pick T2 for TIF_SIGPENDING. If T2 calls sigprocmask() and blocks SIG before it notices the pending signal, nobody else can handle this pending shared signal. I am not sure this is a bug, but at least this looks strange imho. T1 should not sleep forever; there is a signal which should wake it up. This patch moves the code which actually changes ->blocked into the new helper, set_current_blocked(), and changes this code to call retarget_shared_pending() as exit_signals() does. We should only care about the signals we just blocked; we use "newset & ~current->blocked" as a mask.
We do not check !sigisemptyset(newblocked); retarget_shared_pending() is cheap unless mask & shared_pending. Note: for this particular case we could simply change sigprocmask() to return -EINTR if signal_pending(), but then we should change other callers and, more importantly, if we need this fix then set_current_blocked() will have more callers and some of them can't restart. See the next patch as a random example. Signed-off-by: Oleg Nesterov Reviewed-by: Matt Fleming Acked-by: Tejun Heo --- include/linux/signal.h | 1 + kernel/signal.c | 29 ++++++++++++++++++++++++----- 2 files changed, 25 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/signal.h b/include/linux/signal.h index fcd2b14b1932..ba009c167275 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -243,6 +243,7 @@ extern long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info); extern long do_sigpending(void __user *, unsigned long); extern int sigprocmask(int, sigset_t *, sigset_t *); +extern void set_current_blocked(const sigset_t *); extern int show_unhandled_signals; struct pt_regs; diff --git a/kernel/signal.c b/kernel/signal.c index e8308e3238c1..8aa3a2e226af 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2299,6 +2299,29 @@ long do_no_restart_syscall(struct restart_block *param) return -EINTR; } +/** + * set_current_blocked - change current->blocked mask + * @newset: new mask + * + * It is wrong to change ->blocked directly; this helper should be used + * to ensure the process can't miss a shared signal we are going to block. + */ +void set_current_blocked(const sigset_t *newset) +{ + struct task_struct *tsk = current; + + spin_lock_irq(&tsk->sighand->siglock); + if (signal_pending(tsk) && !thread_group_empty(tsk)) { + sigset_t newblocked; + /* A set of now blocked but previously unblocked signals. */ + signandsets(&newblocked, newset, &current->blocked); + retarget_shared_pending(tsk, &newblocked); + } + tsk->blocked = *newset; + recalc_sigpending(); + spin_unlock_irq(&tsk->sighand->siglock); +} + /* * This is also useful for kernel threads that want to temporarily * (or permanently) block certain signals. @@ -2330,11 +2353,7 @@ int sigprocmask(int how, sigset_t *set, sigset_t *oldset) return -EINVAL; } - spin_lock_irq(&tsk->sighand->siglock); - tsk->blocked = newset; - recalc_sigpending(); - spin_unlock_irq(&tsk->sighand->siglock); - + set_current_blocked(&newset); return 0; } -- cgit v1.2.3 From 943df1485a8ff0e600729e082e568ece04d4de9e Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 27 Apr 2011 21:44:14 +0200 Subject: signal: introduce do_sigtimedwait() to factor out compat/native code Factor out the common code in sys_rt_sigtimedwait/compat_sys_rt_sigtimedwait into the new helper, do_sigtimedwait(). Add a comment to document the extra tick we add to timespec_to_jiffies(ts), thanks to Linus who explained this to me. Perhaps it would be better to move compat_sys_rt_sigtimedwait() into signal.c under CONFIG_COMPAT; then we can make do_sigtimedwait() static.
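For reference, the userland semantics the new helper has to preserve look roughly as follows. This is a hedged sketch using the glibc wrappers, not taken from the patch; error handling is kept minimal.

#include <stdio.h>
#include <signal.h>
#include <time.h>

int main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	int sig;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	/* the signal should be blocked, or it may be delivered normally */
	sigprocmask(SIG_BLOCK, &set, NULL);

	sig = sigtimedwait(&set, &info, &ts);
	if (sig < 0)
		perror("sigtimedwait");	/* EAGAIN on timeout, EINTR otherwise */
	else
		printf("got signal %d from pid %d\n", sig, (int)info.si_pid);
	return 0;
}

The extra jiffy documented in the patch below is what keeps a one-second ts like this one from waking up a tick early.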
Signed-off-by: Oleg Nesterov Acked-by: Tejun Heo Reviewed-by: Matt Fleming --- include/linux/signal.h | 2 + kernel/compat.c | 41 ++++-------------- kernel/signal.c | 110 +++++++++++++++++++++++++++++-------------------- 3 files changed, 74 insertions(+), 79 deletions(-) (limited to 'include/linux') diff --git a/include/linux/signal.h b/include/linux/signal.h index ba009c167275..782546d661ba 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -242,6 +242,8 @@ extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *); extern long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info); extern long do_sigpending(void __user *, unsigned long); +extern int do_sigtimedwait(const sigset_t *, siginfo_t *, + const struct timespec *); extern int sigprocmask(int, sigset_t *, sigset_t *); extern void set_current_blocked(const sigset_t *); extern int show_unhandled_signals; diff --git a/kernel/compat.c b/kernel/compat.c index 06cbb0619531..9214dcd087b7 100644 --- a/kernel/compat.c +++ b/kernel/compat.c @@ -890,10 +890,9 @@ compat_sys_rt_sigtimedwait (compat_sigset_t __user *uthese, { compat_sigset_t s32; sigset_t s; - int sig; struct timespec t; siginfo_t info; - long ret, timeout; + long ret; if (sigsetsize != sizeof(sigset_t)) return -EINVAL; @@ -901,45 +900,19 @@ compat_sys_rt_sigtimedwait (compat_sigset_t __user *uthese, if (copy_from_user(&s32, uthese, sizeof(compat_sigset_t))) return -EFAULT; sigset_from_compat(&s, &s32); - sigdelsetmask(&s,sigmask(SIGKILL)|sigmask(SIGSTOP)); - signotset(&s); - timeout = MAX_SCHEDULE_TIMEOUT; if (uts) { - if (get_compat_timespec (&t, uts)) + if (get_compat_timespec(&t, uts)) return -EFAULT; - if (!timespec_valid(&t)) - return -EINVAL; - timeout = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec); } - spin_lock_irq(&current->sighand->siglock); - sig = dequeue_signal(current, &s, &info); - if (!sig && timeout) { - current->real_blocked = current->blocked; - sigandsets(&current->blocked, &current->blocked, &s); - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); - - timeout = schedule_timeout_interruptible(timeout); - - spin_lock_irq(&current->sighand->siglock); - sig = dequeue_signal(current, &s, &info); - current->blocked = current->real_blocked; - siginitset(&current->real_blocked, 0); - recalc_sigpending(); - } - spin_unlock_irq(&current->sighand->siglock); + ret = do_sigtimedwait(&s, &info, uts ? &t : NULL); - if (sig) { - ret = sig; - if (uinfo) { - if (copy_siginfo_to_user32(uinfo, &info)) - ret = -EFAULT; - } - } else { - ret = timeout?-EINTR:-EAGAIN; + if (ret > 0 && uinfo) { + if (copy_siginfo_to_user32(uinfo, &info)) + ret = -EFAULT; } + return ret; } diff --git a/kernel/signal.c b/kernel/signal.c index c734619554f6..1ab89f677424 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2503,6 +2503,66 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from) #endif +/** + * do_sigtimedwait - wait for queued signals specified in @which + * @which: queued signals to wait for + * @info: if non-null, the signal's siginfo is returned here + * @ts: upper bound on process time suspension */ +int do_sigtimedwait(const sigset_t *which, siginfo_t *info, + const struct timespec *ts) +{ + struct task_struct *tsk = current; + long timeout = MAX_SCHEDULE_TIMEOUT; + sigset_t mask = *which; + int sig; + + if (ts) { + if (!timespec_valid(ts)) + return -EINVAL; + timeout = timespec_to_jiffies(ts); + /* + * We can be close to the next tick; add another one + * to ensure we will wait at least the time asked for.
+ */ + if (ts->tv_sec || ts->tv_nsec) + timeout++; + } + + /* + * Invert the set of allowed signals to get those we want to block. + */ + sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP)); + signotset(&mask); + + spin_lock_irq(&tsk->sighand->siglock); + sig = dequeue_signal(tsk, &mask, info); + if (!sig && timeout) { + /* + * None ready; temporarily unblock those we're interested + * in while we sleep so that we'll be awakened when + * they arrive. + */ + tsk->real_blocked = tsk->blocked; + sigandsets(&tsk->blocked, &tsk->blocked, &mask); + recalc_sigpending(); + spin_unlock_irq(&tsk->sighand->siglock); + + timeout = schedule_timeout_interruptible(timeout); + + spin_lock_irq(&tsk->sighand->siglock); + sig = dequeue_signal(tsk, &mask, info); + tsk->blocked = tsk->real_blocked; + siginitset(&tsk->real_blocked, 0); + recalc_sigpending(); + } + spin_unlock_irq(&tsk->sighand->siglock); + + if (sig) + return sig; + return timeout ? -EINTR : -EAGAIN; +} + /** * sys_rt_sigtimedwait - synchronously wait for queued signals specified * in @uthese @@ -2515,11 +2575,10 @@ SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, siginfo_t __user *, uinfo, const struct timespec __user *, uts, size_t, sigsetsize) { - int ret, sig; sigset_t these; struct timespec ts; siginfo_t info; - long timeout; + int ret; /* XXX: Don't preclude handling different sized sigset_t's. */ if (sigsetsize != sizeof(sigset_t)) @@ -2528,55 +2587,16 @@ SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, if (copy_from_user(&these, uthese, sizeof(these))) return -EFAULT; - /* - * Invert the set of allowed signals to get those we - * want to block. - */ - sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP)); - signotset(&these); - - timeout = MAX_SCHEDULE_TIMEOUT; if (uts) { if (copy_from_user(&ts, uts, sizeof(ts))) return -EFAULT; - if (!timespec_valid(&ts)) - return -EINVAL; - timeout = timespec_to_jiffies(&ts) + (ts.tv_sec || ts.tv_nsec); } - spin_lock_irq(&current->sighand->siglock); - sig = dequeue_signal(current, &these, &info); - if (!sig && timeout) { - /* - * None ready -- temporarily unblock those we're - * interested while we are sleeping in so that we'll - * be awakened when they arrive. - */ - current->real_blocked = current->blocked; - sigandsets(&current->blocked, &current->blocked, &these); - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); - - timeout = schedule_timeout_interruptible(timeout); - - spin_lock_irq(&current->sighand->siglock); - sig = dequeue_signal(current, &these, &info); - current->blocked = current->real_blocked; - siginitset(&current->real_blocked, 0); - recalc_sigpending(); - } - spin_unlock_irq(&current->sighand->siglock); + ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL); - if (sig) { - ret = sig; - if (uinfo) { - if (copy_siginfo_to_user(uinfo, &info)) - ret = -EFAULT; - } - } else { - ret = -EAGAIN; - if (timeout) - ret = -EINTR; + if (ret > 0 && uinfo) { + if (copy_siginfo_to_user(uinfo, &info)) + ret = -EFAULT; } return ret; -- cgit v1.2.3 From 702a5073fdb71eb29cd4912575289fb5044c1894 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 27 Apr 2011 22:01:27 +0200 Subject: signal: rename signandsets() to sigandnsets() As Tejun and Linus pointed out, "nand" is the wrong name for "x & ~y"; it should be "andn". Rename signandsets() as suggested.
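The distinction behind the rename is easy to check; a minimal standalone example with arbitrarily chosen values:

#include <stdio.h>

int main(void)
{
	unsigned int x = 0xc, y = 0xa;

	printf("nand: %#x\n", ~(x & y));	/* a true NAND inverts the conjunction */
	printf("andn: %#x\n", x & ~y);		/* "and-not": the bits of x not in y  */
	return 0;
}

The helper computes the latter, keeping the bits of the first set that are absent from the second, hence "andn".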
Suggested-by: Tejun Heo Signed-off-by: Oleg Nesterov Acked-by: Tejun Heo --- include/linux/signal.h | 6 +++--- kernel/signal.c | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/signal.h b/include/linux/signal.h index 782546d661ba..7e2526374fd7 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -123,13 +123,13 @@ _SIG_SET_BINOP(sigorsets, _sig_or) #define _sig_and(x,y) ((x) & (y)) _SIG_SET_BINOP(sigandsets, _sig_and) -#define _sig_nand(x,y) ((x) & ~(y)) -_SIG_SET_BINOP(signandsets, _sig_nand) +#define _sig_andn(x,y) ((x) & ~(y)) +_SIG_SET_BINOP(sigandnsets, _sig_andn) #undef _SIG_SET_BINOP #undef _sig_or #undef _sig_and -#undef _sig_nand +#undef _sig_andn #define _SIG_SET_OP(name, op) \ static inline void name(sigset_t *set) \ diff --git a/kernel/signal.c b/kernel/signal.c index 4d97e11d7672..e7ee4e642c5a 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -669,7 +669,7 @@ static int rm_from_queue_full(sigset_t *mask, struct sigpending *s) if (sigisemptyset(&m)) return 0; - signandsets(&s->signal, &s->signal, mask); + sigandnsets(&s->signal, &s->signal, mask); list_for_each_entry_safe(q, n, &s->list, list) { if (sigismember(mask, q->info.si_signo)) { list_del_init(&q->list); @@ -2304,7 +2304,7 @@ static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset) if (signal_pending(tsk) && !thread_group_empty(tsk)) { sigset_t newblocked; /* A set of now blocked but previously unblocked signals. */ - signandsets(&newblocked, newset, &current->blocked); + sigandnsets(&newblocked, newset, &current->blocked); retarget_shared_pending(tsk, &newblocked); } tsk->blocked = *newset; @@ -2349,7 +2349,7 @@ int sigprocmask(int how, sigset_t *set, sigset_t *oldset) sigorsets(&newset, &tsk->blocked, set); break; case SIG_UNBLOCK: - signandsets(&newset, &tsk->blocked, set); + sigandnsets(&newset, &tsk->blocked, set); break; case SIG_SETMASK: newset = *set; -- cgit v1.2.3 From b2b07e4fdbc51383cfc0ba5618c2ddf5c9d038f2 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 18 May 2011 15:08:03 +0200 Subject: signal: trivial, fix the "timespec declared inside parameter list" warning Fix the compile warning: do_sigtimedwait(struct timespec *) in signal.h needs a forward declaration of struct timespec. Reported-and-acked-by: Mike Frysinger Signed-off-by: Oleg Nesterov --- include/linux/signal.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/signal.h b/include/linux/signal.h index 7e2526374fd7..a44e7f062238 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -234,6 +234,9 @@ static inline int valid_signal(unsigned long sig) return sig <= _NSIG ? 1 : 0; } +struct timespec; +struct pt_regs; + extern int next_signal(struct sigpending *pending, sigset_t *mask); extern int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p, bool group); @@ -248,7 +251,6 @@ extern int sigprocmask(int, sigset_t *, sigset_t *); extern void set_current_blocked(const sigset_t *); extern int show_unhandled_signals; -struct pt_regs; extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct pt_regs *regs, void *cookie); extern void exit_signals(struct task_struct *tsk); -- cgit v1.2.3
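A closing note on the set_current_blocked() change earlier in this series: retarget_shared_pending() is only referenced in these diffs, never shown. The following is a sketch of its shape, reconstructed from the descriptions above; the real helper lives in kernel/signal.c of this era and may differ in detail.

static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
{
	sigset_t retarget;
	struct task_struct *t;

	/* shared pending signals that are about to become blocked */
	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
	if (sigisemptyset(&retarget))
		return;

	/* wake any other live thread that does not block them */
	t = tsk;
	while_each_thread(tsk, t) {
		if (t->flags & PF_EXITING)
			continue;
		if (has_pending_signals(&retarget, &t->blocked))
			signal_wake_up(t, 0);
	}
}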