kernel/signal.c
27976 /*
27977 * linux/kernel/signal.c
27978 *
27979 * Copyright (C) 1991, 1992 Linus Torvalds
27980 *
27981 * 1997-11-02 Modified for POSIX.1b signals by Richard
27982 * Henderson
27983 */
27984
27985 #include <linux/slab.h>
27986 #include <linux/module.h>
27987 #include <linux/unistd.h>
27988 #include <linux/smp_lock.h>
27989 #include <linux/init.h>
27990
27991 #include <asm/uaccess.h>
27992
27993 /* SLAB caches for signal bits. */
27994
27995 #define DEBUG_SIG 0
27996
27997 #if DEBUG_SIG
27998 #define SIG_SLAB_DEBUG (SLAB_DEBUG_FREE | SLAB_RED_ZONE)
27999 /* | SLAB_POISON */
28000 #else
28001 #define SIG_SLAB_DEBUG 0
28002 #endif
28003
28004 static kmem_cache_t *signal_queue_cachep;
28005
28006 int nr_queued_signals;
28007 int max_queued_signals = 1024;
28008
28009 void __init signals_init(void)
28010 {
28011 signal_queue_cachep =
28012 kmem_cache_create("signal_queue",
28013 sizeof(struct signal_queue),
28014 __alignof__(struct signal_queue),
28015 SIG_SLAB_DEBUG, NULL, NULL);
28016 }
28017
28018
28019 /* Flush all pending signals for a task. */
28020 void
28021 flush_signals(struct task_struct *t)
28022 {
28023 struct signal_queue *q, *n;
28024
28025 t->sigpending = 0;
28026 sigemptyset(&t->signal);
28027 q = t->sigqueue;
28028 t->sigqueue = NULL;
28029 t->sigqueue_tail = &t->sigqueue;
28030
28031 while (q) {
28032 n = q->next;
28033 kmem_cache_free(signal_queue_cachep, q);
28034 nr_queued_signals--;
28035 q = n;
28036 }
28037 }
28038
28039 /* Flush all handlers for a task. */
28040 void
28041 flush_signal_handlers(struct task_struct *t)
28042 {
28043 int i;
28044 struct k_sigaction *ka = &t->sig->action[0];
28045 for (i = _NSIG ; i != 0 ; i--) {
28046 if (ka->sa.sa_handler != SIG_IGN)
28047 ka->sa.sa_handler = SIG_DFL;
28048 ka->sa.sa_flags = 0;
28049 sigemptyset(&ka->sa.sa_mask);
28050 ka++;
28051 }
28052 }
28053
28054 /* Dequeue a signal, returning its siginfo to the
28055 * caller and freeing any queued entry.
28056 *
28057 * All callers must be holding current->sigmask_lock.
28058 */
28059 int
28060 dequeue_signal(sigset_t *mask, siginfo_t *info)
28061 {
28062 unsigned long i, *s, *m, x;
28063 int sig = 0;
28064
28065 #if DEBUG_SIG
28066 printk("SIG dequeue (%s:%d): %d ", current->comm,
28067 current->pid, signal_pending(current));
28068 #endif
28069
28070 /* Find the first desired signal that is pending. */
28071 s = current->signal.sig;
28072 m = mask->sig;
28073 switch (_NSIG_WORDS) {
28074 default:
28075 for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
28076 if ((x = *s &~ *m) != 0) {
28077 sig = ffz(~x) + i*_NSIG_BPW + 1;
28078 break;
28079 }
28080 break;
28081
28082 case 2: if ((x = s[0] &~ m[0]) != 0)
28083 sig = 1;
28084 else if ((x = s[1] &~ m[1]) != 0)
28085 sig = _NSIG_BPW + 1;
28086 else
28087 break;
28088 sig += ffz(~x);
28089 break;
28090
28091 case 1: if ((x = *s &~ *m) != 0)
28092 sig = ffz(~x) + 1;
28093 break;
28094 }
28095
28096 if (sig) {
28097 int reset = 1;
28098
28099 /* Collect the siginfo appropriate to this signal. */
28100 if (sig < SIGRTMIN) {
28101 /* XXX: As an extension, support queueing exactly
28102 one non-rt signal if SA_SIGINFO is set, so that
28103 we can get more detailed information about the
28104 cause of the signal. */
28105 /* Deciding not to init these couple of fields is
28106 more expensive than just initializing them. */
28107 info->si_signo = sig;
28108 info->si_errno = 0;
28109 info->si_code = 0;
28110 info->si_pid = 0;
28111 info->si_uid = 0;
28112 } else {
28113 struct signal_queue *q, **pp;
28114 pp = &current->sigqueue;
28115 q = current->sigqueue;
28116
28117 /* Find the one we're interested in ... */
28118 for ( ; q ; pp = &q->next, q = q->next)
28119 if (q->info.si_signo == sig)
28120 break;
28121 if (q) {
28122 if ((*pp = q->next) == NULL)
28123 current->sigqueue_tail = pp;
28124 *info = q->info;
28125 kmem_cache_free(signal_queue_cachep,q);
28126 nr_queued_signals--;
28127
28128 /* then see if this signal is still pending. */
28129 q = *pp;
28130 while (q) {
28131 if (q->info.si_signo == sig) {
28132 reset = 0;
28133 break;
28134 }
28135 q = q->next;
28136 }
28137 } else {
28138 /* Ok, it wasn't in the queue. It must have
28139 been sent by a non-rt mechanism while we
28140 were out of queue space. So zero out the
28141 info. */
28142 info->si_signo = sig;
28143 info->si_errno = 0;
28144 info->si_code = 0;
28145 info->si_pid = 0;
28146 info->si_uid = 0;
28147 }
28148 }
28149
28150 if (reset)
28151 sigdelset(&current->signal, sig);
28152 recalc_sigpending(current);
28153
28154 /* XXX: Once POSIX.1b timers are in, if si_code ==
28155 SI_TIMER, we need to xchg out the timer overrun
28156 values. */
28157 } else {
28158 /* XXX: Once CLONE_PID is in to join those "threads"
28159 that are part of the same "process", look for
28160 signals sent to the "process" as well. */
28161
28162 /* Sanity check... */
28163 if (mask == &current->blocked &&
28164 signal_pending(current)) {
28165 printk(KERN_CRIT "SIG: sigpending lied\n");
28166 current->sigpending = 0;
28167 }
28168 }
28169
28170 #if DEBUG_SIG
28171 printk(" %d -> %d\n", signal_pending(current), sig);
28172 #endif
28173
28174 return sig;
28175 }
28176
28177 /* Determine whether a signal should be posted or not.
28178 *
28179 * Signals with SIG_IGN can be ignored, except for the
28180 * special case of a SIGCHLD.
28181 *
28182 * Some signals with SIG_DFL default to a non-action. */
28183 static int ignored_signal(int sig, struct task_struct *t)
28184 {
28185 struct signal_struct *signals;
28186 struct k_sigaction *ka;
28187
28188 /* Don't ignore traced or blocked signals */
28189 if ((t->flags & PF_PTRACED) ||
28190 sigismember(&t->blocked, sig))
28191 return 0;
28192
28193 signals = t->sig;
28194 if (!signals)
28195 return 1;
28196
28197 ka = &signals->action[sig-1];
28198 switch ((unsigned long) ka->sa.sa_handler) {
28199 case (unsigned long) SIG_DFL:
28200 if (sig == SIGCONT ||
28201 sig == SIGWINCH ||
28202 sig == SIGCHLD ||
28203 sig == SIGURG)
28204 break;
28205 return 0;
28206
28207 case (unsigned long) SIG_IGN:
28208 if (sig != SIGCHLD)
28209 break;
28210 /* fallthrough */
28211 default:
28212 return 0;
28213 }
28214 return 1;
28215 }
28216
28217 int
28218 send_sig_info(int sig, struct siginfo *info,
28219 struct task_struct *t)
28220 {
28221 unsigned long flags;
28222 int ret;
28223
28224 #if DEBUG_SIG
28225 printk("SIG queue (%s:%d): %d ", t->comm, t->pid, sig);
28226 #endif
28227
28228 ret = -EINVAL;
28229 if (sig < 0 || sig > _NSIG)
28230 goto out_nolock;
28231 /* The somewhat baroque permissions check... */
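/* (x ^ y) is non-zero exactly when x != y, so the long chain of
 * "&&" terms below denies a user-originated signal only when the
 * sender is in a different session (or the signal is not SIGCONT),
 * none of the sender's euid/uid match the target's uid/suid, and
 * the capability override is absent. */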
28232 ret = -EPERM;
28233 if ((!info || ((unsigned long)info != 1 &&
28234 SI_FROMUSER(info)))
28235 && ((sig != SIGCONT) ||
28236 (current->session != t->session))
28237 && (current->euid ^ t->suid)
28238 && (current->euid ^ t->uid)
28239 && (current->uid ^ t->suid)
28240 && (current->uid ^ t->uid)
28241 && !capable(CAP_SYS_ADMIN))
28242 goto out_nolock;
28243
28244 /* The null signal is a permissions and process
28245 * existence probe. No signal is actually delivered.
28246 * Same goes for zombies. */
28247 ret = 0;
28248 if (!sig || !t->sig)
28249 goto out_nolock;
28250
28251 spin_lock_irqsave(&t->sigmask_lock, flags);
28252 switch (sig) {
28253 case SIGKILL: case SIGCONT:
28254 /* Wake up the process if stopped. */
28255 if (t->state == TASK_STOPPED)
28256 wake_up_process(t);
28257 t->exit_code = 0;
28258 sigdelsetmask(&t->signal,
28259 (sigmask(SIGSTOP)|sigmask(SIGTSTP) |
28260 sigmask(SIGTTOU)|sigmask(SIGTTIN)));
28261 /* Take the recalculation hit in this corner case,
28262 * not on the mainline path */
28263 recalc_sigpending(t);
28264 break;
28265
28266 case SIGSTOP: case SIGTSTP:
28267 case SIGTTIN: case SIGTTOU:
28268 /* If we're stopping again, cancel SIGCONT */
28269 sigdelset(&t->signal, SIGCONT);
28270 /* Take the recalculation hit in this corner case,
28271 * not on the mainline path */
28272 recalc_sigpending(t);
28273 break;
28274 }
28275
28276 /* Optimize away the signal, if it's a signal that can
28277 * be handled immediately (ie non-blocked and untraced)
28278 * and that is ignored (either explicitly or by
28279 * default). */
28280
28281 if (ignored_signal(sig, t))
28282 goto out;
28283
28284 if (sig < SIGRTMIN) {
28285 /* Non-real-time signals are not queued. */
28286 /* XXX: As an extension, support queueing exactly one
28287 * non-rt signal if SA_SIGINFO is set, so that we can
28288 * get more detailed information about the cause of
28289 * the signal. */
28290 if (sigismember(&t->signal, sig))
28291 goto out;
28292 } else {
28293 /* Real-time signals must be queued if sent by
28294 * sigqueue, or some other real-time mechanism. It
28295 * is implementation defined whether kill() does so.
28296 * We attempt to do so, on the principle of least
28297 * surprise, but since kill is not allowed to fail
28298 * with EAGAIN when low on memory we just make sure
28299 * at least one signal gets delivered and don't pass
28300 * on the info struct. */
28301
28302 struct signal_queue *q = 0;
28303
28304 if (nr_queued_signals < max_queued_signals) {
28305 q = (struct signal_queue *)
28306 kmem_cache_alloc(signal_queue_cachep,
28307 GFP_KERNEL);
28308 }
28309
28310 if (q) {
28311 nr_queued_signals++;
28312 q->next = NULL;
28313 *t->sigqueue_tail = q;
28314 t->sigqueue_tail = &q->next;
28315 switch ((unsigned long) info) {
28316 case 0:
28317 q->info.si_signo = sig;
28318 q->info.si_errno = 0;
28319 q->info.si_code = SI_USER;
28320 q->info.si_pid = current->pid;
28321 q->info.si_uid = current->uid;
28322 break;
28323 case 1:
28324 q->info.si_signo = sig;
28325 q->info.si_errno = 0;
28326 q->info.si_code = SI_KERNEL;
28327 q->info.si_pid = 0;
28328 q->info.si_uid = 0;
28329 break;
28330 default:
28331 q->info = *info;
28332 break;
28333 }
28334 } else {
28335 /* If this was sent by a rt mechanism, try again.*/
28336 if (info->si_code < 0) {
28337 ret = -EAGAIN;
28338 goto out;
28339 }
28340 /* Otherwise, mention that the signal is pending,
28341 but don't queue the info. */
28342 }
28343 }
28344
28345 sigaddset(&t->signal, sig);
28346 if (!sigismember(&t->blocked, sig)) {
28347 t->sigpending = 1;
28348 #ifdef __SMP__
28349 /* If the task is running on a different CPU force a
28350 * reschedule on the other CPU - note that the code
28351 * below is a tad loose and might occasionally kick
28352 * the wrong CPU if we catch the process in the
28353 * process of changing - but no harm is done by that
28354 * other than doing an extra (lightweight) IPI
28355 * interrupt.
28356 *
28357 * note that we rely on the previous spin_lock to
28358 * lock interrupts for us! No need to set
28359 * need_resched since signal event passing goes
28360 * through ->blocked. */
28361 spin_lock(&runqueue_lock);
28362 if (t->has_cpu && t->processor != smp_processor_id())
28363 smp_send_reschedule(t->processor);
28364 spin_unlock(&runqueue_lock);
28365 #endif /* __SMP__ */
28366 }
28367
28368 out:
28369 spin_unlock_irqrestore(&t->sigmask_lock, flags);
28370 if(t->state == TASK_INTERRUPTIBLE && signal_pending(t))
28371 wake_up_process(t);
28372
28373 out_nolock:
28374 #if DEBUG_SIG
28375 printk(" %d -> %d\n", signal_pending(t), ret);
28376 #endif
28377
28378 return ret;
28379 }
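
/* A minimal user-space sketch (illustration only, not part of the
 * kernel source) of the real-time queueing path above: the glibc
 * sigqueue(3) wrapper reaches sys_rt_sigqueueinfo() further down,
 * which calls send_sig_info() with si_code == SI_QUEUE (negative),
 * so a full queue reports EAGAIN rather than silently collapsing
 * the signal. The signal number and payload are arbitrary. */

#include <signal.h>

int notify_worker(pid_t pid)
{
	union sigval value;

	value.sival_int = 42;	/* arrives in the receiver's si_value */
	return sigqueue(pid, SIGRTMIN + 1, value);	/* -1 with EAGAIN if the queue is full */
}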
28380
28381 /* Force a signal that the process can't ignore: if
28382 * necessary we unblock the signal and change any SIG_IGN
28383 * to SIG_DFL. */
28384
28385 int
28386 force_sig_info(int sig, struct siginfo *info,
28387 struct task_struct *t)
28388 {
28389 unsigned long int flags;
28390
28391 spin_lock_irqsave(&t->sigmask_lock, flags);
28392 if (t->sig == NULL) {
28393 spin_unlock_irqrestore(&t->sigmask_lock, flags);
28394 return -ESRCH;
28395 }
28396
28397 if (t->sig->action[sig-1].sa.sa_handler == SIG_IGN)
28398 t->sig->action[sig-1].sa.sa_handler = SIG_DFL;
28399 sigdelset(&t->blocked, sig);
28400 spin_unlock_irqrestore(&t->sigmask_lock, flags);
28401
28402 return send_sig_info(sig, info, t);
28403 }
28404
28405 /* kill_pg() sends a signal to a process group: this is
28406 * what the tty control characters do (^C, ^Z etc) */
28407 int
28408 kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
28409 {
28410 int retval = -EINVAL;
28411 if (pgrp > 0) {
28412 struct task_struct *p;
28413 int found = 0;
28414
28415 retval = -ESRCH;
28416 read_lock(&tasklist_lock);
28417 for_each_task(p) {
28418 if (p->pgrp == pgrp) {
28419 int err = send_sig_info(sig, info, p);
28420 if (err != 0)
28421 retval = err;
28422 else
28423 found++;
28424 }
28425 }
28426 read_unlock(&tasklist_lock);
28427 if (found)
28428 retval = 0;
28429 }
28430 return retval;
28431 }
28432
28433 /* kill_sl() sends a signal to the session leader: this
28434 * is used to send SIGHUP to the controlling process of a
28435 * terminal when the connection is lost. */
28436 int
28437 kill_sl_info(int sig, struct siginfo *info, pid_t sess)
28438 {
28439 int retval = -EINVAL;
28440 if (sess > 0) {
28441 struct task_struct *p;
28442 int found = 0;
28443
28444 retval = -ESRCH;
28445 read_lock(&tasklist_lock);
28446 for_each_task(p) {
28447 if (p->leader && p->session == sess) {
28448 int err = send_sig_info(sig, info, p);
28449 if (err)
28450 retval = err;
28451 else
28452 found++;
28453 }
28454 }
28455 read_unlock(&tasklist_lock);
28456 if (found)
28457 retval = 0;
28458 }
28459 return retval;
28460 }
28461
28462 inline int
28463 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
28464 {
28465 int error;
28466 struct task_struct *p;
28467
28468 read_lock(&tasklist_lock);
28469 p = find_task_by_pid(pid);
28470 error = -ESRCH;
28471 if (p)
28472 error = send_sig_info(sig, info, p);
28473 read_unlock(&tasklist_lock);
28474 return error;
28475 }
28476
28477 /* kill_something() interprets pid in interesting ways
28478 * just like kill(2).
28479 *
28480 * POSIX specifies that kill(-1,sig) is unspecified, but
28481 * what we have is probably wrong. Should make it like
28482 * BSD or SYSV. */
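/* Concretely, the code below treats pid > 0 as a single process,
 * pid == 0 as the caller's own process group, pid == -1 as every
 * process except init and the caller, and pid < -1 as the process
 * group -pid. */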
28483 int
28484 kill_something_info(int sig, struct siginfo *info,
28485 int pid)
28486 {
28487 if (!pid) {
28488 return kill_pg_info(sig, info, current->pgrp);
28489 } else if (pid == -1) {
28490 int retval = 0, count = 0;
28491 struct task_struct * p;
28492
28493 read_lock(&tasklist_lock);
28494 for_each_task(p) {
28495 if (p->pid > 1 && p != current) {
28496 int err = send_sig_info(sig, info, p);
28497 ++count;
28498 if (err != -EPERM)
28499 retval = err;
28500 }
28501 }
28502 read_unlock(&tasklist_lock);
28503 return count ? retval : -ESRCH;
28504 } else if (pid < 0) {
28505 return kill_pg_info(sig, info, -pid);
28506 } else {
28507 return kill_proc_info(sig, info, pid);
28508 }
28509 }
28510
28511 /* These are for backward compatibility with the rest of
28512 * the kernel source. */
28513 int
28514 send_sig(int sig, struct task_struct *p, int priv)
28515 {
28516 return send_sig_info(sig, (void*)(long)(priv != 0), p);
28517 }
28518
28519 void
28520 force_sig(int sig, struct task_struct *p)
28521 {
28522 force_sig_info(sig, (void*)1L, p);
28523 }
28524
28525 int
28526 kill_pg(pid_t pgrp, int sig, int priv)
28527 {
28528 return kill_pg_info(sig,
28529 (void *)(long)(priv != 0), pgrp);
28530 }
28531
28532 int
28533 kill_sl(pid_t sess, int sig, int priv)
28534 {
28535 return kill_sl_info(sig,
28536 (void *)(long)(priv != 0), sess);
28537 }
28538
28539 int
28540 kill_proc(pid_t pid, int sig, int priv)
28541 {
28542 return kill_proc_info(sig,
28543 (void *)(long)(priv != 0), pid);
28544 }
28545
28546 /* Let a parent know about a status change of a child. */
28547 void
28548 notify_parent(struct task_struct *tsk, int sig)
28549 {
28550 struct siginfo info;
28551 int why;
28552
28553 info.si_signo = sig;
28554 info.si_errno = 0;
28555 info.si_pid = tsk->pid;
28556
28557 /* FIXME: find out whether or not this is supposed to
28558 * be c*time. */
28559 info.si_utime = tsk->times.tms_utime;
28560 info.si_stime = tsk->times.tms_stime;
28561
28562 why = SI_KERNEL; /* shouldn't happen */
28563 switch (tsk->state) {
28564 case TASK_ZOMBIE:
28565 if (tsk->exit_code & 0x80)
28566 why = CLD_DUMPED;
28567 else if (tsk->exit_code & 0x7f)
28568 why = CLD_KILLED;
28569 else
28570 why = CLD_EXITED;
28571 break;
28572 case TASK_STOPPED:
28573 /* FIXME -- can we deduce CLD_TRAPPED or
28574 * CLD_CONTINUED? */
28575 why = CLD_STOPPED;
28576 break;
28577
28578 default:
28579 printk(KERN_DEBUG
28580 "eh? notify_parent with state %ld?\n",
28581 tsk->state);
28582 break;
28583 }
28584 info.si_code = why;
28585
28586 send_sig_info(sig, &info, tsk->p_pptr);
28587 wake_up_interruptible(&tsk->p_pptr->wait_chldexit);
28588 }
28589
28590 EXPORT_SYMBOL(dequeue_signal);
28591 EXPORT_SYMBOL(flush_signals);
28592 EXPORT_SYMBOL(force_sig);
28593 EXPORT_SYMBOL(force_sig_info);
28594 EXPORT_SYMBOL(kill_pg);
28595 EXPORT_SYMBOL(kill_pg_info);
28596 EXPORT_SYMBOL(kill_proc);
28597 EXPORT_SYMBOL(kill_proc_info);
28598 EXPORT_SYMBOL(kill_sl);
28599 EXPORT_SYMBOL(kill_sl_info);
28600 EXPORT_SYMBOL(notify_parent);
28601 EXPORT_SYMBOL(recalc_sigpending);
28602 EXPORT_SYMBOL(send_sig);
28603 EXPORT_SYMBOL(send_sig_info);
28604
28605
28606 /* System call entry points. */
28607
28608 /* We don't need to get the kernel lock - this is all
28609 * local to this particular thread.. (and that's good,
28610 * because this is _heavily_ used by various programs) */
28611 asmlinkage int
28612 sys_rt_sigprocmask(int how, sigset_t *set,
28613 sigset_t *oset, size_t sigsetsize)
28614 {
28615 int error = -EINVAL;
28616 sigset_t old_set, new_set;
28617
28618 /* XXX: Don't preclude handling different sized
28619 * sigset_t's. */
28620 if (sigsetsize != sizeof(sigset_t))
28621 goto out;
28622
28623 if (set) {
28624 error = -EFAULT;
28625 if (copy_from_user(&new_set, set, sizeof(*set)))
28626 goto out;
28627 sigdelsetmask(&new_set,
28628 sigmask(SIGKILL)|sigmask(SIGSTOP));
28629
28630 spin_lock_irq(&current->sigmask_lock);
28631 old_set = current->blocked;
28632
28633 error = 0;
28634 switch (how) {
28635 default:
28636 error = -EINVAL;
28637 break;
28638 case SIG_BLOCK:
28639 sigorsets(&new_set, &old_set, &new_set);
28640 break;
28641 case SIG_UNBLOCK:
28642 signandsets(&new_set, &old_set, &new_set);
28643 break;
28644 case SIG_SETMASK:
28645 break;
28646 }
28647
28648 current->blocked = new_set;
28649 recalc_sigpending(current);
28650 spin_unlock_irq(&current->sigmask_lock);
28651 if (error)
28652 goto out;
28653 if (oset)
28654 goto set_old;
28655 } else if (oset) {
28656 spin_lock_irq(&current->sigmask_lock);
28657 old_set = current->blocked;
28658 spin_unlock_irq(&current->sigmask_lock);
28659
28660 set_old:
28661 error = -EFAULT;
28662 if (copy_to_user(oset, &old_set, sizeof(*oset)))
28663 goto out;
28664 }
28665 error = 0;
28666 out:
28667 return error;
28668 }
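
/* A minimal user-space sketch (illustration only, not part of the
 * kernel source): glibc's sigprocmask(2) reaches the routine above
 * (or the older sys_sigprocmask() below), and SIGKILL/SIGSTOP are
 * silently stripped from the requested mask by the sigdelsetmask()
 * call above. SIGINT is just an example. */

#include <signal.h>

void block_sigint(sigset_t *saved)
{
	sigset_t set;

	sigemptyset(&set);
	sigaddset(&set, SIGINT);
	sigprocmask(SIG_BLOCK, &set, saved);	/* previous mask kept for a later restore */
}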
28669
28670 asmlinkage int
28671 sys_rt_sigpending(sigset_t *set, size_t sigsetsize)
28672 {
28673 int error = -EINVAL;
28674 sigset_t pending;
28675
28676 /* XXX: Don't preclude handling different sized
28677 * sigset_t's. */
28678 if (sigsetsize != sizeof(sigset_t))
28679 goto out;
28680
28681 spin_lock_irq(&current->sigmask_lock);
28682 sigandsets(&pending,
28683 &current->blocked, &current->signal);
28684 spin_unlock_irq(&current->sigmask_lock);
28685
28686 error = -EFAULT;
28687 if (!copy_to_user(set, &pending, sizeof(*set)))
28688 error = 0;
28689 out:
28690 return error;
28691 }
28692
28693 asmlinkage int
28694 sys_rt_sigtimedwait(const sigset_t *uthese,
28695 siginfo_t *uinfo, const struct timespec *uts,
28696 size_t sigsetsize)
28697 {
28698 int ret, sig;
28699 sigset_t these;
28700 struct timespec ts;
28701 siginfo_t info;
28702 long timeout = 0;
28703
28704 /* XXX: Don't preclude handling different sized
28705 * sigset_t's. */
28706 if (sigsetsize != sizeof(sigset_t))
28707 return -EINVAL;
28708
28709 if (copy_from_user(&these, uthese, sizeof(these)))
28710 return -EFAULT;
28711 else {
28712 /* Invert the set of allowed signals to get those we
28713 * want to block. */
28714 signotset(&these);
28715 }
28716
28717 if (uts) {
28718 if (copy_from_user(&ts, uts, sizeof(ts)))
28719 return -EFAULT;
28720 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
28721 || ts.tv_sec < 0)
28722 return -EINVAL;
28723 }
28724
28725 spin_lock_irq(&current->sigmask_lock);
28726 sig = dequeue_signal(&these, &info);
28727 if (!sig) {
28728 /* None ready -- temporarily unblock those we're
28729 * interested in so that we'll be awakened when they
28730 * arrive. */
28731 sigset_t oldblocked = current->blocked;
28732 sigandsets(&current->blocked, &current->blocked,
28733 &these);
28734 recalc_sigpending(current);
28735 spin_unlock_irq(&current->sigmask_lock);
28736
28737 timeout = MAX_SCHEDULE_TIMEOUT;
28738 if (uts)
28739 timeout = (timespec_to_jiffies(&ts)
28740 + (ts.tv_sec || ts.tv_nsec));
28741
28742 current->state = TASK_INTERRUPTIBLE;
28743 timeout = schedule_timeout(timeout);
28744
28745 spin_lock_irq(&current->sigmask_lock);
28746 sig = dequeue_signal(&these, &info);
28747 current->blocked = oldblocked;
28748 recalc_sigpending(current);
28749 }
28750 spin_unlock_irq(&current->sigmask_lock);
28751
28752 if (sig) {
28753 ret = sig;
28754 if (uinfo) {
28755 if (copy_to_user(uinfo, &info, sizeof(siginfo_t)))
28756 ret = -EFAULT;
28757 }
28758 } else {
28759 ret = -EAGAIN;
28760 if (timeout)
28761 ret = -EINTR;
28762 }
28763
28764 return ret;
28765 }
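
/* A minimal user-space sketch (illustration only, not part of the
 * kernel source) of reaching the routine above via sigtimedwait(2).
 * The signal is blocked first so it stays pending for
 * dequeue_signal() instead of being delivered to a handler; the
 * choice of SIGUSR1 and the five-second timeout is arbitrary. */

#include <signal.h>
#include <time.h>

int wait_for_usr1(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec timeout = { 5, 0 };	/* five seconds */

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);
	return sigtimedwait(&set, &info, &timeout);	/* signal number, or -1 with errno set */
}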
28766
28767 asmlinkage int
28768 sys_kill(int pid, int sig)
28769 {
28770 struct siginfo info;
28771
28772 info.si_signo = sig;
28773 info.si_errno = 0;
28774 info.si_code = SI_USER;
28775 info.si_pid = current->pid;
28776 info.si_uid = current->uid;
28777
28778 return kill_something_info(sig, &info, pid);
28779 }
28780
28781 asmlinkage int
28782 sys_rt_sigqueueinfo(int pid, int sig, siginfo_t *uinfo)
28783 {
28784 siginfo_t info;
28785
28786 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
28787 return -EFAULT;
28788
28789 /* Not even root can pretend to send signals from the
28790 * kernel. Nor can they impersonate a kill(), which
28791 * adds source info. */
28792 if (info.si_code >= 0)
28793 return -EPERM;
28794 info.si_signo = sig;
28795
28796 /* POSIX.1b doesn't mention process groups. */
28797 return kill_proc_info(sig, &info, pid);
28798 }
28799
28800 int
28801 do_sigaction(int sig, const struct k_sigaction *act,
28802 struct k_sigaction *oact)
28803 {
28804 struct k_sigaction *k;
28805
28806 if (sig < 1 || sig > _NSIG ||
28807 (act && (sig == SIGKILL || sig == SIGSTOP)))
28808 return -EINVAL;
28809
28810 spin_lock_irq(&current->sigmask_lock);
28811 k = &current->sig->action[sig-1];
28812
28813 if (oact) *oact = *k;
28814
28815 if (act) {
28816 *k = *act;
28817 sigdelsetmask(&k->sa.sa_mask,
28818 sigmask(SIGKILL) | sigmask(SIGSTOP));
28819
28820 /* POSIX 3.3.1.3: "Setting a signal action to SIG_IGN
28821 * for a signal that is pending shall cause the
28822 * pending signal to be discarded, whether or not it
28823 * is blocked."
28824 *
28825 * "Setting a signal action to SIG_DFL for a signal
28826 * that is pending and whose default action is to
28827 * ignore the signal (for example, SIGCHLD), shall
28828 * cause the pending signal to be discarded, whether
28829 * or not it is blocked"
28830 *
28831 * Note the silly behaviour of SIGCHLD: SIG_IGN means
28832 * that the signal isn't actually ignored, but does
28833 * automatic child reaping, while SIG_DFL is
28834 * explicitly said by POSIX to force the signal to be
28835 * ignored. */
28836 if (k->sa.sa_handler == SIG_IGN
28837 || (k->sa.sa_handler == SIG_DFL
28838 && (sig == SIGCONT ||
28839 sig == SIGCHLD ||
28840 sig == SIGWINCH))) {
28841 /* So dequeue any that might be pending. XXX:
28842 * process-wide signals? */
28843 if (sig >= SIGRTMIN &&
28844 sigismember(&current->signal, sig)) {
28845 struct signal_queue *q, **pp;
28846 pp = &current->sigqueue;
28847 q = current->sigqueue;
28848 while (q) {
28849 if (q->info.si_signo != sig)
28850 pp = &q->next;
28851 else {
28852 *pp = q->next;
28853 kmem_cache_free(signal_queue_cachep, q);
28854 nr_queued_signals--;
28855 }
28856 q = *pp;
28857 }
28858
28859 }
28860 sigdelset(&current->signal, sig);
28861 recalc_sigpending(current);
28862 }
28863 }
28864
28865 spin_unlock_irq(&current->sigmask_lock);
28866
28867 return 0;
28868 }
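
/* A minimal user-space sketch (illustration only, not part of the
 * kernel source) of the SIGCHLD behaviour discussed above: setting
 * SIG_IGN discards any pending SIGCHLD and enables the automatic
 * child reaping mentioned in the comment, while SIG_DFL likewise
 * discards a pending SIGCHLD because its default action is to
 * ignore the signal. */

#include <signal.h>

void ignore_children(void)
{
	struct sigaction sa;

	sa.sa_handler = SIG_IGN;	/* children are reaped automatically */
	sa.sa_flags = 0;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGCHLD, &sa, NULL);
}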
28869
28870 int
28871 do_sigaltstack(const stack_t *uss, stack_t *uoss,
28872 unsigned long sp)
28873 {
28874 stack_t oss;
28875 int error;
28876
28877 if (uoss) {
28878 oss.ss_sp = (void *) current->sas_ss_sp;
28879 oss.ss_size = current->sas_ss_size;
28880 oss.ss_flags = sas_ss_flags(sp);
28881 }
28882
28883 if (uss) {
28884 void *ss_sp;
28885 size_t ss_size;
28886 int ss_flags;
28887
28888 error = -EFAULT;
28889 if (verify_area(VERIFY_READ, uss, sizeof(*uss))
28890 || __get_user(ss_sp, &uss->ss_sp)
28891 || __get_user(ss_flags, &uss->ss_flags)
28892 || __get_user(ss_size, &uss->ss_size))
28893 goto out;
28894
28895 error = -EPERM;
28896 if (on_sig_stack (sp))
28897 goto out;
28898
28899 error = -EINVAL;
28900 if (ss_flags & ~SS_DISABLE)
28901 goto out;
28902
28903 if (ss_flags & SS_DISABLE) {
28904 ss_size = 0;
28905 ss_sp = NULL;
28906 } else {
28907 error = -ENOMEM;
28908 if (ss_size < MINSIGSTKSZ)
28909 goto out;
28910 }
28911
28912 current->sas_ss_sp = (unsigned long) ss_sp;
28913 current->sas_ss_size = ss_size;
28914 }
28915
28916 if (uoss) {
28917 error = -EFAULT;
28918 if (copy_to_user(uoss, &oss, sizeof(oss)))
28919 goto out;
28920 }
28921
28922 error = 0;
28923 out:
28924 return error;
28925 }
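
/* A minimal user-space sketch (illustration only, not part of the
 * kernel source): setting up the alternate stack consumed by
 * do_sigaltstack() above and asking for delivery on it with
 * SA_ONSTACK. The SIGSEGV/SIGSTKSZ choices are arbitrary. */

#include <signal.h>

static char alt_stack[SIGSTKSZ];

void catch_segv_on_alt_stack(void (*handler)(int))
{
	stack_t ss;
	struct sigaction sa;

	ss.ss_sp = alt_stack;
	ss.ss_flags = 0;
	ss.ss_size = sizeof(alt_stack);
	sigaltstack(&ss, NULL);		/* ends up in do_sigaltstack() */

	sa.sa_handler = handler;
	sa.sa_flags = SA_ONSTACK;	/* run the handler on the alternate stack */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);
}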
28926
28927 #if !defined(__alpha__)
28928 /* Alpha has its own versions with special arguments. */
28929
28930 asmlinkage int
28931 sys_sigprocmask(int how, old_sigset_t *set,
28932 old_sigset_t *oset)
28933 {
28934 int error;
28935 old_sigset_t old_set, new_set;
28936
28937 if (set) {
28938 error = -EFAULT;
28939 if (copy_from_user(&new_set, set, sizeof(*set)))
28940 goto out;
28941 new_set &= ~(sigmask(SIGKILL)|sigmask(SIGSTOP));
28942
28943 spin_lock_irq(&current->sigmask_lock);
28944 old_set = current->blocked.sig[0];
28945
28946 error = 0;
28947 switch (how) {
28948 default:
28949 error = -EINVAL;
28950 break;
28951 case SIG_BLOCK:
28952 sigaddsetmask(&current->blocked, new_set);
28953 break;
28954 case SIG_UNBLOCK:
28955 sigdelsetmask(&current->blocked, new_set);
28956 break;
28957 case SIG_SETMASK:
28958 current->blocked.sig[0] = new_set;
28959 break;
28960 }
28961
28962 recalc_sigpending(current);
28963 spin_unlock_irq(&current->sigmask_lock);
28964 if (error)
28965 goto out;
28966 if (oset)
28967 goto set_old;
28968 } else if (oset) {
28969 old_set = current->blocked.sig[0];
28970 set_old:
28971 error = -EFAULT;
28972 if (copy_to_user(oset, &old_set, sizeof(*oset)))
28973 goto out;
28974 }
28975 error = 0;
28976 out:
28977 return error;
28978 }
28979
28980 asmlinkage int
28981 sys_sigpending(old_sigset_t *set)
28982 {
28983 int error;
28984 old_sigset_t pending;
28985
28986 spin_lock_irq(&current->sigmask_lock);
28987 pending =
28988 current->blocked.sig[0] & current->signal.sig[0];
28989 spin_unlock_irq(&current->sigmask_lock);
28990
28991 error = -EFAULT;
28992 if (!copy_to_user(set, &pending, sizeof(*set)))
28993 error = 0;
28994 return error;
28995 }
28996
28997 #ifndef __sparc__
28998 asmlinkage int
28999 sys_rt_sigaction(int sig, const struct sigaction *act,
29000 struct sigaction *oact, size_t sigsetsize)
29001 {
29002 struct k_sigaction new_sa, old_sa;
29003 int ret = -EINVAL;
29004
29005 /* XXX: Don't preclude handling different sized
29006 * sigset_t's. */
29007 if (sigsetsize != sizeof(sigset_t))
29008 goto out;
29009
29010 if (act) {
29011 if (copy_from_user(&new_sa.sa,act,sizeof(new_sa.sa)))
29012 return -EFAULT;
29013 }
29014
29015 ret = do_sigaction(sig, act ? &new_sa : NULL,
29016 oact ? &old_sa : NULL);
29017
29018 if (!ret && oact) {
29019 if (copy_to_user(oact, &old_sa.sa,sizeof(old_sa.sa)))
29020 return -EFAULT;
29021 }
29022 out:
29023 return ret;
29024 }
29025 #endif /* __sparc__ */
29026 #endif
29027
29028 #if !defined(__alpha__)
29029 /* For backwards compatibility. Functionality superseded
29030 * by sigprocmask. */
29031 asmlinkage int
29032 sys_sgetmask(void)
29033 {
29034 /* SMP safe */
29035 return current->blocked.sig[0];
29036 }
29037
29038 asmlinkage int
29039 sys_ssetmask(int newmask)
29040 {
29041 int old;
29042
29043 spin_lock_irq(&current->sigmask_lock);
29044 old = current->blocked.sig[0];
29045
29046 siginitset(&current->blocked,
29047 newmask & ~(sigmask(SIGKILL)|sigmask(SIGSTOP)));
29048 recalc_sigpending(current);
29049 spin_unlock_irq(&current->sigmask_lock);
29050
29051 return old;
29052 }
29053
29054 /* For backwards compatibility. Functionality superseded
29055 * by sigaction. */
29056 asmlinkage unsigned long
29057 sys_signal(int sig, __sighandler_t handler)
29058 {
29059 struct k_sigaction new_sa, old_sa;
29060 int ret;
29061
29062 new_sa.sa.sa_handler = handler;
29063 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
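/* SA_ONESHOT and SA_NOMASK are the older Linux names for the
 * POSIX SA_RESETHAND and SA_NODEFER flags, giving the classic
 * System V signal() semantics: the handler is reset to SIG_DFL
 * on delivery and the signal is not blocked while it runs. */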
29064
29065 ret = do_sigaction(sig, &new_sa, &old_sa);
29066
29067 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
29068 }
29069 #endif /* !alpha */