include/linux/sched.h
16107 #ifndef _LINUX_SCHED_H
16108 #define _LINUX_SCHED_H
16109
16110 #include <asm/param.h> /* for HZ */
16111
16112 extern unsigned long event;
16113
16114 #include <linux/binfmts.h>
16115 #include <linux/personality.h>
16116 #include <linux/tasks.h>
16117 #include <linux/kernel.h>
16118 #include <linux/types.h>
16119 #include <linux/times.h>
16120 #include <linux/timex.h>
16121
16122 #include <asm/system.h>
16123 #include <asm/semaphore.h>
16124 #include <asm/page.h>
16125
16126 #include <linux/smp.h>
16127 #include <linux/tty.h>
16128 #include <linux/sem.h>
16129 #include <linux/signal.h>
16130 #include <linux/securebits.h>
16131
16132 /* cloning flags: */
16133 /* signal mask to be sent at exit */
16134 #define CSIGNAL 0x000000ff
16135 /* set if VM shared between processes */
16136 #define CLONE_VM 0x00000100
16137 /* set if fs info shared between processes */
16138 #define CLONE_FS 0x00000200
16139 /* set if open files shared between processes */
16140 #define CLONE_FILES 0x00000400
16141 /* set if signal handlers shared */
16142 #define CLONE_SIGHAND 0x00000800
16143 /* set if pid shared */
16144 #define CLONE_PID 0x00001000
16145 /* set to let tracing continue on the child too */
16146 #define CLONE_PTRACE 0x00002000
16147 /* set if parent wants child to wake it on mm_release */
16148 #define CLONE_VFORK 0x00004000
16149
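/* Illustrative user-space sketch (not part of this header): how the
 * cloning flags above combine in practice. Assumes Linux with glibc's
 * clone() wrapper; child_fn and the 64 KB stack size are hypothetical
 * choices. CLONE_VM|CLONE_FS|CLONE_FILES yields a thread-like child,
 * and SIGCHLD in the CSIGNAL byte is the signal the parent receives
 * when the child exits. */
#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdlib.h>
#include <sys/wait.h>

static int child_fn(void *arg)
{
	*(int *)arg = 1;	/* parent sees this: the VM is shared */
	return 0;
}

int main(void)
{
	int shared = 0;
	char *stack = malloc(64 * 1024);
	int pid;

	if (!stack)
		return 1;
	/* the stack grows down on most architectures, so pass its top */
	pid = clone(child_fn, stack + 64 * 1024,
		    CLONE_VM | CLONE_FS | CLONE_FILES | SIGCHLD, &shared);
	if (pid < 0)
		return 1;
	waitpid(pid, NULL, 0);	/* reap like any fork()ed child */
	free(stack);
	return shared ? 0 : 1;
}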
16150 /* These are the constants used to fake the fixed-point
16151 * load-average counting. Some notes:
16152 * - 11 bit fractions expand to 22 bits by the
16153 * multiplies: this gives a load-average precision of 10
16154 * bits integer + 11 bits fractional
16155 * - if you want to count load-averages more often, you
16156 * need more precision, or rounding will get you. With
16157 * 2-second counting freq, the EXP_n values would be
16158 * 1981, 2034 and 2043 if still using only 11 bit
16159 * fractions. */
16160 extern unsigned long avenrun[]; /* Load averages */
16161
16162 #define FSHIFT 11 /* # bits of precision */
16163 #define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-pt */
16164 #define LOAD_FREQ (5*HZ) /* 5 sec intervals */
16165 #define EXP_1 1884 /* 1/exp(5sec/1min) as FP */
16166 #define EXP_5 2014 /* 1/exp(5sec/5min) */
16167 #define EXP_15 2037 /* 1/exp(5sec/15min) */
16168
16169 #define CALC_LOAD(load,exp,n) \
16170 load *= exp; \
16171 load += n*(FIXED_1-exp); \
16172 load >>= FSHIFT;
16173
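/* Illustrative user-space sketch (not kernel code): watch CALC_LOAD
 * converge on a steady run-queue length of 2.0 in fixed point. The
 * SIM_ constants mirror the ones above; the 3-minute horizon and the
 * once-a-minute printout are arbitrary choices. */
#include <stdio.h>

#define SIM_FSHIFT   11
#define SIM_FIXED_1  (1 << SIM_FSHIFT)
#define SIM_EXP_1    1884		/* 1/exp(5sec/1min) in fixed point */

#define SIM_CALC_LOAD(load, exp, n) \
	load *= exp; \
	load += n * (SIM_FIXED_1 - exp); \
	load >>= SIM_FSHIFT;

int main(void)
{
	unsigned long load = 0;			/* 0.0 in fixed point */
	unsigned long active = 2 * SIM_FIXED_1;	/* two runnable tasks */
	int tick;

	for (tick = 1; tick <= 36; tick++) {	/* 36 ticks * 5 s = 3 min */
		SIM_CALC_LOAD(load, SIM_EXP_1, active);
		if (tick % 12 == 0)		/* report once a minute */
			printf("load ~ %lu.%02lu\n", load >> SIM_FSHIFT,
			       (load & (SIM_FIXED_1 - 1)) * 100 / SIM_FIXED_1);
	}
	return 0;
}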
16174 #define CT_TO_SECS(x) ((x) / HZ)
16175 #define CT_TO_USECS(x) (((x) % HZ) * 1000000/HZ)
16176
16177 extern int nr_running, nr_tasks;
16178 extern int last_pid;
16179
16180 #include <linux/fs.h>
16181 #include <linux/time.h>
16182 #include <linux/param.h>
16183 #include <linux/resource.h>
16184 #include <linux/timer.h>
16185
16186 #include <asm/processor.h>
16187
16188 #define TASK_RUNNING 0
16189 #define TASK_INTERRUPTIBLE 1
16190 #define TASK_UNINTERRUPTIBLE 2
16191 #define TASK_ZOMBIE 4
16192 #define TASK_STOPPED 8
16193 #define TASK_SWAPPING 16
16194
16195 /* Scheduling policies */
16196 #define SCHED_OTHER 0
16197 #define SCHED_FIFO 1
16198 #define SCHED_RR 2
16199
16200 /* This is an additional bit set when we want to yield
16201 * the CPU for one re-schedule.. */
16202 #define SCHED_YIELD 0x10
16203
16204 struct sched_param {
16205 int sched_priority;
16206 };
16207
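/* Illustrative user-space sketch: the policy constants above back the
 * POSIX scheduling API. Requesting SCHED_RR normally requires root;
 * priority 10 is an arbitrary example value. */
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param sp;

	sp.sched_priority = 10;
	if (sched_setscheduler(0, SCHED_RR, &sp) < 0) {
		perror("sched_setscheduler");
		return 1;
	}
	printf("now SCHED_RR at priority %d\n", sp.sched_priority);
	return 0;
}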
16208 #ifndef NULL
16209 #define NULL ((void *) 0)
16210 #endif
16211
16212 #ifdef __KERNEL__
16213
16214 #include <asm/spinlock.h>
16215
16216 /* This serializes "schedule()" and also protects the
16217 * run-queue from deletions/modifications (but _adding_
16218 * to the beginning of the run-queue has a separate
16219 * lock). */
16220 extern rwlock_t tasklist_lock;
16221 extern spinlock_t scheduler_lock;
16222 extern spinlock_t runqueue_lock;
16223
16224 extern void sched_init(void);
16225 extern void show_state(void);
16226 extern void trap_init(void);
16227
16228 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
16229 extern signed long
16230 FASTCALL(schedule_timeout(signed long timeout));
16231 asmlinkage void schedule(void);
16232
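/* Sketch of the standard schedule_timeout() idiom (kernel context;
 * my_two_second_nap() is a hypothetical helper). The task state must
 * be set *before* the call, or it degenerates into a plain yield; the
 * return value is how many ticks remained when we were woken. */
static void my_two_second_nap(void)
{
	signed long remaining;

	current->state = TASK_INTERRUPTIBLE;
	remaining = schedule_timeout(2 * HZ);
	if (remaining)
		printk("woken up %ld ticks early\n", remaining);
}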
16233 /* Open file table structure */
16234 struct files_struct {
16235 atomic_t count;
16236 int max_fds;
16237 struct file ** fd; /* current fd array */
16238 fd_set close_on_exec;
16239 fd_set open_fds;
16240 };
16241
16242 #define INIT_FILES { \
16243 ATOMIC_INIT(1), \
16244 NR_OPEN, \
16245 &init_fd_array[0], \
16246 { { 0, } }, \
16247 { { 0, } } \
16248 }
16249
16250 struct fs_struct {
16251 atomic_t count;
16252 int umask;
16253 struct dentry * root, * pwd;
16254 };
16255
16256 #define INIT_FS { \
16257 ATOMIC_INIT(1), \
16258 0022, \
16259 NULL, NULL \
16260 }
16261
16262 /* Maximum number of active map areas.. This is a random
16263 * (large) number */
16264 #define MAX_MAP_COUNT (65536)
16265
16266 /* Number of map areas at which the AVL tree is
16267 * activated. This is arbitrary. */
16268 #define AVL_MIN_MAP_COUNT 32
16269
16270 struct mm_struct {
16271 struct vm_area_struct *mmap; /* list of VMAs */
16272 struct vm_area_struct *mmap_avl; /* tree of VMAs */
16273 /* last find_vma result */
16274 struct vm_area_struct *mmap_cache;
16275 pgd_t * pgd;
16276 atomic_t count;
16277 int map_count; /* number of VMAs */
16278 struct semaphore mmap_sem;
16279 unsigned long context;
16280 unsigned long start_code, end_code,
16281 start_data, end_data;
16282 unsigned long start_brk, brk, start_stack;
16283 unsigned long arg_start, arg_end, env_start, env_end;
16284 unsigned long rss, total_vm, locked_vm;
16285 unsigned long def_flags;
16286 unsigned long cpu_vm_mask;
16287 /* number of pages to swap on next pass */
16288 unsigned long swap_cnt;
16289 unsigned long swap_address;
16290 /* This is an architecture-specific pointer: the
16291 * portable part of Linux does not know about any
16292 * segments. */
16293 void * segments;
16294 };
16295
16296 #define INIT_MM { \
16297 &init_mmap, NULL, NULL, \
16298 swapper_pg_dir, \
16299 ATOMIC_INIT(1), 1, \
16300 MUTEX, \
16301 0, \
16302 0, 0, 0, 0, \
16303 0, 0, 0, \
16304 0, 0, 0, 0, \
16305 0, 0, 0, \
16306 0, 0, 0, 0, NULL }
16307
16308 struct signal_struct {
16309 atomic_t count;
16310 struct k_sigaction action[_NSIG];
16311 spinlock_t siglock;
16312 };
16313
16314 #define INIT_SIGNALS { \
16315 ATOMIC_INIT(1), \
16316 { {{0,}}, }, \
16317 SPIN_LOCK_UNLOCKED }
16318
16319 /* Some day this will be a full-fledged user tracking
16320 * system.. Right now it is only used to track how many
16321 * processes a user has, but it has the potential to
16322 * track memory usage etc. */
16323 struct user_struct;
16324
16325 struct task_struct {
16326 /* these are hardcoded - don't touch */
16327 /* -1 unrunnable, 0 runnable, >0 stopped */
16328 volatile long state;
16329 /* per process flags, defined below */
16330 unsigned long flags;
16331 int sigpending;
16332 mm_segment_t addr_limit;
16333 /* thread address space:
16334 * 0-0xBFFFFFFF for user-thread
16335 * 0-0xFFFFFFFF for kernel-thread */
16336 struct exec_domain *exec_domain;
16337 long need_resched;
16338
16339 /* various fields */
16340 long counter;
16341 long priority;
16342 cycles_t avg_slice;
16343 /* SMP and runqueue state */
16344 int has_cpu;
16345 int processor;
16346 int last_processor;
16347 /* Lock depth. We can context switch in and out of
16348 * holding a syscall kernel lock... */
16349 int lock_depth;
16350 struct task_struct *next_task, *prev_task;
16351 struct task_struct *next_run, *prev_run;
16352
16353 /* task state */
16354 struct linux_binfmt *binfmt;
16355 int exit_code, exit_signal;
16356 int pdeath_signal; /* Signal sent when parent dies */
16357 /* ??? */
16358 unsigned long personality;
16359 int dumpable:1;
16360 int did_exec:1;
16361 pid_t pid;
16362 pid_t pgrp;
16363 pid_t tty_old_pgrp;
16364 pid_t session;
16365 /* boolean value for session group leader */
16366 int leader;
16367 /* pointers to (original) parent process, youngest
16368 * child, younger sibling, older sibling, respectively.
16369 * (p->father can be replaced with p->p_pptr->pid) */
16370 struct task_struct *p_opptr, *p_pptr, *p_cptr,
16371 *p_ysptr, *p_osptr;
16372
16373 /* PID hash table linkage. */
16374 struct task_struct *pidhash_next;
16375 struct task_struct **pidhash_pprev;
16376
16377 /* Pointer to task[] array linkage. */
16378 struct task_struct **tarray_ptr;
16379
16380 struct wait_queue *wait_chldexit; /* for wait4() */
16381 struct semaphore *vfork_sem; /* for vfork() */
16382 unsigned long policy, rt_priority;
16383 unsigned long it_real_value, it_prof_value,
16384 it_virt_value;
16385 unsigned long it_real_incr, it_prof_incr, it_virt_incr;
16386 struct timer_list real_timer;
16387 struct tms times;
16388 unsigned long start_time;
16389 long per_cpu_utime[NR_CPUS], per_cpu_stime[NR_CPUS];
16390 /* mm fault and swap info: this can arguably be seen as
16391 either mm-specific or thread-specific */
16392 unsigned long min_flt, maj_flt, nswap,
16393 cmin_flt, cmaj_flt, cnswap;
16394 int swappable:1;
16395 /* process credentials */
16396 uid_t uid,euid,suid,fsuid;
16397 gid_t gid,egid,sgid,fsgid;
16398 int ngroups;
16399 gid_t groups[NGROUPS];
16400 kernel_cap_t cap_effective, cap_inheritable,
16401 cap_permitted;
16402 struct user_struct *user;
16403 /* limits */
16404 struct rlimit rlim[RLIM_NLIMITS];
16405 unsigned short used_math;
16406 char comm[16];
16407 /* file system info */
16408 int link_count;
16409 struct tty_struct *tty; /* NULL if no tty */
16410 /* ipc stuff */
16411 struct sem_undo *semundo;
16412 struct sem_queue *semsleeping;
16413 /* tss for this task */
16414 struct thread_struct tss;
16415 /* filesystem information */
16416 struct fs_struct *fs;
16417 /* open file information */
16418 struct files_struct *files;
16419 /* memory management info */
16420 struct mm_struct *mm;
16421
16422 /* signal handlers */
16423 spinlock_t sigmask_lock; /* Protects signal & blocked*/
16424 struct signal_struct *sig;
16425 sigset_t signal, blocked;
16426 struct signal_queue *sigqueue, **sigqueue_tail;
16427 unsigned long sas_ss_sp;
16428 size_t sas_ss_size;
16429 };
16430
16431 /* Per process flags */
16432 /* Print alignment warning msgs */
16433 /* (not implemented yet, only for 486) */
16434 #define PF_ALIGNWARN 0x00000001
16435 /* being created */
16436 #define PF_STARTING 0x00000002
16437 /* getting shut down */
16438 #define PF_EXITING 0x00000004
16439 /* set if ptrace (0) has been called */
16440 #define PF_PTRACED 0x00000010
16441 /* tracing system calls */
16442 #define PF_TRACESYS 0x00000020
16443 /* forked but didn't exec */
16444 #define PF_FORKNOEXEC 0x00000040
16445 /* used super-user privileges */
16446 #define PF_SUPERPRIV 0x00000100
16447 /* dumped core */
16448 #define PF_DUMPCORE 0x00000200
16449 /* killed by a signal */
16450 #define PF_SIGNALED 0x00000400
16451 /* Allocating memory */
16452 #define PF_MEMALLOC 0x00000800
16453 /* Wake up parent in mm_release */
16454 #define PF_VFORK 0x00001000
16455
16456 /* task used FPU this quantum (SMP) */
16457 #define PF_USEDFPU 0x00100000
16458 /* delayed trace (used on m68k, i386) */
16459 #define PF_DTRACE 0x00200000
16460
16461 /* Limit the stack to some sane default: root can
16462 * always increase this limit if needed.. 8MB seems
16463 * reasonable. */
16464 #define _STK_LIM (8*1024*1024)
16465
16466 #define DEF_PRIORITY (20*HZ/100) /* 200-ms time slices */
16467
16468 /* INIT_TASK is used to set up the first task table;
16469 * touch at your own risk! Base=0, limit=0x1fffff (=2MB)
16470 */
16471 #define INIT_TASK \
16472 /* state etc */{0,0,0,KERNEL_DS,&default_exec_domain,0, \
16473 /* counter */ DEF_PRIORITY,DEF_PRIORITY,0, \
16474 /* SMP */ 0,0,0,-1, \
16475 /* schedlink */ &init_task,&init_task, &init_task, \
16476 &init_task, \
16477 /* binfmt */ NULL, \
16478 /* ec,brk... */ 0,0,0,0,0,0, \
16479 /* pid etc.. */ 0,0,0,0,0, \
16480 /* proc links*/ &init_task,&init_task,NULL,NULL,NULL, \
16481 /* pidhash */ NULL, NULL, \
16482 /* tarray */ &task[0], \
16483 /* chld wait */ NULL, NULL, \
16484 /* timeout */ SCHED_OTHER,0,0,0,0,0,0,0, \
16485 /* timer */ { NULL, NULL, 0, 0, it_real_fn }, \
16486 /* utime */ {0,0,0,0},0, \
16487 /* per CPU times */ {0, }, {0, }, \
16488 /* flt */ 0,0,0,0,0,0, \
16489 /* swp */ 0, \
16490 /* process credentials */ \
16491 /* uid etc */ 0,0,0,0,0,0,0,0, \
16492 /* suppl grps*/ 0, {0,}, \
16493 /* caps */ CAP_INIT_EFF_SET,CAP_INIT_INH_SET, \
16494 CAP_FULL_SET, \
16495 /* user */ NULL, \
16496 /* rlimits */ INIT_RLIMITS, \
16497 /* math */ 0, \
16498 /* comm */ "swapper", \
16499 /* fs info */ 0,NULL, \
16500 /* ipc */ NULL, NULL, \
16501 /* tss */ INIT_TSS, \
16502 /* fs */ &init_fs, \
16503 /* files */ &init_files, \
16504 /* mm */ &init_mm, \
16505 /* signals */ SPIN_LOCK_UNLOCKED, &init_signals, \
16506 {{0}}, {{0}}, NULL, &init_task.sigqueue, 0, 0, \
16507 }
16508
16509 union task_union {
16510 struct task_struct task;
16511 unsigned long stack[2048];
16512 };
16513
16514 extern union task_union init_task_union;
16515
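/* Sketch: why task_union packs the task_struct and kernel stack into
 * one 8 KB block (2048 longs on 32-bit). <asm/current.h> can then
 * recover `current` just by masking the stack pointer; this i386
 * version is illustrative, not a portable interface. */
static inline struct task_struct *current_from_sp(void)
{
	struct task_struct *t;
	__asm__("andl %%esp,%0" : "=r" (t) : "0" (~8191UL));
	return t;	/* base of the 8 KB-aligned task_union */
}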
16516 extern struct mm_struct init_mm;
16517 extern struct task_struct *task[NR_TASKS];
16518
16519 extern struct task_struct **tarray_freelist;
16520 extern spinlock_t taskslot_lock;
16521
16522 extern __inline__ void
16523 add_free_taskslot(struct task_struct **t)
16524 {
16525 spin_lock(&taskslot_lock);
16526 *t = (struct task_struct *) tarray_freelist;
16527 tarray_freelist = t;
16528 spin_unlock(&taskslot_lock);
16529 }
16530
16531 extern __inline__ struct task_struct **
16532 get_free_taskslot(void)
16533 {
16534 struct task_struct **tslot;
16535
16536 spin_lock(&taskslot_lock);
16537 if((tslot = tarray_freelist) != NULL)
16538 tarray_freelist = (struct task_struct **) *tslot;
16539 spin_unlock(&taskslot_lock);
16540
16541 return tslot;
16542 }
16543
16544 /* PID hashing. */
16545 #define PIDHASH_SZ (NR_TASKS >> 2)
16546 extern struct task_struct *pidhash[PIDHASH_SZ];
16547
16548 #define pid_hashfn(x) \
16549 ((((x) >> 8) ^ (x)) & (PIDHASH_SZ - 1))
16550
16551 extern __inline__ void hash_pid(struct task_struct *p)
16552 {
16553 struct task_struct **htable =
16554 &pidhash[pid_hashfn(p->pid)];
16555
16556 if((p->pidhash_next = *htable) != NULL)
16557 (*htable)->pidhash_pprev = &p->pidhash_next;
16558 *htable = p;
16559 p->pidhash_pprev = htable;
16560 }
16561
16562 extern __inline__ void unhash_pid(struct task_struct *p)
16563 {
16564 if(p->pidhash_next)
16565 p->pidhash_next->pidhash_pprev = p->pidhash_pprev;
16566 *p->pidhash_pprev = p->pidhash_next;
16567 }
16568
16569 extern __inline__ struct task_struct *
16570 find_task_by_pid(int pid)
16571 {
16572 struct task_struct *p, **htable =
16573 &pidhash[pid_hashfn(pid)];
16574
16575 for(p = *htable; p && p->pid != pid;
16576 p = p->pidhash_next)
16577 ;
16578
16579 return p;
16580 }
16581
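/* Illustrative user-space sketch of pid_hashfn(): folding the high
 * byte into the low bits spreads both consecutive and widely-spaced
 * PIDs across the table. Assumes NR_TASKS = 512, i.e. 128 buckets. */
#include <stdio.h>

#define SIM_PIDHASH_SZ (512 >> 2)
#define sim_pid_hashfn(x) ((((x) >> 8) ^ (x)) & (SIM_PIDHASH_SZ - 1))

int main(void)
{
	int pids[] = { 1, 127, 128, 129, 1000 };
	int i;

	for (i = 0; i < 5; i++)
		printf("pid %4d -> bucket %3d\n",
		       pids[i], sim_pid_hashfn(pids[i]));
	return 0;
}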
16582 /* per-UID process charging. */
16583 extern int alloc_uid(struct task_struct *p);
16584 void free_uid(struct task_struct *p);
16585
16586 #include <asm/current.h>
16587
16588 extern unsigned long volatile jiffies;
16589 extern unsigned long itimer_ticks;
16590 extern unsigned long itimer_next;
16591 extern struct timeval xtime;
16592 extern void do_timer(struct pt_regs *);
16593
16594 extern unsigned int * prof_buffer;
16595 extern unsigned long prof_len;
16596 extern unsigned long prof_shift;
16597
16598 #define CURRENT_TIME (xtime.tv_sec)
16599
16600 extern void FASTCALL(__wake_up(struct wait_queue ** p,
16601 unsigned int mode));
16602 extern void FASTCALL(sleep_on(struct wait_queue ** p));
16603 extern long FASTCALL(sleep_on_timeout(
16604 struct wait_queue ** p, signed long timeout));
16605 extern void FASTCALL(interruptible_sleep_on(
16606 struct wait_queue ** p));
16607 extern long FASTCALL(interruptible_sleep_on_timeout(
16608 struct wait_queue ** p, signed long timeout));
16609 extern void FASTCALL(wake_up_process(
16610 struct task_struct * tsk));
16611
16612 #define wake_up(x) \
16613 __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
16614 #define wake_up_interruptible(x) \
16615 __wake_up((x),TASK_INTERRUPTIBLE)
16616
16617 extern int in_group_p(gid_t grp);
16618
16619 extern void flush_signals(struct task_struct *);
16620 extern void flush_signal_handlers(struct task_struct *);
16621 extern int dequeue_signal(sigset_t *block, siginfo_t *);
16622 extern int send_sig_info(int, struct siginfo *info,
16623 struct task_struct *);
16624 extern int force_sig_info(int, struct siginfo *info,
16625 struct task_struct *);
16626 extern int kill_pg_info(int, struct siginfo *info,pid_t);
16627 extern int kill_sl_info(int, struct siginfo *info,pid_t);
16628 extern int kill_proc_info(int, struct siginfo *info,
16629 pid_t);
16630 extern int kill_something_info(int, struct siginfo *info,
16631 int);
16632 extern void notify_parent(struct task_struct * tsk, int);
16633 extern void force_sig(int sig, struct task_struct * p);
16634 extern int send_sig(int sig, struct task_struct * p,
16635 int priv);
16636 extern int kill_pg(pid_t, int, int);
16637 extern int kill_sl(pid_t, int, int);
16638 extern int kill_proc(pid_t, int, int);
16639 extern int do_sigaction(int sig,
16640 const struct k_sigaction *act, struct k_sigaction *oact);
16641 extern int do_sigaltstack(const stack_t *ss,
16642 stack_t *oss, unsigned long sp);
16643
16644 extern inline int signal_pending(struct task_struct *p)
16645 {
16646 return (p->sigpending != 0);
16647 }
16648
16649 /* Reevaluate whether the task has signals pending
16650 * delivery. This is required every time the blocked
16651 * sigset_t changes. All callers should have
16652 * t->sigmask_lock. */
16653
16654 static inline void recalc_sigpending(
16655 struct task_struct *t)
16656 {
16657 unsigned long ready;
16658 long i;
16659
16660 switch (_NSIG_WORDS) {
16661 default:
16662 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
16663 ready |= t->signal.sig[i] &~ t->blocked.sig[i];
16664 break;
16665
16666 case 4: ready = t->signal.sig[3] &~ t->blocked.sig[3];
16667 ready |= t->signal.sig[2] &~ t->blocked.sig[2];
16668 ready |= t->signal.sig[1] &~ t->blocked.sig[1];
16669 ready |= t->signal.sig[0] &~ t->blocked.sig[0];
16670 break;
16671
16672 case 2: ready = t->signal.sig[1] &~ t->blocked.sig[1];
16673 ready |= t->signal.sig[0] &~ t->blocked.sig[0];
16674 break;
16675
16676 case 1: ready = t->signal.sig[0] &~ t->blocked.sig[0];
16677 }
16678
16679 t->sigpending = (ready != 0);
16680 }
16681
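/* Sketch of the caller pattern the comment above prescribes
 * (block_one_signal() is a hypothetical helper): modify `blocked`
 * only under sigmask_lock, then recalculate the cached flag. */
static void block_one_signal(struct task_struct *t, int sig)
{
	spin_lock_irq(&t->sigmask_lock);
	sigaddset(&t->blocked, sig);
	recalc_sigpending(t);	/* keep t->sigpending coherent */
	spin_unlock_irq(&t->sigmask_lock);
}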
16682 /* True if we are on the alternate signal stack. */
16683
16684 static inline int on_sig_stack(unsigned long sp)
16685 {
16686 return (sp >= current->sas_ss_sp
16687 && sp < current->sas_ss_sp + current->sas_ss_size);
16688 }
16689
16690 static inline int sas_ss_flags(unsigned long sp)
16691 {
16692 return (current->sas_ss_size == 0 ? SS_DISABLE
16693 : on_sig_stack(sp) ? SS_ONSTACK : 0);
16694 }
16695
16696 extern int request_irq(unsigned int irq,
16697 void (*handler)(int, void *, struct pt_regs *),
16698 unsigned long flags, const char *device, void *dev_id);
16699 extern void free_irq(unsigned int irq, void *dev_id);
16700
16701 /* This has now become a routine instead of a macro; it
16702 * sets a flag if it returns true (to do BSD-style
16703 * accounting where the process is flagged if it uses
16704 * root privs). The implication of this is that you
16705 * should do normal permissions checks first, and check
16706 * suser() last.
16707 *
16708 * [Dec 1997 -- Chris Evans] For correctness, the above
16709 * considerations need to be extended to fsuser(). This
16710 * is done, along with moving fsuser() checks to be last.
16711 *
16712 * These will be removed, but in the mean time, when the
16713 * SECURE_NOROOT flag is set, uids don't grant privilege.
16714 */
16715 extern inline int suser(void)
16716 {
16717 if (!issecure(SECURE_NOROOT) && current->euid == 0) {
16718 current->flags |= PF_SUPERPRIV;
16719 return 1;
16720 }
16721 return 0;
16722 }
16723
16724 extern inline int fsuser(void)
16725 {
16726 if (!issecure(SECURE_NOROOT) && current->fsuid == 0) {
16727 current->flags |= PF_SUPERPRIV;
16728 return 1;
16729 }
16730 return 0;
16731 }
16732
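/* Sketch of the check ordering the comment above asks for
 * (my_owner_check() is a hypothetical helper): test plain ownership
 * first, so PF_SUPERPRIV is only set when root privilege was really
 * what allowed the operation. */
static int my_owner_check(struct inode *inode)
{
	if (current->fsuid == inode->i_uid)
		return 0;	/* owner: no privilege used */
	if (fsuser())
		return 0;	/* root: PF_SUPERPRIV now set */
	return -EPERM;
}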
16733 /* capable() checks for a particular capability. New
16734 * privilege checks should use this interface, rather
16735 * than suser() or fsuser(). See
16736 * include/linux/capability.h for defined capabilities.*/
16737
16738 extern inline int capable(int cap)
16739 {
16740 #if 1 /* ok now */
16741 if (cap_raised(current->cap_effective, cap))
16742 #else
16743 if (cap_is_fs_cap(cap)
16744 ? current->fsuid == 0 : current->euid == 0)
16745 #endif
16746 {
16747 current->flags |= PF_SUPERPRIV;
16748 return 1;
16749 }
16750 return 0;
16751 }
16752
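/* Sketch of a capability check in the style this comment recommends
 * (my_set_rt_priority() is a hypothetical helper): gate the
 * privileged branch on one specific capability, not on uid 0. */
static int my_set_rt_priority(struct task_struct *p, unsigned long prio)
{
	if (!capable(CAP_SYS_NICE))
		return -EPERM;	/* capable() sets PF_SUPERPRIV on success */
	p->rt_priority = prio;
	return 0;
}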
16753 /* Routines for handling mm_structs */
16754 extern struct mm_struct * mm_alloc(void);
16755 static inline void mmget(struct mm_struct * mm)
16756 {
16757 atomic_inc(&mm->count);
16758 }
16759 extern void mmput(struct mm_struct *);
16760 /* Remove the current task's stale references to the old
16761 mm_struct */
16762 extern void mm_release(void);
16763
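/* Sketch of the reference-counting discipline for mm_structs
 * (my_peek_brk() is a hypothetical consumer): pin the mm before
 * touching it, release it afterwards. */
static unsigned long my_peek_brk(struct task_struct *p)
{
	struct mm_struct *mm = p->mm;
	unsigned long brk;

	mmget(mm);	/* atomic_inc(&mm->count) */
	brk = mm->brk;
	mmput(mm);	/* frees the mm once count drops to zero */
	return brk;
}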
16764 extern int copy_thread(int, unsigned long,
16765 unsigned long, struct task_struct *, struct pt_regs *);
16766 extern void flush_thread(void);
16767 extern void exit_thread(void);
16768
16769 extern void exit_mm(struct task_struct *);
16770 extern void exit_fs(struct task_struct *);
16771 extern void exit_files(struct task_struct *);
16772 extern void exit_sighand(struct task_struct *);
16773
16774 extern int do_execve(char *, char **, char **,
16775 struct pt_regs *);
16776 extern int do_fork(unsigned long, unsigned long,
16777 struct pt_regs *);
16778
16779 /* The wait-queues are circular lists, and you have to be
16780 * *very* sure to keep them correct. Use only these two
16781 * functions to add/remove entries in the queues. */
16782 extern inline void __add_wait_queue(
16783 struct wait_queue ** p, struct wait_queue * wait)
16784 {
16785 wait->next = *p ? : WAIT_QUEUE_HEAD(p);
16786 *p = wait;
16787 }
16788
16789 extern rwlock_t waitqueue_lock;
16790
16791 extern inline void add_wait_queue(struct wait_queue ** p,
16792 struct wait_queue * wait)
16793 {
16794 unsigned long flags;
16795
16796 write_lock_irqsave(&waitqueue_lock, flags);
16797 __add_wait_queue(p, wait);
16798 write_unlock_irqrestore(&waitqueue_lock, flags);
16799 }
16800
16801 extern inline void __remove_wait_queue(
16802 struct wait_queue ** p, struct wait_queue * wait)
16803 {
16804 struct wait_queue * next = wait->next;
16805 struct wait_queue * head = next;
16806 struct wait_queue * tmp;
16807
16808 while ((tmp = head->next) != wait) {
16809 head = tmp;
16810 }
16811 head->next = next;
16812 }
16813
16814 extern inline void remove_wait_queue(
16815 struct wait_queue ** p, struct wait_queue * wait)
16816 {
16817 unsigned long flags;
16818
16819 write_lock_irqsave(&waitqueue_lock, flags);
16820 __remove_wait_queue(p, wait);
16821 write_unlock_irqrestore(&waitqueue_lock, flags);
16822 }
16823
16824 #define __wait_event(wq, condition) \
16825 do { \
16826 struct wait_queue __wait; \
16827 \
16828 __wait.task = current; \
16829 add_wait_queue(&wq, &__wait); \
16830 for (;;) { \
16831 current->state = TASK_UNINTERRUPTIBLE; \
16832 if (condition) \
16833 break; \
16834 schedule(); \
16835 } \
16836 current->state = TASK_RUNNING; \
16837 remove_wait_queue(&wq, &__wait); \
16838 } while (0)
16839
16840 #define wait_event(wq, condition) \
16841 do { \
16842 if (condition) \
16843 break; \
16844 __wait_event(wq, condition); \
16845 } while (0)
16846
16847 #define __wait_event_interruptible(wq, condition, ret) \
16848 do { \
16849 struct wait_queue __wait; \
16850 \
16851 __wait.task = current; \
16852 add_wait_queue(&wq, &__wait); \
16853 for (;;) { \
16854 current->state = TASK_INTERRUPTIBLE; \
16855 if (condition) \
16856 break; \
16857 if (!signal_pending(current)) { \
16858 schedule(); \
16859 continue; \
16860 } \
16861 ret = -ERESTARTSYS; \
16862 break; \
16863 } \
16864 current->state = TASK_RUNNING; \
16865 remove_wait_queue(&wq, &__wait); \
16866 } while (0)
16867
16868 #define wait_event_interruptible(wq, condition) \
16869 ({ \
16870 int __ret = 0; \
16871 if (!(condition)) \
16872 __wait_event_interruptible(wq, condition, __ret); \
16873 __ret; \
16874 })
16875
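/* Sketch of the classic producer/consumer pattern these macros
 * support (driver-style; data_ready, my_read() and my_intr() are
 * hypothetical). The reader sleeps until the flag is set, or until
 * a signal arrives. */
static struct wait_queue *read_wait = NULL;
static volatile int data_ready = 0;

static int my_read(void)
{
	int err = wait_event_interruptible(read_wait, data_ready);
	if (err)
		return err;	/* -ERESTARTSYS: interrupted by a signal */
	data_ready = 0;
	return 0;
}

static void my_intr(void)
{
	data_ready = 1;
	wake_up_interruptible(&read_wait);	/* kick sleeping readers */
}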
16876 #define REMOVE_LINKS(p) do { \
16877 (p)->next_task->prev_task = (p)->prev_task; \
16878 (p)->prev_task->next_task = (p)->next_task; \
16879 if ((p)->p_osptr) \
16880 (p)->p_osptr->p_ysptr = (p)->p_ysptr; \
16881 if ((p)->p_ysptr) \
16882 (p)->p_ysptr->p_osptr = (p)->p_osptr; \
16883 else \
16884 (p)->p_pptr->p_cptr = (p)->p_osptr; \
16885 } while (0)
16886
16887 #define SET_LINKS(p) do { \
16888 (p)->next_task = &init_task; \
16889 (p)->prev_task = init_task.prev_task; \
16890 init_task.prev_task->next_task = (p); \
16891 init_task.prev_task = (p); \
16892 (p)->p_ysptr = NULL; \
16893 if (((p)->p_osptr = (p)->p_pptr->p_cptr) != NULL) \
16894 (p)->p_osptr->p_ysptr = p; \
16895 (p)->p_pptr->p_cptr = p; \
16896 } while (0)
16897
16898 #define for_each_task(p) \
16899 for(p = &init_task; (p = p->next_task) != &init_task; )
16900
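/* Sketch of the canonical traversal for_each_task() supports
 * (count_tasks_in_state() is a hypothetical helper). The walk must
 * hold tasklist_lock for reading so REMOVE_LINKS/SET_LINKS cannot
 * run concurrently. */
static int count_tasks_in_state(long state)
{
	struct task_struct *p;
	int n = 0;

	read_lock(&tasklist_lock);
	for_each_task(p)
		if (p->state == state)
			n++;
	read_unlock(&tasklist_lock);
	return n;
}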
16901 #endif /* __KERNEL__ */
16902
16903 #endif