/* include/linux/tqueue.h */
/*
 * tqueue.h --- task queue handling for Linux.
 *
 * Mostly based on a proposed bottom-half replacement
 * code written by Kai Petzke,
 * wpp@marie.physik.tu-berlin.de.
 *
 * Modified for use in the Linux kernel by Theodore Ts'o,
 * tytso@mit.edu.  Any bugs are my fault, not Kai's.
 *
 * The original comment follows below. */

18539 #ifndef _LINUX_TQUEUE_H
18540 #define _LINUX_TQUEUE_H
18541
18542 #include <asm/bitops.h>
18543 #include <asm/system.h>
18544 #include <asm/spinlock.h>
18545
/* New proposed "bottom half" handlers:
 * (C) 1994 Kai Petzke, wpp@marie.physik.tu-berlin.de
 *
 * Advantages:
 * - Bottom halves are implemented as a linked list.  You
 *   can have as many of them as you want.
 * - No more scanning of a bit field is required upon
 *   call of a bottom half.
 * - Support for chained bottom half lists.  The
 *   run_task_queue() function can be used as a bottom half
 *   handler.  This is for example useful for bottom halves,
 *   which want to be delayed until the next clock tick.
 *
 * Problems:
 * - The queue_task_irq() inline function is only atomic
 *   with respect to itself.  Problems can occur, when
 *   queue_task_irq() is called from a normal system
 *   call, and an interrupt comes in.  No problems occur,
 *   when queue_task_irq() is called from an interrupt or
 *   bottom half, and interrupted, as run_task_queue()
 *   will not be executed/continued before the last
 *   interrupt returns.  If in doubt, use queue_task(),
 *   not queue_task_irq().
 * - Bottom halves are called in the reverse order that
 *   they were linked into the list. */

/*
 * One deferred-work item.  Link it onto a task_queue with
 * queue_task(); run_task_queue() later calls routine(data)
 * once and clears sync, after which the item may be queued
 * again.  sync MUST start out zero or the entry will never
 * be inserted.
 */
struct tq_struct {
	struct tq_struct *next;		/* linked list of active BHs */
	unsigned long sync;		/* must be initialized to 0 */
	void (*routine)(void *);	/* function to call */
	void *data;			/* arg to function */
};

/* A task queue is simply the head pointer of a singly
 * linked list of tq_struct entries; NULL means empty. */
typedef struct tq_struct * task_queue;

/* Define (and empty-initialize) a task queue head. */
#define DECLARE_TASK_QUEUE(q)  task_queue q = NULL

/* Predefined system task queues, defined elsewhere in the
 * kernel. */
extern task_queue tq_timer, tq_immediate, tq_scheduler,
	tq_disk;

/* To implement your own list of active bottom halves, use
 * the following two definitions:
 *
 * struct tq_struct *my_bh = NULL;
 * struct tq_struct run_my_bh = {
 *	0, 0, (void (*)(void *)) run_task_queue, &my_bh
 * };
 *
 * To activate a bottom half on your list, use:
 *
 *	queue_task(tq_pointer, &my_bh);
 *
 * To run the bottom halves on your list put them on the
 * immediate list by:
 *
 *	queue_task(&run_my_bh, &tq_immediate);
 *
 * This allows you to do deferred processing.  For
 * example, you could have a bottom half list tq_timer,
 * which is marked active by the timer interrupt. */

18607 extern spinlock_t tqueue_lock;
18608
18609 /* queue_task */
18610 extern __inline__ void queue_task(
18611 struct tq_struct *bh_pointer, task_queue *bh_list)
18612 {
18613 if (!test_and_set_bit(0,&bh_pointer->sync)) {
18614 unsigned long flags;
18615 spin_lock_irqsave(&tqueue_lock, flags);
18616 bh_pointer->next = *bh_list;
18617 *bh_list = bh_pointer;
18618 spin_unlock_irqrestore(&tqueue_lock, flags);
18619 }
18620 }
18621
18622 /* Call all "bottom halfs" on a given list. */
18623 extern __inline__ void run_task_queue(task_queue *list)
18624 {
18625 if (*list) {
18626 unsigned long flags;
18627 struct tq_struct *p;
18628
18629 spin_lock_irqsave(&tqueue_lock, flags);
18630 p = *list;
18631 *list = NULL;
18632 spin_unlock_irqrestore(&tqueue_lock, flags);
18633
18634 while (p) {
18635 void *arg;
18636 void (*f) (void *);
18637 struct tq_struct *save_p;
18638 arg = p -> data;
18639 f = p -> routine;
18640 save_p = p;
18641 p = p -> next;
18642 mb();
18643 save_p -> sync = 0;
18644 (*f)(arg);
18645 }
18646 }
18647 }
18648
18649 #endif /* _LINUX_TQUEUE_H */