/* include/asm-i386/spinlock.h */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

/*
 * i386 spinlock and read-write lock primitives.
 *
 * On UP (!__SMP__) kernels the locks compile away (optionally keeping
 * debug state); only the *_irq/*_irqsave variants do real work, by
 * disabling local interrupts.  On SMP kernels the locks are real,
 * built from locked bit-test instructions.
 */

#ifndef __SMP__

/* 0 == no debugging, 1 == maintain lock state, 2 == full debug */
#define DEBUG_SPINLOCKS 0

#if (DEBUG_SPINLOCKS < 1)

/*
 * Your basic spinlocks, allowing only a single CPU anywhere.
 * On UP with no debugging they are pure no-ops.
 *
 * Gcc-2.7.x has a nasty bug with empty initializers.
 */
#if (__GNUC__ > 2) || \
    (__GNUC__ == 2 && __GNUC_MINOR__ >= 8)
typedef struct { } spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { }
#else
typedef struct { int gcc_is_buggy; } spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
#endif

/* No inter-CPU exclusion is needed on UP: plain variants vanish. */
#define spin_lock_init(lock)	do { } while(0)
#define spin_lock(lock)		do { } while(0)
#define spin_trylock(lock)	(1)
#define spin_unlock_wait(lock)	do { } while(0)
#define spin_unlock(lock)	do { } while(0)
/* The irq-safe variants still must mask local interrupts. */
#define spin_lock_irq(lock)	cli()
#define spin_unlock_irq(lock)	sti()

#define spin_lock_irqsave(lock, flags) \
	do { save_flags(flags); cli(); } while (0)
#define spin_unlock_irqrestore(lock, flags) \
	restore_flags(flags)

#elif (DEBUG_SPINLOCKS < 2)

/* Minimal debugging: remember whether the lock is held, but do not
 * complain about misuse. */
typedef struct {
	volatile unsigned int lock;
} spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }

#define spin_lock_init(x)	do { (x)->lock = 0; } while (0)
/* test_and_set_bit() acts on the first word of *lock, i.e. the
 * 'lock' member at offset 0. */
#define spin_trylock(lock)	(!test_and_set_bit(0,(lock)))

#define spin_lock(x)		do { (x)->lock = 1; } while (0)
#define spin_unlock_wait(x)	do { } while (0)
#define spin_unlock(x)		do { (x)->lock = 0; } while (0)
#define spin_lock_irq(x) \
	do { cli(); spin_lock(x); } while (0)
#define spin_unlock_irq(x) \
	do { spin_unlock(x); sti(); } while (0)

#define spin_lock_irqsave(x, flags) \
	do { save_flags(flags); spin_lock_irq(x); } while (0)
#define spin_unlock_irqrestore(x, flags) \
	do { spin_unlock(x); restore_flags(flags); } while (0)

#else /* (DEBUG_SPINLOCKS >= 2) */

/*
 * Full debugging: track lock state and printk a complaint (at most
 * 'babble' times per lock, so a broken caller cannot flood the log)
 * whenever a lock is taken while already held or released while not
 * held.  'module' records the file that initialized the lock.
 */
typedef struct {
	volatile unsigned int lock;
	volatile unsigned int babble;	/* complaints still allowed */
	const char *module;		/* initializing file, for diagnostics */
} spinlock_t;
#define SPIN_LOCK_UNLOCKED \
	(spinlock_t) { 0, 25, __BASE_FILE__ }

#include <linux/kernel.h>	/* printk() */

#define spin_lock_init(x)	do { (x)->lock = 0; } while (0)
#define spin_trylock(lock)	(!test_and_set_bit(0,(lock)))

/* The check-and-update runs with local interrupts disabled so an
 * interrupt handler cannot interleave between test and store. */
#define spin_lock(x) \
	do { \
		unsigned long __spinflags; \
		save_flags(__spinflags); \
		cli(); \
		if ((x)->lock&&(x)->babble) { \
			printk("%s:%d: spin_lock(%s:%p) already locked\n", \
			       __BASE_FILE__,__LINE__, (x)->module, (x)); \
			(x)->babble--; \
		} \
		(x)->lock = 1; \
		restore_flags(__spinflags); \
	} while (0)
/* On UP a held lock can never be released by another CPU, so a wait
 * on a held lock is a guaranteed deadlock - report it. */
#define spin_unlock_wait(x) \
	do { \
		unsigned long __spinflags; \
		save_flags(__spinflags); \
		cli(); \
		if ((x)->lock&&(x)->babble) { \
			printk("%s:%d: spin_unlock_wait(%s:%p) deadlock\n", \
			       __BASE_FILE__,__LINE__, (x)->module, (x)); \
			(x)->babble--; \
		} \
		restore_flags(__spinflags); \
	} while (0)
#define spin_unlock(x) \
	do { \
		unsigned long __spinflags; \
		save_flags(__spinflags); \
		cli(); \
		if (!(x)->lock&&(x)->babble) { \
			printk("%s:%d: spin_unlock(%s:%p) not locked\n", \
			       __BASE_FILE__,__LINE__, (x)->module, (x)); \
			(x)->babble--; \
		} \
		(x)->lock = 0; \
		restore_flags(__spinflags); \
	} while (0)
#define spin_lock_irq(x) \
	do { \
		cli(); \
		if ((x)->lock&&(x)->babble) { \
			printk("%s:%d: spin_lock_irq(%s:%p) already locked\n", \
			       __BASE_FILE__,__LINE__, (x)->module, (x)); \
			(x)->babble--; \
		} \
		(x)->lock = 1; \
	} while (0)
/* Fix: this message used to say "spin_lock", pointing the reader at
 * the wrong primitive. */
#define spin_unlock_irq(x) \
	do { \
		cli(); \
		if (!(x)->lock&&(x)->babble) { \
			printk("%s:%d: spin_unlock_irq(%s:%p) not locked\n", \
			       __BASE_FILE__,__LINE__, (x)->module, (x)); \
			(x)->babble--; \
		} \
		(x)->lock = 0; \
		sti(); \
	} while (0)
#define spin_lock_irqsave(x,flags) \
	do { \
		save_flags(flags); \
		cli(); \
		if ((x)->lock&&(x)->babble) { \
			printk("%s:%d: spin_lock_irqsave(%s:%p) already locked\n", \
			       __BASE_FILE__,__LINE__, \
			       (x)->module, (x)); \
			(x)->babble--; \
		} \
		(x)->lock = 1; \
	} while (0)
#define spin_unlock_irqrestore(x,flags) \
	do { \
		cli(); \
		if (!(x)->lock&&(x)->babble) { \
			printk("%s:%d: spin_unlock_irqrestore(%s:%p) not locked\n", \
			       __BASE_FILE__,__LINE__, \
			       (x)->module, (x)); \
			(x)->babble--; \
		} \
		(x)->lock = 0; \
		restore_flags(flags); \
	} while (0)

#endif /* DEBUG_SPINLOCKS */

/*
 * Read-write spinlocks, allowing multiple readers but only one
 * writer.
 *
 * NOTE! it is quite common to have readers in interrupts but no
 * interrupt writers. For those circumstances we can "mix" irq-safe
 * locks - any writer needs to get a irq-safe write-lock, but readers
 * can get non-irqsafe read-locks.
 *
 * Gcc-2.7.x has a nasty bug with empty initializers.
 */
#if (__GNUC__ > 2) || \
    (__GNUC__ == 2 && __GNUC_MINOR__ >= 8)
typedef struct { } rwlock_t;
#define RW_LOCK_UNLOCKED (rwlock_t) { }
#else
typedef struct { int gcc_is_buggy; } rwlock_t;
#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
#endif

/* No-ops on UP, exactly as for the plain spinlocks above. */
#define read_lock(lock)		do { } while(0)
#define read_unlock(lock)	do { } while(0)
#define write_lock(lock)	do { } while(0)
#define write_unlock(lock)	do { } while(0)
#define read_lock_irq(lock)	cli()
#define read_unlock_irq(lock)	sti()
#define write_lock_irq(lock)	cli()
#define write_unlock_irq(lock)	sti()

#define read_lock_irqsave(lock, flags) \
	do { save_flags(flags); cli(); } while (0)
#define read_unlock_irqrestore(lock, flags) \
	restore_flags(flags)
#define write_lock_irqsave(lock, flags) \
	do { save_flags(flags); cli(); } while (0)
#define write_unlock_irqrestore(lock, flags) \
	restore_flags(flags)

#else /* __SMP__ */

/*
 * Your basic spinlocks, allowing only a single CPU anywhere.
 */
typedef struct {
	volatile unsigned int lock;
} spinlock_t;

#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }

#define spin_lock_init(x)	do { (x)->lock = 0; } while(0)

/*
 * Simple spin lock operations.  There are two variants, one clears
 * IRQ's on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 */

/* Spin with plain (non-locking) reads until the holder releases. */
#define spin_unlock_wait(x) \
	do { \
		barrier(); \
	} while(((volatile spinlock_t *)(x))->lock)

/*
 * Dummy oversized aggregate used as the "=m" asm operand: it makes
 * gcc treat the lock's memory as written by the asm, without a full
 * "memory" clobber.
 */
typedef struct { unsigned long a[100]; } __dummy_lock_t;
#define __dummy_lock(lock) (*(__dummy_lock_t *)(lock))

/*
 * Lock: atomically test-and-set bit 0.  On contention, spin in the
 * out-of-line .text.lock section with cheap unlocked reads until the
 * bit clears, then retry the locked operation.
 */
#define spin_lock_string \
	"\n1:\t" \
	"lock ; btsl $0,%0\n\t" \
	"jc 2f\n" \
	".section .text.lock,\"ax\"\n" \
	"2:\t" \
	"testb $1,%0\n\t" \
	"jne 2b\n\t" \
	"jmp 1b\n" \
	".previous"

/* Unlock: atomically clear bit 0. */
#define spin_unlock_string \
	"lock ; btrl $0,%0"

#define spin_lock(lock) \
	__asm__ __volatile__( \
		spin_lock_string \
		:"=m" (__dummy_lock(lock)))

#define spin_unlock(lock) \
	__asm__ __volatile__( \
		spin_unlock_string \
		:"=m" (__dummy_lock(lock)))

#define spin_trylock(lock)	(!test_and_set_bit(0,(lock)))

#define spin_lock_irq(lock) \
	do { __cli(); spin_lock(lock); } while (0)

#define spin_unlock_irq(lock) \
	do { spin_unlock(lock); __sti(); } while (0)

#define spin_lock_irqsave(lock, flags) \
	do { __save_flags(flags); __cli(); \
	     spin_lock(lock); } while (0)

#define spin_unlock_irqrestore(lock, flags) \
	do { spin_unlock(lock); \
	     __restore_flags(flags); } while (0)

/*
 * Read-write spinlocks, allowing multiple readers but only one
 * writer.
 *
 * NOTE! it is quite common to have readers in interrupts but no
 * interrupt writers. For those circumstances we can "mix" irq-safe
 * locks - any writer needs to get a irq-safe write-lock, but readers
 * can get non-irqsafe read-locks.
 */
typedef struct {
	volatile unsigned int lock;
	unsigned long previous;
} rwlock_t;

#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }

/*
 * On x86, we implement read-write locks as a 32-bit counter with the
 * high bit (sign) being the "write" bit.
 *
 * The inline assembly is non-obvious. Think about it.
 *
 * read_lock: increment the counter; a negative result means a writer
 * holds (or is claiming) the lock, so back the increment out, spin
 * until the counter is non-negative, then retry.
 */
#define read_lock(rw) \
	asm volatile("\n1:\t" \
		     "lock ; incl %0\n\t" \
		     "js 2f\n" \
		     ".section .text.lock,\"ax\"\n" \
		     "2:\tlock ; decl %0\n" \
		     "3:\tcmpl $0,%0\n\t" \
		     "js 3b\n\t" \
		     "jmp 1b\n" \
		     ".previous" \
		     :"=m" (__dummy_lock(&(rw)->lock)))

#define read_unlock(rw) \
	asm volatile("lock ; decl %0" \
		     :"=m" (__dummy_lock(&(rw)->lock)))

/*
 * write_lock: atomically claim the sign bit.  If another writer beat
 * us (carry set), spin at 4 until the whole word is zero and retry.
 * If readers remain (low 31 bits non-zero), release our claim at 3,
 * then do the same wait-and-retry.
 */
#define write_lock(rw) \
	asm volatile("\n1:\t" \
		     "lock ; btsl $31,%0\n\t" \
		     "jc 4f\n" \
		     "2:\ttestl $0x7fffffff,%0\n\t" \
		     "jne 3f\n" \
		     ".section .text.lock,\"ax\"\n" \
		     "3:\tlock ; btrl $31,%0\n" \
		     "4:\tcmp $0,%0\n\t" \
		     "jne 4b\n\t" \
		     "jmp 1b\n" \
		     ".previous" \
		     :"=m" (__dummy_lock(&(rw)->lock)))

#define write_unlock(rw) \
	asm volatile("lock ; btrl $31,%0" \
		     :"=m" (__dummy_lock(&(rw)->lock)))

#define read_lock_irq(lock) \
	do { __cli(); read_lock(lock); } while (0)
#define read_unlock_irq(lock) \
	do { read_unlock(lock); __sti(); } while (0)
#define write_lock_irq(lock) \
	do { __cli(); write_lock(lock); } while (0)
#define write_unlock_irq(lock) \
	do { write_unlock(lock); __sti(); } while (0)

#define read_lock_irqsave(lock, flags) \
	do { __save_flags(flags); __cli(); \
	     read_lock(lock); } while (0)
#define read_unlock_irqrestore(lock, flags) \
	do { read_unlock(lock); \
	     __restore_flags(flags); } while (0)
#define write_lock_irqsave(lock, flags) \
	do { __save_flags(flags); __cli(); \
	     write_lock(lock); } while (0)
#define write_unlock_irqrestore(lock, flags) \
	do { write_unlock(lock); \
	     __restore_flags(flags); } while (0)

#endif /* __SMP__ */
#endif /* __ASM_SPINLOCK_H */