include/asm-i386/system.h
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <asm/segment.h>

#ifdef __KERNEL__

/* one of the stranger aspects of C forward declarations.. */
struct task_struct;
extern void FASTCALL(__switch_to(struct task_struct *prev,
				 struct task_struct *next));

/*
 * We do most of the task switching in C, but we need
 * to do the EIP/ESP switch in assembly..
 */
#define switch_to(prev,next) do {					\
	unsigned long eax, edx, ecx;					\
	asm volatile("pushl %%ebx\n\t"					\
		     "pushl %%esi\n\t"					\
		     "pushl %%edi\n\t"					\
		     "pushl %%ebp\n\t"					\
		     "movl %%esp,%0\n\t"	/* save ESP */		\
		     "movl %5,%%esp\n\t"	/* restore ESP */	\
		     "movl $1f,%1\n\t"		/* save EIP */		\
		     "pushl %6\n\t"		/* restore EIP */	\
		     "jmp __switch_to\n"				\
		     "1:\t"						\
		     "popl %%ebp\n\t"					\
		     "popl %%edi\n\t"					\
		     "popl %%esi\n\t"					\
		     "popl %%ebx"					\
		     :"=m" (prev->tss.esp),"=m" (prev->tss.eip),	\
		      "=a" (eax), "=d" (edx), "=c" (ecx)		\
		     :"m" (next->tss.esp),"m" (next->tss.eip),		\
		      "a" (prev), "d" (next));				\
} while (0)
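
/*
 * Illustrative sketch (not part of the original header): how a
 * scheduler might invoke switch_to().  'example_schedule' and its
 * arguments are hypothetical; in the real kernel the caller is
 * schedule().  Control returns past switch_to() only when 'prev'
 * is later switched back in, resuming at the "1:" label above.
 */
#if 0	/* example only */
static void example_schedule(struct task_struct *prev,
			     struct task_struct *next)
{
	if (prev != next)
		switch_to(prev, next);
	/* running as 'prev' again once someone switches back to us */
}
#endif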

#define _set_base(addr,base) do { unsigned long __pr;	\
	__asm__ __volatile__ ("movw %%dx,%1\n\t"	\
		"rorl $16,%%edx\n\t"			\
		"movb %%dl,%2\n\t"			\
		"movb %%dh,%3"				\
		:"=&d" (__pr)				\
		:"m" (*((addr)+2)),			\
		 "m" (*((addr)+4)),			\
		 "m" (*((addr)+7)),			\
		 "0" (base)				\
		); } while (0)

#define _set_limit(addr,limit) do { unsigned long __lr;	\
	__asm__ __volatile__ ("movw %%dx,%1\n\t"	\
		"rorl $16,%%edx\n\t"			\
		"movb %2,%%dh\n\t"			\
		"andb $0xf0,%%dh\n\t"			\
		"orb %%dh,%%dl\n\t"			\
		"movb %%dl,%2"				\
		:"=&d" (__lr)				\
		:"m" (*(addr)),				\
		 "m" (*((addr)+6)),			\
		 "0" (limit)				\
		); } while (0)

#define set_base(ldt,base)				\
	_set_base( ((char *)&(ldt)) , (base) )
#define set_limit(ldt,limit)				\
	_set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 )
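
/*
 * Illustrative plain-C equivalent of _set_base() (a sketch, not in
 * the original header), spelling out which descriptor bytes the
 * assembly above touches: base bits 0-15 live in bytes 2-3, bits
 * 16-23 in byte 4, and bits 24-31 in byte 7 of an i386 descriptor.
 * The function name is made up.
 */
#if 0	/* example only */
static void example_set_base(unsigned char *desc, unsigned long base)
{
	desc[2] = base & 0xff;			/* base bits  0.. 7 */
	desc[3] = (base >>  8) & 0xff;		/* base bits  8..15 */
	desc[4] = (base >> 16) & 0xff;		/* base bits 16..23 */
	desc[7] = (base >> 24) & 0xff;		/* base bits 24..31 */
}
#endif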

static inline unsigned long _get_base(char *addr)
{
	unsigned long __base;
	__asm__("movb %3,%%dh\n\t"
		"movb %2,%%dl\n\t"
		"shll $16,%%edx\n\t"
		"movw %1,%%dx"
		:"=&d" (__base)
		:"m" (*((addr)+2)),
		 "m" (*((addr)+4)),
		 "m" (*((addr)+7)));
	return __base;
}

#define get_base(ldt) _get_base( ((char *)&(ldt)) )

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value)			\
	asm volatile("\n"			\
		"1:\t"				\
		"movl %0,%%" #seg "\n"		\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t"				\
		"pushl $0\n\t"			\
		"popl %%" #seg "\n\t"		\
		"jmp 2b\n"			\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		".align 4\n\t"			\
		".long 1b,3b\n"			\
		".previous"			\
		: :"m" (*(unsigned int *)&(value)))
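
/*
 * Illustrative sketch (not in the original header): loading a
 * possibly stale selector into %fs.  If the mov at label 1: faults,
 * the __ex_table entry above sends the CPU to the .fixup code,
 * which loads the null selector instead and resumes at label 2:.
 * 'example_load_fs' and 'sel' are hypothetical names.
 */
#if 0	/* example only */
static void example_load_fs(unsigned int sel)
{
	loadsegment(fs, sel);
}
#endif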

/*
 * Clear and set 'TS' bit respectively
 */
#define clts() __asm__ __volatile__ ("clts")
#define read_cr0() ({			\
	unsigned int __dummy;		\
	__asm__(			\
		"movl %%cr0,%0\n\t"	\
		:"=r" (__dummy));	\
	__dummy;			\
})
#define write_cr0(x) \
	__asm__("movl %0,%%cr0": :"r" (x))
#define stts() write_cr0(8 | read_cr0())
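
/*
 * Illustrative sketch (not in the original header) of lazy FPU
 * handling with the helpers above: stts() sets CR0.TS (bit 3,
 * hence the constant 8), so the next FPU instruction raises a
 * "device not available" fault; the fault handler calls clts()
 * before giving the FPU back.  Both function names are made up.
 */
#if 0	/* example only */
static void example_switch_away_from_fpu_user(void)
{
	stts();		/* next FPU instruction will trap */
}

static void example_device_not_available_fault(void)
{
	clts();		/* allow FPU instructions again */
	/* ... restore the owning task's FPU state here ... */
}
#endif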

#endif	/* __KERNEL__ */

static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	__asm__("lsll %1,%0"
		:"=r" (__limit):"r" (segment));
	return __limit+1;
}
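
/*
 * Illustrative sketch (not in the original header): get_limit()
 * executes 'lsl' on a selector and adds one, giving the segment
 * size in bytes for a byte-granular descriptor.  'sel' is assumed
 * to be a valid selector; the function name is hypothetical.
 */
#if 0	/* example only */
static unsigned long example_segment_size(unsigned long sel)
{
	return get_limit(sel);	/* limit + 1 == size in bytes */
}
#endif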

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,x) ((__typeof__(*(ptr)))	\
	__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))

/*
 * Note: no "lock" prefix even on SMP: xchg always
 * implies lock anyway
 */
static inline unsigned long __xchg(unsigned long x,
				   void *ptr, int size)
{
	switch (size) {
		case 1:
			__asm__("xchgb %b0,%1"
				:"=q" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 2:
			__asm__("xchgw %w0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 4:
			__asm__("xchgl %0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
	}
	return x;
}
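
/*
 * Illustrative sketch (not in the original header): a minimal
 * test-and-set spin lock built on tas()/xchg().  xchg() returns
 * the old value, so tas() returning 0 means we observed the lock
 * free and atomically claimed it.  'example_lock' and the two
 * functions are hypothetical; a real lock would also want the
 * variable declared volatile.
 */
#if 0	/* example only */
static int example_lock = 0;

static void example_spin_lock(void)
{
	while (tas(&example_lock))
		/* spin until the old value comes back as 0 */ ;
}

static void example_spin_unlock(void)
{
	xchg(&example_lock, 0);	/* atomically release */
}
#endif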

/*
 * Force strict CPU ordering.  And yes, this is required
 * on UP too when we're talking to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a "Processor Order",
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act
 * together and add some real memory barriers if so.
 */
#define mb()	__asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#define rmb()	mb()
#define wmb()	__asm__ __volatile__ ("": : :"memory")
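
/*
 * Illustrative sketch (not in the original header): the classic
 * producer pattern these barriers exist for.  All names here
 * ('example_desc', 'EXAMPLE_OWN_BIT', ...) are hypothetical.
 * wmb() keeps the payload store ordered before the ownership
 * store; mb() would be needed where reads must not pass writes.
 */
#if 0	/* example only */
struct example_desc { unsigned long buffer; unsigned long status; };
#define EXAMPLE_OWN_BIT 0x80000000

static void example_post_descriptor(struct example_desc *desc,
				    unsigned long dma_address)
{
	desc->buffer = dma_address;	/* fill in the work item */
	wmb();				/* payload before ownership */
	desc->status = EXAMPLE_OWN_BIT;	/* hand descriptor to device */
}
#endif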

/* interrupt control.. */
#define __sti() __asm__ __volatile__ ("sti": : :"memory")
#define __cli() __asm__ __volatile__ ("cli": : :"memory")
#define __save_flags(x) \
	__asm__ __volatile__ ("pushfl ; popl %0":"=g" (x): /* no input */ :"memory")
#define __restore_flags(x) \
	__asm__ __volatile__ ("pushl %0 ; popfl": /* no output */ :"g" (x):"memory")

#ifdef __SMP__

extern void __global_cli(void);
extern void __global_sti(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long);
#define cli() __global_cli()
#define sti() __global_sti()
#define save_flags(x) ((x)=__global_save_flags())
#define restore_flags(x) __global_restore_flags(x)

#else

#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)

#endif
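
/*
 * Illustrative sketch (not in the original header): the standard
 * pattern for a short section that must not be interrupted.
 * save_flags()/restore_flags() preserve the caller's interrupt
 * state, so the sequence nests safely; on SMP kernels these
 * expand to the __global_* versions declared above.  The function
 * name is made up.
 */
#if 0	/* example only */
static void example_critical_section(void)
{
	unsigned long flags;

	save_flags(flags);
	cli();
	/* ... touch data shared with an interrupt handler ... */
	restore_flags(flags);
}
#endif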

/* disable hlt during certain critical i/o operations */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);
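
/*
 * Illustrative sketch (not in the original header): bracketing a
 * timing-sensitive I/O sequence so the idle task will not execute
 * 'hlt' while it runs (some hardware misbehaves if the CPU halts
 * mid-transfer).  The function and its body are hypothetical.
 */
#if 0	/* example only */
static void example_sensitive_io(void)
{
	disable_hlt();
	/* ... perform the hlt-sensitive I/O here ... */
	enable_hlt();
}
#endif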

#endif