netlib.narod.ru < Back | Table of Contents | Next >

arch/i386/kernel/traps.c

 6091 /*
 6092  *  linux/arch/i386/traps.c
 6093  *
 6094  *  Copyright (C) 1991, 1992  Linus Torvalds
 6095  */
 6096 
 6097 /* 'Traps.c' handles hardware traps and faults after we
 6098  * have saved some state in 'asm.s'.  */
 6099 #include <linux/config.h>
 6100 #include <linux/sched.h>
 6101 #include <linux/kernel.h>
 6102 #include <linux/string.h>
 6103 #include <linux/errno.h>
 6104 #include <linux/ptrace.h>
 6105 #include <linux/timer.h>
 6106 #include <linux/mm.h>
 6107 #include <linux/smp.h>
 6108 #include <linux/smp_lock.h>
 6109 #include <linux/init.h>
 6110 #include <linux/delay.h>
 6111 
 6112 #ifdef CONFIG_MCA
 6113 #include <linux/mca.h>
 6114 #include <asm/processor.h>
 6115 #endif
 6116 
 6117 #include <asm/system.h>
 6118 #include <asm/uaccess.h>
 6119 #include <asm/io.h>
 6120 #include <asm/spinlock.h>
 6121 #include <asm/atomic.h>
 6122 #include <asm/debugreg.h>
 6123 #include <asm/desc.h>
 6124 
 6125 #include <asm/smp.h>
 6126 
 6127 #ifdef CONFIG_X86_VISWS_APIC
 6128 #include <asm/fixmap.h>
 6129 #include <asm/cobalt.h>
 6130 #include <asm/lithium.h>
 6131 #endif
 6132 
 6133 #include "irq.h"
 6134 
      /* Entry points implemented in assembly (presumably
       * entry.S -- confirm); 'asmlinkage' forces the plain C
       * stack-based calling convention those stubs expect. */
 6135 asmlinkage int system_call(void);
 6136 asmlinkage void lcall7(void);
      /* A single null descriptor, used as the LDT for every task
       * that has not installed one of its own. */
 6138 struct desc_struct default_ldt = { 0, 0 };
 6139 
 6140 /* The IDT has to be page-aligned to simplify the Pentium
 6141  * F0 0F bug workaround.. We have a special link segment
 6142  * for this.  */
 6143 struct desc_struct idt_table[256]
 6144 __attribute__((__section__(".data.idt"))) = { {0, 0}, };
 6145 
      /* Raise the console log level to its maximum (15) so that
       * every pending kernel message reaches the console while we
       * dump oops state. */
 6146 static inline void console_verbose(void)
 6147 {
 6148   extern int console_loglevel;
 6149   console_loglevel = 15;
 6150 }
 6151 
      /* Template for a simple exception handler do_<name>():
       * record the trap number and error code in the task's TSS,
       * post the signal, then oops unless an exception-table
       * fixup lets a faulting kernel access resume. */
 6152 #define DO_ERROR(trapnr, signr, str, name, tsk)         \
 6153 asmlinkage void do_##name(struct pt_regs * regs,        \
 6154                           long error_code)              \
 6155 {                                                       \
 6156   tsk->tss.error_code = error_code;                     \
 6157   tsk->tss.trap_no = trapnr;                            \
 6158   force_sig(signr, tsk);                                \
 6159   die_if_no_fixup(str,regs,error_code);                 \
 6160 }
 6161 
 6162 #define DO_VM86_ERROR(trapnr, signr, str, name, tsk)    \
 6163 asmlinkage void do_##name(struct pt_regs * regs,        \
 6164                           long error_code)              \
 6165 {                                                       \
 6166   lock_kernel();                                        \
 6167   if (regs->eflags & VM_MASK) {                         \
 6168     if (!handle_vm86_trap((struct kernel_vm86_regs *)   \
 6169                           regs, error_code, trapnr))    \
 6170       goto out;                                         \
 6171     /* else fall through */                             \
 6172   }                                                     \
 6173   tsk->tss.error_code = error_code;                     \
 6174   tsk->tss.trap_no = trapnr;                            \
 6175   force_sig(signr, tsk);                                \
 6176   die_if_kernel(str,regs,error_code);                   \
 6177 out:                                                    \
 6178   unlock_kernel();
 6179 }
 6180 
      /* Low-level trap entry stubs, one per x86 exception vector,
       * implemented in assembly; trap_init() below wires them
       * into the IDT. */
 6181 void page_exception(void);
 6182 
 6183 asmlinkage void divide_error(void);
 6184 asmlinkage void debug(void);
 6185 asmlinkage void nmi(void);
 6186 asmlinkage void int3(void);
 6187 asmlinkage void overflow(void);
 6188 asmlinkage void bounds(void);
 6189 asmlinkage void invalid_op(void);
 6190 asmlinkage void device_not_available(void);
 6191 asmlinkage void double_fault(void);
 6192 asmlinkage void coprocessor_segment_overrun(void);
 6193 asmlinkage void invalid_TSS(void);
 6194 asmlinkage void segment_not_present(void);
 6195 asmlinkage void stack_segment(void);
 6196 asmlinkage void general_protection(void);
 6197 asmlinkage void page_fault(void);
 6198 asmlinkage void coprocessor_error(void);
 6199 asmlinkage void reserved(void);
 6200 asmlinkage void alignment_check(void);
 6201 asmlinkage void spurious_interrupt_bug(void);
      /* How many words of kernel stack show_registers() dumps. */
 6203 int kstack_depth_to_print = 24;
 6204 
 6205 /* These constants are for searching for possible module
 6206  * text segments.  VMALLOC_OFFSET comes from
 6207  * mm/vmalloc.c; MODULE_RANGE is a guess of how much
 6208  * space is likely to be vmalloced.  */
 6209 #define VMALLOC_OFFSET (8*1024*1024)
 6210 #define MODULE_RANGE (8*1024*1024)
 6211 
      /* Dump the register state in 'regs'; for faults raised in
       * kernel mode also dump the raw stack contents, a heuristic
       * call trace, and the code bytes at the faulting EIP. */
 6212 static void show_registers(struct pt_regs *regs)
 6213 {
 6214   int i;
 6215   int in_kernel = 1;
 6216   unsigned long esp;
 6217   unsigned short ss;
 6218   unsigned long *stack, addr, module_start, module_end;
 6219 
      /* A kernel-mode fault causes no ring transition, so esp/ss
       * were not pushed into pt_regs: the end of the frame itself
       * is where the kernel stack pointer was. */
 6220   esp = (unsigned long) (1+regs);
 6221   ss = __KERNEL_DS;
      /* CPL bits of the saved CS nonzero => fault came from user
       * mode; then the real user esp/ss are in the frame. */
 6222   if (regs->xcs & 3) {
 6223     in_kernel = 0;
 6224     esp = regs->esp;
 6225     ss = regs->xss & 0xffff;
 6226   }
 6227   printk("CPU:    %d\nEIP:    %04x:[<%08lx>]"
 6228          "\nEFLAGS: %08lx\n", smp_processor_id(),
 6229          0xffff & regs->xcs, regs->eip, regs->eflags);
 6230   printk("eax: %08lx   ebx: %08lx   ecx: %08lx   "
 6231          "edx: %08lx\n",
 6232          regs->eax, regs->ebx, regs->ecx, regs->edx);
 6233   printk("esi: %08lx   edi: %08lx   ebp: %08lx   "
 6234          "esp: %08lx\n",
 6235          regs->esi, regs->edi, regs->ebp, esp);
 6236   printk("ds: %04x   es: %04x   ss: %04x\n",
 6237          regs->xds & 0xffff, regs->xes & 0xffff, ss);
 6238   store_TR(i);
 6239   printk("Process %s (pid: %d, process nr: %d, "
 6240          "stackpage=%08lx)", current->comm, current->pid,
 6241          0xffff & i, 4096+(unsigned long)current);
 6242 
 6243   /* When in-kernel, we also print out the stack and code
 6244    * at the time of the fault..  */
 6245   if (in_kernel) {
 6246     printk("\nStack: ");
 6247     stack = (unsigned long *) esp;
 6248     for(i=0; i < kstack_depth_to_print; i++) {
      /* Stop at a 4KB page boundary, i.e. the end of the
       * kernel stack page. */
 6249       if (((long) stack & 4095) == 0)
 6250         break;
 6251       if (i && ((i % 8) == 0))
 6252         printk("\n       ");
 6253       printk("%08lx ", *stack++);
 6254     }
 6255     printk("\nCall Trace: ");
 6256     stack = (unsigned long *) esp;
 6257     i = 1;
 6258     module_start = PAGE_OFFSET + (max_mapnr<<PAGE_SHIFT);
 6259     module_start = ((module_start + VMALLOC_OFFSET) &
 6260                     ~(VMALLOC_OFFSET-1));
 6261     module_end = module_start + MODULE_RANGE;
 6262     while (((long) stack & 4095) != 0) {
 6263       addr = *stack++;
 6264       /* If the address is either in the text segment of
 6265        * the kernel, or in the region which contains
 6266        * vmalloc'ed memory, it *may* be the address of a
 6267        * calling routine; if so, print it so that someone
 6268        * tracing down the cause of the crash will be able
 6269        * to figure out the call path that was taken.  */
 6270       if (((addr >= (unsigned long) &_stext) &&
 6271            (addr <= (unsigned long) &_etext)) ||
 6272           ((addr >= module_start) &&
 6273            (addr <= module_end))) {
 6274         if (i && ((i % 8) == 0))
 6275           printk("\n       ");
 6276         printk("[<%08lx>] ", addr);
 6277         i++;
 6278       }
 6279     }
 6280     printk("\nCode: ");
 6281     for(i=0;i<20;i++)
 6282       printk("%02x ", ((unsigned char *)regs->eip)[i]);
 6283   }
 6284   printk("\n");
 6285 }
 6286 
      /* Serializes oops output from multiple CPUs.
       * NOTE(review): declared without an initializer, so this
       * relies on zeroed BSS being the unlocked state -- confirm
       * against SPIN_LOCK_UNLOCKED for this architecture. */
 6287 spinlock_t die_lock;
 6288 
      /* Print an oops banner ("<str>: <err>") plus the full
       * register dump, then terminate the current process with
       * SIGSEGV via do_exit() (which does not return). */
 6289 void die(const char * str, struct pt_regs * regs,
 6290          long err)
 6291 {
 6292   console_verbose();
 6293   spin_lock_irq(&die_lock);
 6294   printk("%s: %04lx\n", str, err & 0xffff);
 6295   show_registers(regs);
 6296   spin_unlock_irq(&die_lock);
 6297   do_exit(SIGSEGV);
 6298 }
 6299 
      /* Oops only if the fault happened in kernel mode: not in
       * vm86 mode (VM flag clear) and not in user mode (CPL bits
       * of the saved CS are zero). */
 6300 static inline void die_if_kernel(const char * str,
 6301                          struct pt_regs * regs, long err)
 6302 {
 6303   if (!(regs->eflags & VM_MASK) && !(3 & regs->xcs))
 6304     die(str, regs, err);
 6305 }
 6306 
      /* Like die_if_kernel(), but first consult the exception
       * table: if the faulting EIP has a registered fixup (e.g. a
       * user-access helper), redirect execution there and survive
       * instead of oopsing. */
 6307 static void die_if_no_fixup(const char * str,
 6308                          struct pt_regs * regs, long err)
 6309 {
 6310   if (!(regs->eflags & VM_MASK) && !(3 & regs->xcs))
 6311   {
 6312     unsigned long fixup;
 6313     fixup = search_exception_table(regs->eip);
 6314     if (fixup) {
 6315       regs->eip = fixup;
 6316       return;
 6317     }
 6318     die(str, regs, err);
 6319   }
 6320 }
 6321 
      /* Instantiate the straightforward exception handlers.
       * Vectors that can legitimately be raised from vm86 mode
       * use the vm86-aware template; the rest use DO_ERROR. */
 6322 DO_VM86_ERROR( 0, SIGFPE,  "divide error", divide_error,
 6323                current)
 6324 DO_VM86_ERROR( 3, SIGTRAP, "int3", int3, current)
 6325 DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow, current)
 6326 DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds, current)
 6327 DO_ERROR( 6, SIGILL,  "invalid operand", invalid_op,
 6328           current)
 6329 DO_VM86_ERROR( 7, SIGSEGV, "device not available",
 6330                device_not_available, current)
 6331 DO_ERROR( 8, SIGSEGV, "double fault", double_fault,
 6332           current)
 6333 DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun",
 6334           coprocessor_segment_overrun, current)
 6335 DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS,
 6336          current)
 6337 DO_ERROR(11, SIGBUS,  "segment not present",
 6338          segment_not_present, current)
 6339 DO_ERROR(12, SIGBUS,  "stack segment", stack_segment,
 6340          current)
 6341 DO_ERROR(17, SIGSEGV, "alignment check", alignment_check,
 6342          current)
 6343 DO_ERROR(18, SIGSEGV, "reserved", reserved, current)
 6344 /* I don't have documents for this but it does seem to
 6345  * cover the cache flush from user space exception some
 6346  * people get. */
 6347 DO_ERROR(19, SIGSEGV, "cache flush denied",
 6348          cache_flush_denied, current)
 6349 
      /* Open-coded handler for vector 19: give vm86 mode first
       * crack at the fault, otherwise oops if in kernel mode and
       * deliver SIGSEGV to the current task. */
 6350 asmlinkage void cache_flush_denied(struct pt_regs * regs,
 6351                                    long error_code)
 6352 {
 6353   if (regs->eflags & VM_MASK) {
 6354     handle_vm86_fault((struct kernel_vm86_regs *) regs,
 6355                       error_code);
 6356     return;
 6357   }
 6358   die_if_kernel("cache flush denied",regs,error_code);
 6359   current->tss.error_code = error_code;
 6360   current->tss.trap_no = 19;
 6361   force_sig(SIGSEGV, current);
 6362 }
 6363 
      /* General protection fault (vector 13).  Three cases:
       * vm86 mode -> let the vm86 emulation handle it;
       * kernel mode -> try an exception-table fixup, else oops;
       * user mode -> record trap state and deliver SIGSEGV. */
 6364 asmlinkage void do_general_protection(
 6365   struct pt_regs * regs, long error_code)
 6366 {
 6367   if (regs->eflags & VM_MASK)
 6368     goto gp_in_vm86;
 6369 
 6370   if (!(regs->xcs & 3))
 6371     goto gp_in_kernel;
 6372 
 6373   current->tss.error_code = error_code;
 6374   current->tss.trap_no = 13;
 6375   force_sig(SIGSEGV, current);
 6376   return;
 6377 
 6378 gp_in_vm86:
 6379   lock_kernel();
 6380   handle_vm86_fault((struct kernel_vm86_regs *) regs,
 6381                     error_code);
 6382   unlock_kernel();
 6383   return;
 6384 
 6385 gp_in_kernel:
 6386   {
 6387     unsigned long fixup;
 6388     fixup = search_exception_table(regs->eip);
 6389     if (fixup) {
 6390       regs->eip = fixup;
 6391       return;
 6392     }
 6393     die("general protection fault", regs, error_code);
 6394   }
 6395 }
 6396 
      /* NMI with the memory-parity bit set: nothing can be fixed
       * from here, so just warn and continue (both parameters are
       * deliberately unused). */
 6397 static void mem_parity_error(unsigned char reason,
 6398                              struct pt_regs * regs)
 6399 {
 6400   printk("Uhhuh. NMI received. Dazed and confused, "
 6401          "but trying to continue\n");
 6402   printk("You probably have a hardware problem with "
 6403          "your RAM chips\n");
 6404 }
 6405 
      /* NMI caused by an I/O channel check.  Dump state, then
       * toggle bit 3 of system control port B (0x61) to clear and
       * re-enable the IOCHK line, busy-waiting ~2 seconds in
       * between. */
 6406 static void io_check_error(unsigned char reason,
 6407                            struct pt_regs * regs)
 6408 {
 6409   unsigned long i;
 6410 
 6411   printk("NMI: IOCK error (debug interrupt?)\n");
 6412   show_registers(regs);
 6413 
 6414   /* Re-enable the IOCK line, wait for a few seconds */
 6415   reason |= 8;
 6416   outb(reason, 0x61);
 6417   i = 2000;
 6418   while (--i) udelay(1000);
 6419   reason &= ~8;
 6420   outb(reason, 0x61);
 6421 }
 6422 
      /* NMI with neither parity nor IOCHK bits set.  On MCA
       * machines the bus may be able to identify the source;
       * otherwise just log the raw reason byte and carry on. */
 6423 static void unknown_nmi_error(unsigned char reason,
 6424                               struct pt_regs * regs)
 6425 {
 6426 #ifdef CONFIG_MCA
 6427   /* Might actually be able to figure out what the guilty
 6428    * party is. */
 6429   if( MCA_bus ) {
 6430     mca_handle_nmi();
 6431     return;
 6432   }
 6433 #endif
 6434   printk("Uhhuh. NMI received for unknown reason %02x.\n"
 6435          , reason);
 6436   printk("Dazed and confused, but trying to continue\n");
 6437   printk("Do you have a strange power saving mode "
 6438          "enabled?\n");
 6439 }
 6440 
      /* Top-level NMI handler: read the reason byte from system
       * control port B (0x61) and dispatch -- bit 7 = memory
       * parity error, bit 6 = I/O channel check, neither =
       * unknown source.  Note both bits can be handled in one
       * pass. */
 6441 asmlinkage void do_nmi(struct pt_regs * regs,
 6442                        long error_code)
 6443 {
 6444   unsigned char reason = inb(0x61);
 6445   extern atomic_t nmi_counter;
 6446 
 6447   atomic_inc(&nmi_counter);
 6448   if (reason & 0x80)
 6449     mem_parity_error(reason, regs);
 6450   if (reason & 0x40)
 6451     io_check_error(reason, regs);
 6452   if (!(reason & 0xc0))
 6453     unknown_nmi_error(reason, regs);
 6454 }
 6455 
 6456 /* Careful - we must not do a lock-kernel until we have
 6457  * checked that the debug fault happened in user
 6458  * mode. Getting debug exceptions while in the kernel has
 6459  * to be handled without locking, to avoid deadlocks..
 6460  *
 6461  * Being careful here means that we don't have to be as
 6462  * careful in a lot of more complicated places (task
 6463  * switching can be a bit lazy about restoring all the
 6464  * debug state, and ptrace doesn't have to find every
 6465  * occurrence of the TF bit that could be saved away even
 6466  * by user code - and we don't have to be careful about
 6467  * what values can be written to the debug registers
 6468  * because there are no really bad cases).  */
      /* Debug exception (vector 1).  Reads DR6 to find the cause,
       * filters out spurious single-step and breakpoint traps
       * caused by lazy TF/DR7 handling, and otherwise delivers
       * SIGTRAP.  Kernel-mode traps are handled lock-free (see
       * the comment above). */
 6469 asmlinkage void do_debug(struct pt_regs * regs,
 6470                          long error_code)
 6471 {
 6472   unsigned int condition;
 6473   struct task_struct *tsk = current;
 6474 
 6475   if (regs->eflags & VM_MASK)
 6476     goto debug_vm86;
 6477 
      /* Fetch the debug status register (DR6, aliased db6). */
 6478   __asm__ __volatile__("movl %%db6,%0" : "=r"
 6479                        (condition));
 6480 
 6481   /* Mask out spurious TF errors due to lazy TF
 6482    * clearing */
 6483   if (condition & DR_STEP) {
 6484     /* The TF error should be masked out only if the
 6485      * current process is not traced and if the TRAP flag
 6486      * has been set previously by a tracing process
 6487      * (condition detected by the PF_DTRACE flag);
 6488      * remember that the i386 TRAP flag can be modified
 6489      * by the process itself in user mode, allowing
 6490      * programs to debug themselves without the ptrace()
 6491      * interface.  */
 6492     if ((tsk->flags & (PF_DTRACE|PF_PTRACED)) ==
 6493         PF_DTRACE)
 6494       goto clear_TF;
 6495   }
 6496 
 6497   /* Mask out spurious debug traps due to lazy DR7
 6498    * setting */
 6499   if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)){
 6500     if (!tsk->tss.debugreg[7])
 6501       goto clear_dr7;
 6502   }
 6503 
 6504   /* If this is a kernel mode trap, we need to reset db7
 6505    * to allow us to continue sanely */
 6506   if ((regs->xcs & 3) == 0)
 6507     goto clear_dr7;
 6508 
 6509   /* Ok, finally something we can handle */
 6510   tsk->tss.trap_no = 1;
 6511   tsk->tss.error_code = error_code;
 6512   force_sig(SIGTRAP, tsk);
 6513   return;
 6514 
 6515 debug_vm86:
 6516   lock_kernel();
 6517   handle_vm86_trap((struct kernel_vm86_regs *) regs,
 6518                    error_code, 1);
 6519   unlock_kernel();
 6520   return;
 6521 
      /* Disable all hardware breakpoints by zeroing DR7. */
 6522 clear_dr7:
 6523   __asm__("movl %0,%%db7"
 6524     : /* no output */
 6525     : "r" (0));
 6526   return;
 6527 
      /* Drop the single-step flag from the saved EFLAGS. */
 6528 clear_TF:
 6529   regs->eflags &= ~TF_MASK;
 6530   return;
 6531 }
 6532 
 6533 /* Note that we play around with the 'TS' bit in an
 6534  * attempt to get the correct behaviour even in the
 6535  * presence of the asynchronous IRQ13 behaviour */
      /* FPU exception: snapshot the FPU state into the task
       * (save_fpu also clears the pending error condition),
       * record trap 16, and deliver SIGFPE. */
 6536 void math_error(void)
 6537 {
 6538   struct task_struct * task;
 6539 
 6540   /* Save the info for the exception handler (this will
 6541    * also clear the error) */
 6542   task = current;
 6543   save_fpu(task);
 6544   task->tss.trap_no = 16;
 6545   task->tss.error_code = 0;
 6546   force_sig(SIGFPE, task);
 6547 }
 6548 
      /* Native FPU exception (vector 16) works, so mark the
       * legacy IRQ13 reporting path ignorable before handling the
       * error. */
 6549 asmlinkage void do_coprocessor_error(
 6550   struct pt_regs * regs, long error_code)
 6551 {
 6552   ignore_irq13 = 1;
 6553   math_error();
 6554 }
 6555 
      /* Vector 15: P6 local APIC spurious interrupt erratum.
       * Deliberately a no-op; the warning printk was disabled. */
 6556 asmlinkage void do_spurious_interrupt_bug(
 6557   struct pt_regs * regs, long error_code)
 6558 {
 6559 #if 0
 6560   /* No need to warn about this any longer. */
 6561   printk("Ignoring P6 Local APIC Spurious Interrupt "
 6562          "Bug...\n");
 6563 #endif
 6564 }
 6565 
 6566 /* 'math_state_restore()' saves the current math
 6567  * information in the old math state array, and gets the
 6568  * new ones from the current task
 6569  *
 6570  * Careful.. There are problems with IBM-designed IRQ13
 6571  * behaviour.  Don't touch unless you *really* know how
 6572  * it works.  */
      /* Device-not-available (lazy FPU switch) handler: clts
       * clears CR0.TS so FPU instructions stop faulting, then the
       * task's saved FPU context is restored (or the FPU is
       * initialized on first use).  PF_USEDFPU makes switch_to()
       * fnsave the state later. */
 6573 asmlinkage void math_state_restore(struct pt_regs regs)
 6574 {
 6575   /* Allow maths ops (or we recurse) */
 6576   __asm__ __volatile__("clts");
 6577   if(current->used_math)
 6578     __asm__("frstor %0": :"m" (current->tss.i387));
 6579   else
 6580   {
 6581     /* Our first FPU usage, clean the chip. */
 6582     __asm__("fninit");
 6583     current->used_math = 1;
 6584   }
 6585   /* So we fnsave on switch_to() */
 6586   current->flags|=PF_USEDFPU;
 6587 }
 6588 
 6589 #ifndef CONFIG_MATH_EMULATION
 6590 
      /* Stub used when the FPU emulator is not configured: there
       * is no FPU and no emulator, so kill the process with
       * SIGFPE; schedule() presumably lets the signal be acted on
       * before returning to the faulting code -- confirm. */
 6591 asmlinkage void math_emulate(long arg)
 6592 {
 6593   lock_kernel();
 6594   printk("math-emulation not enabled and no coprocessor "
 6595          "found.\n");
 6596   printk("killing %s.\n",current->comm);
 6597   force_sig(SIGFPE,current);
 6598   schedule();
 6599   unlock_kernel();
 6600 }
 6601 
 6602 #endif /* CONFIG_MATH_EMULATION */
 6603 
      /* Pentium F0 0F erratum workaround: remap the IDT at a
       * fresh virtual address whose page-table entry is
       * read-only, so the buggy locked-cmpxchg8b sequence faults
       * cleanly instead of locking up the CPU. */
 6604 __initfunc(void trap_init_f00f_bug(void))
 6605 {
 6606   unsigned long page;
 6607   pgd_t * pgd;
 6608   pmd_t * pmd;
 6609   pte_t * pte;
 6610 
 6611   /* Allocate a new page in virtual address space, move
 6612    * the IDT into it and write protect this page.  */
 6613   page = (unsigned long) vmalloc(PAGE_SIZE);
 6614   pgd = pgd_offset(&init_mm, page);
 6615   pmd = pmd_offset(pgd, page);
 6616   pte = pte_offset(pmd, page);
      /* Drop the page vmalloc backed this mapping with; its PTE
       * is then pointed straight at the real idt_table, RO. */
 6617   free_page(pte_page(*pte));
 6618   *pte = mk_pte(&idt_table, PAGE_KERNEL_RO);
 6619   local_flush_tlb();
 6620 
 6621   /* "idt" is magic - it overlaps the idt_descr variable
 6622    * so that updating idt will automatically update the
 6623    * idt descriptor..  */
 6624   idt = (struct desc_struct *)page;
 6625   __asm__ __volatile__("lidt %0": "=m" (idt_descr));
 6626 }
 6627 
 6628 #define _set_gate(gate_addr,type,dpl,addr)              \
 6629 do {                                                    \
 6630   int __d0, __d1;                                       \
 6631   __asm__ __volatile__ ("movw %%dx,%%ax\n\t"            \
 6632   "movw %4,%%dx\n\t"                                    \
 6633   "movl %%eax,%0\n\t"                                   \
 6634   "movl %%edx,%1"                                       \
 6635   :"=m" (*((long *) (gate_addr))),                      \
 6636    "=m" (*(1+(long *) (gate_addr))), "=&a" (__d0),      \
 6637                         "=&d" (__d1)                    \
 6638   :"i" ((short) (0x8000+(dpl<<13)+(type<<8))),          \
 6639    "3" ((char *) (addr)),"2" (__KERNEL_CS << 16));
 6640 } while (0)
 6641 
 6642 
 6643 /* This needs to use 'idt_table' rather than 'idt', and
 6644  * thus use the _nonmapped_ version of the IDT, as the
 6645  * Pentium F0 0F bugfix can have resulted in the mapped
 6646  * IDT being write-protected.  */
 Commentary
      /* Install an interrupt gate (type 14: disables interrupts
       * on entry), DPL 0, at IDT vector n. */
 6647 void set_intr_gate(unsigned int n, void *addr)
 6648 {
 6649   _set_gate(idt_table+n,14,0,addr);
 6650 }
 6651 
      /* Install a trap gate (type 15: leaves interrupts as-is),
       * DPL 0, at IDT vector n. */
 6652 static void __init set_trap_gate(unsigned int n,
 6653                                  void *addr)
 6654 {
 6655   _set_gate(idt_table+n,15,0,addr);
 6656 }
 6657 
      /* Install a trap gate with DPL 3, so user mode may invoke
       * the vector directly with an 'int' instruction. */
 6658 static void __init set_system_gate(unsigned int n,
 6659                                    void *addr)
 6660 {
 6661   _set_gate(idt_table+n,15,3,addr);
 6662 }
 6663 
      /* Install a call gate (type 12), DPL 3, at descriptor 'a'
       * (used for the lcall7 iBCS entry point). */
 6664 static void __init set_call_gate(void *a, void *addr)
 6665 {
 6666   _set_gate(a,12,3,addr);
 6667 }
 6668 
      /* Compose a segment descriptor from base/limit/type/dpl in
       * two 32-bit halves; 0x00408000 supplies the present bit
       * and the 32-bit default-operation-size bit. */
 6669 #define _set_seg_desc(gate_addr,type,dpl,base,limit) {  \
 6670   *((gate_addr)+1) = ((base) & 0xff000000) |            \
 6671     (((base) & 0x00ff0000)>>16) |                       \
 6672     ((limit) & 0xf0000) |                               \
 6673     ((dpl)<<13) |                                       \
 6674     (0x00408000) |                                      \
 6675     ((type)<<8);                                        \
 6676   *(gate_addr) = (((base) & 0x0000ffff)<<16) |          \
 6677     ((limit) & 0x0ffff); }
 6678 
      /* Write a TSS or LDT system descriptor byte-by-byte:
       * limit in bytes 0-1, base low in 2-4, type byte in 5,
       * byte 6 zero (limit 16-19/flags), base high in 7.  The
       * rorl pairs expose the upper halves of the base without
       * clobbering %eax permanently. */
 6679 #define _set_tssldt_desc(n,addr,limit,type)             \
 6680 __asm__ __volatile__ ("movw %3,0(%2)\n\t"               \
 6681   "movw %%ax,2(%2)\n\t"                                 \
 6682   "rorl $16,%%eax\n\t"                                  \
 6683   "movb %%al,4(%2)\n\t"                                 \
 6684   "movb %4,5(%2)\n\t"                                   \
 6685   "movb $0,6(%2)\n\t"                                   \
 6686   "movb %%ah,7(%2)\n\t"                                 \
 6687   "rorl $16,%%eax"                                      \
 6688   : "=m"(*(n)) : "a" (addr), "r"(n), "ir"(limit),       \
 6689                       "i"(type))
 6690 
      /* Install the GDT descriptor for CPU n's TSS (type 0x89 =
       * available 32-bit TSS).  The hard-coded limit 235 is
       * presumably the hardware-TSS byte limit -- confirm against
       * the thread_struct layout. */
 6691 void set_tss_desc(unsigned int n, void *addr)
 6692 {
 6693   _set_tssldt_desc(gdt_table+FIRST_TSS_ENTRY+(n<<1),
 6694                    (int)addr, 235, 0x89);
 6695 }
 6696 
      /* Install the GDT descriptor for an LDT of 'size' entries
       * (type 0x82 = LDT); each entry is 8 bytes, hence the
       * (size << 3) - 1 byte limit. */
 6697 void set_ldt_desc(unsigned int n, void *addr,
 6698                   unsigned int size)
 6699 {
 6700   _set_tssldt_desc(gdt_table+FIRST_LDT_ENTRY+(n<<1),
 6701                    (int)addr, ((size << 3) - 1), 0x82);
 6702 }
 6703 
 6704 #ifdef CONFIG_X86_VISWS_APIC
 6705 
 6706 /* On Rev 005 motherboards legacy device interrupt lines
 6707  * are wired directly to Lithium from the 307.  But the
 6708  * PROM leaves the interrupt type of each 307 logical
 6709  * device set appropriate for the 8259.  Later we'll
 6710  * actually use the 8259, but for now we have to flip the
 6711  * interrupt types to level triggered, active lo as
 6712  * required by Lithium.  */
      /* Index/data port pair of the National 307 Super I/O chip's
       * configuration interface. */
 6713 #define REG 0x2e  /* The register to read/write */
 6714 #define DEV 0x07  /* Register: Logical device select */
 6715 #define VAL 0x2f  /* The value to read/write */
 6716 
      /* Write 'val' to configuration register 'reg' of Super I/O
       * logical device 'dev': select the device via the
       * logical-device-select register, then address and write
       * the target register. */
 6717 static void
 6718 superio_outb(int dev, int reg, int val)
 6719 {
 6720   outb(DEV, REG);
 6721   outb(dev, VAL);
 6722   outb(reg, REG);
 6723   outb(val, VAL);
 6724 }
 6725 
      /* Read configuration register 'reg' of Super I/O logical
       * device 'dev' (companion to superio_outb; kept around even
       * when unused, hence the attribute). */
 6726 static int __attribute__ ((unused))
 6727 superio_inb(int dev, int reg)
 6728 {
 6729   outb(DEV, REG);
 6730   outb(dev, VAL);
 6731   outb(reg, REG);
 6732   return inb(VAL);
 6733 }
 6734 
      /* Logical device numbers and per-device interrupt
       * configuration registers of the 307 Super I/O chip. */
 6735 #define FLOP  3 /* floppy logical device */
 6736 #define PPORT 4 /* parallel logical device */
 6737 #define UART5 5 /* uart2 logical device (not wired up) */
 6738 #define UART6 6 /* uart1 logical device
 6739                  * (THIS is the serial port!) */
 6740 #define IDEST 0x70  /* int. destination
 6741                      * (which 307 IRQ line) reg. */
 6742 #define ITYPE 0x71  /* interrupt type register */
 6743 
 6744 /* interrupt type bits */
 6745 #define LEVEL 0x01  /* bit 0, 0 == edge triggered */
 6746 #define ACTHI 0x02  /* bit 1, 0 == active lo */
 6747 
      /* Board-specific quirk: on SGI 320 rev 5 boards, stop the
       * 307's uart1 from propagating interrupts (destination 0 =
       * no interrupt). */
 6748 static void
 6749 superio_init(void)
 6750 {
 6751   if (visws_board_type == VISWS_320 &&
 6752       visws_board_rev == 5) {
 6753     /* 0 means no intr propagated */
 6754     superio_outb(UART6, IDEST, 0);
 6755     printk("SGI 320 rev 5: "
 6756            "disabling 307 uart1 interrupt\n");
 6757   }
 6758 }
 6759 
      /* Map both Lithium PCI bridges into fixmap space, report
       * their bus numbers, and enable all their interrupt
       * lines. */
 6760 static void
 6761 lithium_init(void)
 6762 {
 6763   set_fixmap(FIX_LI_PCIA, LI_PCI_A_PHYS);
 6764   printk("Lithium PCI Bridge A, Bus Number: %d\n",
 6765          li_pcia_read16(LI_PCI_BUSNUM) & 0xff);
 6766   set_fixmap(FIX_LI_PCIB, LI_PCI_B_PHYS);
 6767   printk("Lithium PCI Bridge B (PIIX4), Bus Number: "
 6768          "%d\n", li_pcib_read16(LI_PCI_BUSNUM) & 0xff);
 6769 
 6770   /* XXX blindly enables all interrupts */
 6771   li_pcia_write16(LI_PCI_INTEN, 0xffff);
 6772   li_pcib_write16(LI_PCI_INTEN, 0xffff);
 6773 }
 6774 
      /* Map and enable the local APIC, the Cobalt CPU interface
       * and the Cobalt APIC, logging the IDs/revisions found. */
 6775 static void
 6776 cobalt_init(void)
 6777 {
 6778   /* On normal SMP PC this is used only with SMP, but we
 6779    * have to use it and set it up here to start the
 6780    * Cobalt clock */
 6781   set_fixmap(FIX_APIC_BASE, APIC_PHYS_BASE);
 6782   printk("Local APIC ID %lx\n", apic_read(APIC_ID));
 6783   printk("Local APIC Version %lx\n",
 6784          apic_read(APIC_VERSION));
 6785 
 6786   set_fixmap(FIX_CO_CPU, CO_CPU_PHYS);
 6787   printk("Cobalt Revision %lx\n",
 6788          co_cpu_read(CO_CPU_REV));
 6789 
 6790   set_fixmap(FIX_CO_APIC, CO_APIC_PHYS);
 6791   printk("Cobalt APIC ID %lx\n",
 6792          co_apic_read(CO_APIC_ID));
 6793 
 6794   /* Enable Cobalt APIC being careful to NOT change the
 6795    * ID! */
 6796   co_apic_write(CO_APIC_ID,
 6797                 co_apic_read(CO_APIC_ID)|CO_APIC_ENABLE);
 6798 
 6799   printk("Cobalt APIC enabled: ID reg %lx\n",
 6800          co_apic_read(CO_APIC_ID));
 6801 }
 6802 #endif
      /* Populate the IDT with the exception handlers, set up the
       * lcall7 call gate and the boot GDT task/LDT entries, and
       * load TR/LDTR.  Also probes for an EISA bus by reading the
       * "EISA" signature dword from the BIOS area. */
 6803 void __init trap_init(void)
 6804 {
 6805   if (readl(0x0FFFD9) ==
 6806       'E' + ('I' << 8) + ('S' << 16) + ('A' << 24))
 6807     EISA_bus = 1;
 6808   set_call_gate(&default_ldt,lcall7);
 6809   set_trap_gate(0,&divide_error);
 6810   set_trap_gate(1,&debug);
 6811   set_trap_gate(2,&nmi);
 6812   /* int3-5 can be called from all */
 6813   set_system_gate(3,&int3);
 6814   set_system_gate(4,&overflow);
 6815   set_system_gate(5,&bounds);
 6816   set_trap_gate(6,&invalid_op);
 6817   set_trap_gate(7,&device_not_available);
 6818   set_trap_gate(8,&double_fault);
 6819   set_trap_gate(9,&coprocessor_segment_overrun);
 6820   set_trap_gate(10,&invalid_TSS);
 6821   set_trap_gate(11,&segment_not_present);
 6822   set_trap_gate(12,&stack_segment);
 6823   set_trap_gate(13,&general_protection);
 6824   set_trap_gate(14,&page_fault);
 6825   set_trap_gate(15,&spurious_interrupt_bug);
 6826   set_trap_gate(16,&coprocessor_error);
 6827   set_trap_gate(17,&alignment_check);
 6828   set_system_gate(SYSCALL_VECTOR,&system_call);
 6829 
 6830   /* set up GDT task & ldt entries */
 6831   set_tss_desc(0, &init_task.tss);
 6832   set_ldt_desc(0, &default_ldt, 1);
 6833 
 6834   /* Clear NT, so that we won't have troubles with that
 6835    * later on */
      /* 0xffffbfff masks off EFLAGS.NT (bit 14). */
 6836   __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
 6837   load_TR(0);
 6838   load_ldt(0);
 6839 #ifdef CONFIG_X86_VISWS_APIC
 6840   superio_init();
 6841   lithium_init();
 6842   cobalt_init();
 6843 #endif
 6844 }

netlib.narod.ru < Back | Table of Contents | Next >

Site powered by uCoz