arch/i386/mm/fault.c
/*
 *  linux/arch/i386/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/hardirq.h>

extern void die(const char *,struct pt_regs *,long);

/*
 * Ugly, ugly, but the goto's result in better assembly..
 */
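/*
 * Editor's note (assumption based on the 2.2-era uaccess code): on a
 * plain 386 the CPU ignores the page-level write-protect bit while in
 * supervisor mode, so copy_to_user() cannot rely on the MMU faulting
 * on read-only pages.  This routine walks the user's VMAs and forces
 * the faults by hand instead.
 */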
int __verify_write(const void * addr, unsigned long size)
{
        struct vm_area_struct * vma;
        unsigned long start = (unsigned long) addr;

        if (!size)
                return 1;

        vma = find_vma(current->mm, start);
        if (!vma)
                goto bad_area;
        if (vma->vm_start > start)
                goto check_stack;

good_area:
        if (!(vma->vm_flags & VM_WRITE))
                goto bad_area;
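        /*
         * Reduce (start, size) to a page count: after this, "size" is
         * the number of page steps left after the first page, e.g. a
         * one-byte buffer leaves size == 0 and faults exactly one page.
         */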
        size--;
        size += start & ~PAGE_MASK;
        size >>= PAGE_SHIFT;
        start &= PAGE_MASK;

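        /*
         * Touch each page in turn: handle_mm_fault() with write_access
         * set makes the page present and writable (doing copy-on-write
         * if necessary).  Moving into the next VMA is only legal if it
         * is contiguous and also writable.
         */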
        for (;;) {
                handle_mm_fault(current, vma, start, 1);
                if (!size)
                        break;
                size--;
                start += PAGE_SIZE;
                if (start < vma->vm_end)
                        continue;
                vma = vma->vm_next;
                if (!vma || vma->vm_start != start)
                        goto bad_area;
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        }
        return 1;

check_stack:
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, start) == 0)
                goto good_area;

bad_area:
        return 0;
}

asmlinkage void do_invalid_op(struct pt_regs *, unsigned long);
extern unsigned long idt;

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * error_code:
 *      bit 0 == 0 means no page found, 1 means protection fault
 *      bit 1 == 0 means read, 1 means write
 *      bit 2 == 0 means kernel, 1 means user-mode
 */
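/*
 * For example, error_code == 6 (binary 110) is a user-mode write to a
 * not-present page, and error_code == 5 (binary 101) is a user-mode
 * read of a present but protected page.
 */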
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct * vma;
        unsigned long address;
        unsigned long page;
        unsigned long fixup;
        int write;

        /* get the address */
        __asm__("movl %%cr2,%0":"=r" (address));

        tsk = current;
        mm = tsk->mm;

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
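        /*
         * Editor's note: in this kernel, kernel threads run with
         * mm == &init_mm, i.e. no user address space of their own, and
         * interrupt handlers have no process context they could sleep
         * in, so both cases go straight to the kernel-fault path.
         */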
        if (in_interrupt() || mm == &init_mm)
                goto no_context;

        down(&mm->mmap_sem);
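        /*
         * mmap_sem, taken above, serializes our walk of the VMA list
         * against concurrent mmap/munmap/brk and stack growth by other
         * threads sharing this mm.
         */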

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (error_code & 4) {
                /*
                 * accessing the stack below %esp is always a bug.
                 * The "+ 32" is there due to some instructions (like
                 * pusha) doing post-decrement on the stack and that
                 * doesn't show up until later..
                 */
                if (address + 32 < regs->esp)
                        goto bad_area;
        }
        if (expand_stack(vma, address))
                goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
        write = 0;
        switch (error_code & 3) {
                default:        /* 3: write, present */
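                        /*
                         * A write fault on a present page is a
                         * protection fault: either a copy-on-write
                         * page or the bootup WP test below.
                         */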
#ifdef TEST_VERIFY_AREA
                        if (regs->cs == KERNEL_CS)
                                printk("WP fault at %08lx\n", regs->eip);
#endif
                        /* fall through */
                case 2:         /* write, not present */
                        if (!(vma->vm_flags & VM_WRITE))
                                goto bad_area;
                        write++;
                        break;
                case 1:         /* read, present */
                        goto bad_area;
                case 0:         /* read, not present */
                        if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                                goto bad_area;
        }

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
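        /*
         * handle_mm_fault() returns zero only when it could not
         * service the fault at all, typically because memory for the
         * page or its page tables could not be allocated.
         */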
        if (!handle_mm_fault(tsk, vma, address, write))
                goto do_sigbus;

        /*
         * Did it hit the DOS screen memory VA from vm86 mode?
         */
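        /*
         * The 32 pages from 0xA0000 to 0xBFFFF are the legacy PC video
         * memory; the bitmap records which of those pages the vm86
         * task has touched so its monitor can emulate screen access.
         */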
        if (regs->eflags & VM_MASK) {
                unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
                if (bit < 32)
                        tsk->tss.screen_bitmap |= 1 << bit;
        }
        up(&mm->mmap_sem);
        return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
        up(&mm->mmap_sem);

        /* User mode accesses just cause a SIGSEGV */
        if (error_code & 4) {
                tsk->tss.cr2 = address;
                tsk->tss.error_code = error_code;
                tsk->tss.trap_no = 14;
                force_sig(SIGSEGV, tsk);
                return;
        }

        /*
         * Pentium F0 0F C7 C8 bug workaround.
         */
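        /*
         * Editor's note on the mechanism: the boot-time workaround
         * remaps the IDT through a write-protected page, so the locked
         * IDT access made while the buggy "F0 0F C7 C8" instruction
         * delivers its invalid-opcode trap takes a page fault here
         * instead of hanging the CPU.  IDT entries are 8 bytes (hence
         * the >> 3), and entry 6 is the invalid-opcode vector, so we
         * dispatch do_invalid_op() by hand.
         */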
        if (boot_cpu_data.f00f_bug) {
                unsigned long nr;

                nr = (address - idt) >> 3;

                if (nr == 6) {
                        do_invalid_op(regs, 0);
                        return;
                }
        }

no_context:
        /* Are we prepared to handle this kernel fault? */
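        /*
         * search_exception_table() looks the faulting %eip up in the
         * table of user-access instructions (built by copy_from_user()
         * and friends); each entry names a fixup address to resume at
         * instead of oopsing.
         */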
        if ((fixup = search_exception_table(regs->eip)) != 0) {
                regs->eip = fixup;
                return;
        }

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 *
 * First we check if it was the bootup rw-test, though..
 */
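/*
 * At boot the kernel writes through a write-protected mapping of the
 * page at PAGE_OFFSET to see whether this CPU honours the WP bit in
 * supervisor mode (i486 and later do, the 386 does not).  If that test
 * write faults into here, WP works; record it, make the page writable
 * again and flush the stale TLB entry.
 */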
        if (boot_cpu_data.wp_works_ok < 0 &&
                        address == PAGE_OFFSET && (error_code & 1)) {
                boot_cpu_data.wp_works_ok = 1;
                pg0[0] = pte_val(mk_pte(PAGE_OFFSET, PAGE_KERNEL));
                local_flush_tlb();
                /*
                 * Beware: Black magic here. The printk is needed here
                 * to flush CPU state on certain buggy processors.
                 */
                printk("Ok");
                return;
        }

        if (address < PAGE_SIZE)
                printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
        else
                printk(KERN_ALERT "Unable to handle kernel paging request");
        printk(" at virtual address %08lx\n", address);
        __asm__("movl %%cr3,%0" : "=r" (page));
        printk(KERN_ALERT "current->tss.cr3 = %08lx, %%cr3 = %08lx\n",
                tsk->tss.cr3, page);
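        /*
         * Hand-walk the two-level i386 page tables for the diagnostic:
         * the top 10 address bits (address >> 22) index the page
         * directory, bit 0 of an entry is its present bit, and
         * address & 0x003ff000 extracts the middle 10 bits that index
         * the page table.
         */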
        page = ((unsigned long *) __va(page))[address >> 22];
        printk(KERN_ALERT "*pde = %08lx\n", page);
        if (page & 1) {
                page &= PAGE_MASK;
                address &= 0x003ff000;
                page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
                printk(KERN_ALERT "*pte = %08lx\n", page);
        }
        die("Oops", regs, error_code);
        do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
do_sigbus:
        up(&mm->mmap_sem);

        /*
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
        tsk->tss.cr2 = address;
        tsk->tss.error_code = error_code;
        tsk->tss.trap_no = 14;
        force_sig(SIGBUS, tsk);

        /* Kernel mode? Handle exceptions or die */
        if (!(error_code & 4))
                goto no_context;
}