netlib.narod.ru< Назад | Оглавление | Далее >

mm/mprotect.c

33972 /*
33973  *      linux/mm/mprotect.c
33974  *
33975  *  (C) Copyright 1994 Linus Torvalds
33976  */
33977 #include <linux/slab.h>
33978 #include <linux/smp_lock.h>
33979 #include <linux/shm.h>
33980 #include <linux/mman.h>
33981 
33982 #include <asm/uaccess.h>
33983 #include <asm/pgtable.h>
33984 
33985 static inline void change_pte_range(pmd_t * pmd,
33986   unsigned long address, unsigned long size,
33987   pgprot_t newprot)
33988 {
33989   pte_t * pte;
33990   unsigned long end;
33991 
33992   if (pmd_none(*pmd))
33993     return;
33994   if (pmd_bad(*pmd)) {
33995     printk("change_pte_range: bad pmd (%08lx)\n",
33996            pmd_val(*pmd));
33997     pmd_clear(pmd);
33998     return;
33999   }
34000   pte = pte_offset(pmd, address);
34001   address &= ~PMD_MASK;
34002   end = address + size;
34003   if (end > PMD_SIZE)
34004     end = PMD_SIZE;
34005   do {
34006     pte_t entry = *pte;
34007     if (pte_present(entry))
34008       set_pte(pte, pte_modify(entry, newprot));
34009     address += PAGE_SIZE;
34010     pte++;
34011   } while (address < end);
34012 }
34013 
34014 static inline void change_pmd_range(pgd_t * pgd,
34015   unsigned long address, unsigned long size,
34016   pgprot_t newprot)
34017 {
34018   pmd_t * pmd;
34019   unsigned long end;
34020 
34021   if (pgd_none(*pgd))
34022     return;
34023   if (pgd_bad(*pgd)) {
34024     printk("change_pmd_range: bad pgd (%08lx)\n",
34025            pgd_val(*pgd));
34026     pgd_clear(pgd);
34027     return;
34028   }
34029   pmd = pmd_offset(pgd, address);
34030   address &= ~PGDIR_MASK;
34031   end = address + size;
34032   if (end > PGDIR_SIZE)
34033     end = PGDIR_SIZE;
34034   do {
34035     change_pte_range(pmd, address, end - address,
34036                      newprot);
34037     address = (address + PMD_SIZE) & PMD_MASK;
34038     pmd++;
34039   } while (address < end);
34040 }
34041 
34042 static void change_protection(unsigned long start,
34043   unsigned long end, pgprot_t newprot)
34044 {
34045   pgd_t *dir;
34046   unsigned long beg = start;
34047 
34048   dir = pgd_offset(current->mm, start);
34049   flush_cache_range(current->mm, beg, end);
34050   while (start < end) {
34051     change_pmd_range(dir, start, end - start, newprot);
34052     start = (start + PGDIR_SIZE) & PGDIR_MASK;
34053     dir++;
34054   }
34055   flush_tlb_range(current->mm, beg, end);
34056   return;
34057 }
34058 
34059 static inline int mprotect_fixup_all(
34060   struct vm_area_struct * vma,
34061   int newflags, pgprot_t prot)
34062 {
34063   vma->vm_flags = newflags;
34064   vma->vm_page_prot = prot;
34065   return 0;
34066 }
34067 
/*
 * Change protection on the head [vma->vm_start, end) of a vma:
 * a new vma `n` is carved off for the head and given the new
 * protection, while the original vma is shrunk to start at `end`.
 * Returns 0 on success, -ENOMEM if no vma can be allocated.
 */
static inline int mprotect_fixup_start(
  struct vm_area_struct * vma,
  unsigned long end, int newflags, pgprot_t prot)
{
  struct vm_area_struct * n;

  n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
  if (!n)
    return -ENOMEM;
  *n = *vma;              /* head clone inherits everything */
  vma->vm_start = end;    /* original keeps the tail ...    */
  n->vm_end = end;        /* ... clone keeps the head       */
  /* n->vm_start still holds the old start, so this advances the
   * shrunk vma's file offset by exactly the bytes given away. */
  vma->vm_offset += vma->vm_start - n->vm_start;
  n->vm_flags = newflags;
  n->vm_page_prot = prot;
  if (n->vm_file)
    n->vm_file->f_count++;  /* clone holds its own file reference */
  if (n->vm_ops && n->vm_ops->open)
    n->vm_ops->open(n);
  insert_vm_struct(current->mm, n);
  return 0;
}
34090 
34091 static inline int mprotect_fixup_end(
34092   struct vm_area_struct * vma,
34093   unsigned long start,
34094   int newflags, pgprot_t prot)
34095 {
34096   struct vm_area_struct * n;
34097 
34098   n = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
34099   if (!n)
34100     return -ENOMEM;
34101   *n = *vma;
34102   vma->vm_end = start;
34103   n->vm_start = start;
34104   n->vm_offset += n->vm_start - vma->vm_start;
34105   n->vm_flags = newflags;
34106   n->vm_page_prot = prot;
34107   if (n->vm_file)
34108     n->vm_file->f_count++;
34109   if (n->vm_ops && n->vm_ops->open)
34110     n->vm_ops->open(n);
34111   insert_vm_struct(current->mm, n);
34112   return 0;
34113 }
34114 
/*
 * Change protection on an interior range [start, end) of a vma by
 * splitting it three ways: `left` and `right` keep the old
 * protection, while the original vma is narrowed to [start, end)
 * and takes the new flags.  Returns 0 on success, -ENOMEM if
 * either new vma cannot be allocated (nothing is modified then).
 */
static inline int mprotect_fixup_middle(
  struct vm_area_struct * vma,
  unsigned long start, unsigned long end,
  int newflags, pgprot_t prot)
{
  struct vm_area_struct * left, * right;

  left = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
  if (!left)
    return -ENOMEM;
  right = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
  if (!right) {
    kmem_cache_free(vm_area_cachep, left);
    return -ENOMEM;
  }
  *left = *vma;
  *right = *vma;
  left->vm_end = start;    /* left keeps [old_start, start) */
  vma->vm_start = start;   /* middle becomes [start, end)   */
  vma->vm_end = end;
  right->vm_start = end;   /* right keeps [end, old_end)    */
  /* left->vm_start still holds the old start, so both file offsets
   * are advanced by the distance from the old start of the vma. */
  vma->vm_offset += vma->vm_start - left->vm_start;
  right->vm_offset += right->vm_start - left->vm_start;
  vma->vm_flags = newflags;
  vma->vm_page_prot = prot;
  if (vma->vm_file)
    vma->vm_file->f_count += 2;  /* one reference per new vma */
  if (vma->vm_ops && vma->vm_ops->open) {
    vma->vm_ops->open(left);
    vma->vm_ops->open(right);
  }
  insert_vm_struct(current->mm, left);
  insert_vm_struct(current->mm, right);
  return 0;
}
34150 
34151 static int mprotect_fixup(struct vm_area_struct * vma,
34152   unsigned long start, unsigned long end,
34153   unsigned int newflags)
34154 {
34155   pgprot_t newprot;
34156   int error;
34157 
34158   if (newflags == vma->vm_flags)
34159     return 0;
34160   newprot = protection_map[newflags & 0xf];
34161   if (start == vma->vm_start) {
34162     if (end == vma->vm_end)
34163       error = mprotect_fixup_all(vma, newflags, newprot);
34164     else
34165       error = mprotect_fixup_start(vma, end, newflags,
34166                                    newprot);
34167   } else if (end == vma->vm_end)
34168     error = mprotect_fixup_end(vma, start, newflags,
34169                                newprot);
34170   else
34171     error = mprotect_fixup_middle(vma, start, end,
34172                                   newflags, newprot);
34173 
34174   if (error)
34175     return error;
34176 
34177   change_protection(start, end, newprot);
34178   return 0;
34179 }
34180 
/*
 * mprotect(2): change the access protection of the pages in
 * [start, start+len).  `start` must be page aligned; `len` is
 * rounded up to a whole number of pages.  The range may span
 * several vmas but must be fully mapped with no holes.
 */
asmlinkage int sys_mprotect(unsigned long start,
  size_t len, unsigned long prot)
{
  unsigned long nstart, end, tmp;
  struct vm_area_struct * vma, * next;
  int error = -EINVAL;

  if (start & ~PAGE_MASK)
    return -EINVAL;
  len = (len + ~PAGE_MASK) & PAGE_MASK;  /* round up to a page */
  end = start + len;
  if (end < start)     /* wrapped around the address space */
    return -EINVAL;
  if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
    return -EINVAL;
  if (end == start)    /* empty range: nothing to do */
    return 0;

  down(&current->mm->mmap_sem);
  lock_kernel();

  vma = find_vma(current->mm, start);
  error = -EFAULT;
  if (!vma || vma->vm_start > start)  /* start is not mapped */
    goto out;

  for (nstart = start ; ; ) {
    unsigned int newflags;

    /* Here we know that
     * vma->vm_start <= nstart < vma->vm_end. */

    /* Keep the vma's non-protection flags, replace the rest. */
    newflags =
      prot | (vma->vm_flags &
              ~(PROT_READ | PROT_WRITE | PROT_EXEC));
    /* Every requested protection bit (low nibble) must be allowed
     * by the corresponding bit one nibble up — presumably the
     * VM_MAY* flags; verify against the vm_flags definitions. */
    if ((newflags & ~(newflags >> 4)) & 0xf) {
      error = -EACCES;
      break;
    }

    if (vma->vm_end >= end) {
      /* Final (or only) vma: fix up the remainder and stop. */
      error = mprotect_fixup(vma, nstart, end, newflags);
      break;
    }

    /* Range continues past this vma: fix up to its end and step
     * to the next vma, which must begin exactly where this ends. */
    tmp = vma->vm_end;
    next = vma->vm_next;
    error = mprotect_fixup(vma, nstart, tmp, newflags);
    if (error)
      break;
    nstart = tmp;
    vma = next;
    if (!vma || vma->vm_start != nstart) {  /* hole in the range */
      error = -EFAULT;
      break;
    }
  }
  /* Coalesce adjacent vmas that now carry identical attributes
   * (runs even on error, covering any partial progress made). */
  merge_segments(current->mm, start, end);
out:
  unlock_kernel();
  up(&current->mm->mmap_sem);
  return error;
}

netlib.narod.ru< Назад | Оглавление | Далее >

Сайт управляется системой uCoz