mm/mremap.c
34244 /*
34245 * linux/mm/mremap.c
34246 *
34247 * (C) Copyright 1996 Linus Torvalds
34248 */
34249
34250 #include <linux/slab.h>
34251 #include <linux/smp_lock.h>
34252 #include <linux/shm.h>
34253 #include <linux/mman.h>
34254 #include <linux/swap.h>
34255
34256 #include <asm/uaccess.h>
34257 #include <asm/pgtable.h>
34258
34259 extern int vm_enough_memory(long pages);
34260
34261 static inline pte_t *get_one_pte(struct mm_struct *mm,
34262 unsigned long addr)
34263 {
34264 pgd_t * pgd;
34265 pmd_t * pmd;
34266 pte_t * pte = NULL;
34267
34268 pgd = pgd_offset(mm, addr);
34269 if (pgd_none(*pgd))
34270 goto end;
34271 if (pgd_bad(*pgd)) {
34272 printk("move_one_page: bad source pgd (%08lx)\n",
34273 pgd_val(*pgd));
34274 pgd_clear(pgd);
34275 goto end;
34276 }
34277
34278 pmd = pmd_offset(pgd, addr);
34279 if (pmd_none(*pmd))
34280 goto end;
34281 if (pmd_bad(*pmd)) {
34282 printk("move_one_page: bad source pmd (%08lx)\n",
34283 pmd_val(*pmd));
34284 pmd_clear(pmd);
34285 goto end;
34286 }
34287
34288 pte = pte_offset(pmd, addr);
34289 if (pte_none(*pte))
34290 pte = NULL;
34291 end:
34292 return pte;
34293 }
34294
34295 static inline pte_t *alloc_one_pte(struct mm_struct *mm,
34296 unsigned long addr)
34297 {
34298 pmd_t * pmd;
34299 pte_t * pte = NULL;
34300
34301 pmd = pmd_alloc(pgd_offset(mm, addr), addr);
34302 if (pmd)
34303 pte = pte_alloc(pmd, addr);
34304 return pte;
34305 }
34306
34307 static inline int copy_one_pte(pte_t * src, pte_t * dst)
34308 {
34309 int error = 0;
34310 pte_t pte = *src;
34311
34312 if (!pte_none(pte)) {
34313 error++;
34314 if (dst) {
34315 pte_clear(src);
34316 set_pte(dst, pte);
34317 error--;
34318 }
34319 }
34320 return error;
34321 }
34322
34323 static int move_one_page(struct mm_struct *mm,
34324 unsigned long old_addr, unsigned long new_addr)
34325 {
34326 int error = 0;
34327 pte_t * src;
34328
34329 src = get_one_pte(mm, old_addr);
34330 if (src)
34331 error = copy_one_pte(src,
34332 alloc_one_pte(mm, new_addr));
34333 return error;
34334 }
34335
34336 static int move_page_tables(struct mm_struct * mm,
34337 unsigned long new_addr, unsigned long old_addr,
34338 unsigned long len)
34339 {
34340 unsigned long offset = len;
34341
34342 flush_cache_range(mm, old_addr, old_addr + len);
34343 flush_tlb_range(mm, old_addr, old_addr + len);
34344
34345 /* This is not the clever way to do this, but we're
34346 * taking the easy way out on the assumption that most
34347 * remappings will be only a few pages.. This also
34348 * makes error recovery easier. */
34349 while (offset) {
34350 offset -= PAGE_SIZE;
34351 if (move_one_page(mm, old_addr + offset,
34352 new_addr + offset))
34353 goto oops_we_failed;
34354 }
34355 return 0;
34356
34357 /* Ok, the move failed because we didn't have enough
34358 * pages for the new page table tree. This is unlikely,
34359 * but we have to take the possibility into account. In
34360 * that case we just move all the pages back (this will
34361 * work, because we still have the old page tables) */
34362 oops_we_failed:
34363 flush_cache_range(mm, new_addr, new_addr + len);
34364 while ((offset += PAGE_SIZE) < len)
34365 move_one_page(mm, new_addr + offset,
34366 old_addr + offset);
34367 zap_page_range(mm, new_addr, new_addr + len);
34368 flush_tlb_range(mm, new_addr, new_addr + len);
34369 return -1;
34370 }
34371
34372 static inline unsigned long move_vma(
34373 struct vm_area_struct * vma, unsigned long addr,
34374 unsigned long old_len, unsigned long new_len)
34375 {
34376 struct vm_area_struct * new_vma;
34377
34378 new_vma = kmem_cache_alloc(vm_area_cachep,SLAB_KERNEL);
34379 if (new_vma) {
34380 unsigned long new_addr = get_unmapped_area(addr,
34381 new_len);
34382
34383 if (new_addr &&
34384 !move_page_tables(current->mm, new_addr, addr,
34385 old_len)) {
34386 *new_vma = *vma;
34387 new_vma->vm_start = new_addr;
34388 new_vma->vm_end = new_addr+new_len;
34389 new_vma->vm_offset =
34390 vma->vm_offset + (addr - vma->vm_start);
34391 if (new_vma->vm_file)
34392 new_vma->vm_file->f_count++;
34393 if (new_vma->vm_ops && new_vma->vm_ops->open)
34394 new_vma->vm_ops->open(new_vma);
34395 insert_vm_struct(current->mm, new_vma);
34396 merge_segments(current->mm, new_vma->vm_start,
34397 new_vma->vm_end);
34398 do_munmap(addr, old_len);
34399 current->mm->total_vm += new_len >> PAGE_SHIFT;
34400 if (new_vma->vm_flags & VM_LOCKED) {
34401 current->mm->locked_vm += new_len >> PAGE_SHIFT;
34402 make_pages_present(new_vma->vm_start,
34403 new_vma->vm_end);
34404 }
34405 return new_addr;
34406 }
34407 kmem_cache_free(vm_area_cachep, new_vma);
34408 }
34409 return -ENOMEM;
34410 }
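
The vm_offset arithmetic above keeps the relocated mapping backed by the same bytes of the file: the distance of addr into the old vma is added to the old file offset. A tiny worked example with made-up values (standalone userspace code, not part of the kernel source):

#include <stdio.h>

int main(void)
{
	/* Illustrative values only. */
	unsigned long vm_start  = 0x40000000UL;	/* old vma->vm_start */
	unsigned long vm_offset = 0x1000UL;	/* old vma->vm_offset (file offset) */
	unsigned long addr      = 0x40002000UL;	/* start of the region being moved */

	unsigned long new_offset = vm_offset + (addr - vm_start);

	/* Prints 0x3000: the new vma maps the file from offset 0x3000 on,
	 * exactly the bytes that backed the moved pages before. */
	printf("new vm_offset = %#lx\n", new_offset);
	return 0;
}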
34411
34412 /* Expand (or shrink) an existing mapping, potentially
34413 * moving it at the same time (controlled by the
34414 * MREMAP_MAYMOVE flag and available VM space) */
34415 asmlinkage unsigned long sys_mremap(unsigned long addr,
34416 unsigned long old_len, unsigned long new_len,
34417 unsigned long flags)
34418 {
34419 struct vm_area_struct *vma;
34420 unsigned long ret = -EINVAL;
34421
34422 down(&current->mm->mmap_sem);
34423 lock_kernel();
34424 if (addr & ~PAGE_MASK)
34425 goto out;
34426 old_len = PAGE_ALIGN(old_len);
34427 new_len = PAGE_ALIGN(new_len);
34428
34429 /* Always allow a shrinking remap: that just unmaps the
34430 * unnecessary pages.. */
34431 ret = addr;
34432 if (old_len >= new_len) {
34433 do_munmap(addr+new_len, old_len - new_len);
34434 goto out;
34435 }
34436
34437 /* Ok, we need to grow.. */
34438 ret = -EFAULT;
34439 vma = find_vma(current->mm, addr);
34440 if (!vma || vma->vm_start > addr)
34441 goto out;
34442 /* We can't remap across vm area boundaries */
34443 if (old_len > vma->vm_end - addr)
34444 goto out;
34445 if (vma->vm_flags & VM_LOCKED) {
34446 unsigned long locked =
34447 current->mm->locked_vm << PAGE_SHIFT;
34448 locked += new_len - old_len;
34449 ret = -EAGAIN;
34450 if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
34451 goto out;
34452 }
34453 ret = -ENOMEM;
34454 if ((current->mm->total_vm << PAGE_SHIFT) +
34455 (new_len - old_len)
34456 > current->rlim[RLIMIT_AS].rlim_cur)
34457 goto out;
34458 /* Private writable mapping? Check memory
34459 * availability.. */
34460 if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) == VM_WRITE
34461 && !(flags & MAP_NORESERVE) &&
34462 !vm_enough_memory((new_len - old_len) >>
34463 PAGE_SHIFT))
34464 goto out;
34465
34466 /* old_len exactly to the end of the area.. */
34467 if (old_len == vma->vm_end - addr &&
34468 (old_len != new_len || !(flags & MREMAP_MAYMOVE))){
34469 unsigned long max_addr = TASK_SIZE;
34470 if (vma->vm_next)
34471 max_addr = vma->vm_next->vm_start;
34472 /* can we just expand the current mapping? */
34473 if (max_addr - addr >= new_len) {
34474 int pages = (new_len - old_len) >> PAGE_SHIFT;
34475 vma->vm_end = addr + new_len;
34476 current->mm->total_vm += pages;
34477 if (vma->vm_flags & VM_LOCKED) {
34478 current->mm->locked_vm += pages;
34479 make_pages_present(addr + old_len,
34480 addr + new_len);
34481 }
34482 ret = addr;
34483 goto out;
34484 }
34485 }
34486
34487 /* We weren't able to just expand or shrink the area,
34488 * we need to create a new one and move it.. */
34489 if (flags & MREMAP_MAYMOVE)
34490 ret = move_vma(vma, addr, old_len, new_len);
34491 else
34492 ret = -ENOMEM;
34493 out:
34494 unlock_kernel();
34495 up(&current->mm->mmap_sem);
34496 return ret;
34497 }
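
Seen from userspace, all of the shrink / grow-in-place / relocate cases above are hidden behind a single call; the caller only decides, via MREMAP_MAYMOVE, whether the kernel is allowed to return a different address. A minimal usage sketch for Linux/glibc (assumed environment; error handling reduced to perror()):

#define _GNU_SOURCE		/* for mremap() and MREMAP_MAYMOVE */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	size_t old_len = 4 * page, new_len = 16 * page;

	/* Anonymous private mapping: sys_mremap() will see a vma with
	 * VM_WRITE set and VM_SHARED clear. */
	void *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Grow the mapping.  With MREMAP_MAYMOVE the kernel may extend it
	 * in place (the fast path above) or fall back to move_vma() and
	 * return a different address. */
	void *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
	if (q == MAP_FAILED) {
		perror("mremap");
		return 1;
	}
	printf("old %p, new %p (%s)\n", p, q,
	       q == p ? "grown in place" : "moved");

	munmap(q, new_len);
	return 0;
}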