include/asm-i386/pgtable.h
10876 #ifndef _I386_PGTABLE_H
10877 #define _I386_PGTABLE_H
10878
10879 #include <linux/config.h>
10880
10881 /* The Linux memory management assumes a three-level page
10882 * table setup. On the i386, we use that, but "fold" the
10883 * mid level into the top-level page table, so that we
10884 * physically have the same two-level page table as the
10885 * i386 mmu expects.
10886 *
10887 * This file contains the functions and defines necessary
10888 * to modify and use the i386 page table tree. */
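With the pmd folded away, a 32-bit linear address splits into a 10-bit page-directory index, a 10-bit page-table index, and a 12-bit page offset. A minimal standalone sketch of that split, with the constants duplicated locally for illustration:

#include <stdio.h>

/* Illustrative only: the i386 two-level split used by this header. */
#define ILLUS_PAGE_SHIFT   12          /* 4KB pages              */
#define ILLUS_PGDIR_SHIFT  22          /* 10-bit directory index */

int main(void)
{
        unsigned long addr = 0xC0123456UL;   /* arbitrary example address */
        unsigned long pgd_index = addr >> ILLUS_PGDIR_SHIFT;
        unsigned long pte_index = (addr >> ILLUS_PAGE_SHIFT) & 0x3ff;
        unsigned long offset    = addr & 0xfff;

        /* Prints: pgd 768, pte 291, offset 0x456 */
        printf("pgd %lu, pte %lu, offset 0x%lx\n",
               pgd_index, pte_index, offset);
        return 0;
}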
10889 #ifndef __ASSEMBLY__
10890 #include <asm/processor.h>
10891 #include <asm/fixmap.h>
10892 #include <linux/tasks.h>
10893
10894 /* Caches aren't brain-dead on the Intel. */
10895 #define flush_cache_all() do { } while (0)
10896 #define flush_cache_mm(mm) do { } while (0)
10897 #define flush_cache_range(mm, start, end) \
10898 do { } while (0)
10899 #define flush_cache_page(vma, vmaddr) do { } while (0)
10900 #define flush_page_to_ram(page) do { } while (0)
10901 #define flush_icache_range(start, end) do { } while (0)
10902
10903 /* TLB flushing:
10904 *
10905 * - flush_tlb() flushes the current mm struct's TLBs
10906 * - flush_tlb_all() flushes all processes' TLBs
10907 * - flush_tlb_mm(mm) flushes the specified mm context's
10908 * TLBs
10909 * - flush_tlb_page(vma, vmaddr) flushes one page
10910 * - flush_tlb_range(mm, start, end) flushes a range of
10911 * pages
10912 *
10913 * ...but the i386 has somewhat limited TLB flushing
10914 * capabilities, and page-granular flushes are available
10915 * only on i486 and up. */
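As a rough guide to choosing among these: page-granular changes want flush_tlb_page(), while whole-address-space changes want flush_tlb_mm(). A hypothetical in-kernel sketch (the helper name and use case are illustrative, not from the source):

/* Hypothetical sketch: write-protect one user page, then flush
 * only that page's TLB entry rather than the whole TLB. */
static inline void illus_wrprotect_one(struct vm_area_struct *vma,
                                       pte_t *ptep, unsigned long address)
{
        set_pte(ptep, pte_wrprotect(*ptep));  /* clear _PAGE_RW       */
        flush_tlb_page(vma, address);         /* page-granular flush  */
}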
10916
10917 #define __flush_tlb() \
10918 do { unsigned long tmpreg; \
10919 __asm__ __volatile__("movl %%cr3,%0\n\tmovl %0,%%cr3": \
10920 "=r" (tmpreg) : :"memory"); } while (0)
10921
10922 #ifndef CONFIG_X86_INVLPG
10923 #define __flush_tlb_one(addr) flush_tlb()
10924 #else
10925 #define __flush_tlb_one(addr) \
10926 __asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
10927 #endif
10928
10929 #ifndef __SMP__
10930
10931 #define flush_tlb() __flush_tlb()
10932 #define flush_tlb_all() __flush_tlb()
10933 #define local_flush_tlb() __flush_tlb()
10934
10935 static inline void flush_tlb_mm(struct mm_struct *mm)
10936 {
10937 if (mm == current->mm)
10938 __flush_tlb();
10939 }
10940
10941 static inline void flush_tlb_page(
10942 struct vm_area_struct *vma, unsigned long addr)
10943 {
10944 if (vma->vm_mm == current->mm)
10945 __flush_tlb_one(addr);
10946 }
10947
10948 static inline void flush_tlb_range(struct mm_struct *mm,
10949 unsigned long start, unsigned long end)
10950 {
10951 if (mm == current->mm)
10952 __flush_tlb();
10953 }
10954
10955 #else
10956
10957 /* We aren't very clever about this yet - SMP could
10958 * certainly avoid some global flushes.. */
10959 #include <asm/smp.h>
10960
10961 #define local_flush_tlb() \
10962 __flush_tlb()
10963
10964
10965 #define CLEVER_SMP_INVALIDATE
10966 #ifdef CLEVER_SMP_INVALIDATE
10967
10968 /* Smarter SMP flushing macros. c/o Linus Torvalds.
10969 * These mean you can really definitely utterly forget
10970 * about writing to user space from interrupts. (It's not
10971 * allowed anyway). */
10972
10973 static inline void flush_tlb_current_task(void)
10974 {
10975 /* just one copy of this mm? */
10976 if (atomic_read(&current->mm->count) == 1)
10977 local_flush_tlb(); /* and that's us, so.. */
10978 else
10979 smp_flush_tlb();
10980 }
10981
10982 #define flush_tlb() flush_tlb_current_task()
10983
10984 #define flush_tlb_all() smp_flush_tlb()
10985
10986 static inline void flush_tlb_mm(struct mm_struct * mm)
10987 {
10988 if (mm == current->mm && atomic_read(&mm->count) == 1)
10989 local_flush_tlb();
10990 else
10991 smp_flush_tlb();
10992 }
10993
10994 static inline void flush_tlb_page(
10995 struct vm_area_struct * vma, unsigned long va)
10996 {
10997 if (vma->vm_mm == current->mm &&
10998 atomic_read(&current->mm->count) == 1)
10999 __flush_tlb_one(va);
11000 else
11001 smp_flush_tlb();
11002 }
11003
11004 static inline void flush_tlb_range(struct mm_struct * mm,
11005 unsigned long start, unsigned long end)
11006 {
11007 flush_tlb_mm(mm);
11008 }
11009
11010
11011 #else
11012
11013 #define flush_tlb() \
11014 smp_flush_tlb()
11015
11016 #define flush_tlb_all() flush_tlb()
11017
11018 static inline void flush_tlb_mm(struct mm_struct *mm)
11019 {
11020 flush_tlb();
11021 }
11022
11023 static inline void flush_tlb_page(
11024 struct vm_area_struct *vma, unsigned long addr)
11025 {
11026 flush_tlb();
11027 }
11028
11029 static inline void flush_tlb_range(struct mm_struct *mm,
11030 unsigned long start, unsigned long end)
11031 {
11032 flush_tlb();
11033 }
11034 #endif
11035 #endif
11036 #endif /* !__ASSEMBLY__ */
11037
11038
11039 /* Certain architectures need to do special things when
11040 * PTEs within a page table are directly modified. Thus,
11041 * the following hook is made available. */
11042 #define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
11043
11044 /* PMD_SHIFT determines the size of the area a
11045 * second-level page table can map */
11046 #define PMD_SHIFT 22
11047 #define PMD_SIZE (1UL << PMD_SHIFT)
11048 #define PMD_MASK (~(PMD_SIZE-1))
11049
11050 /* PGDIR_SHIFT determines what a third-level page table
11051 * entry can map */
11052 #define PGDIR_SHIFT 22
11053 #define PGDIR_SIZE (1UL << PGDIR_SHIFT)
11054 #define PGDIR_MASK (~(PGDIR_SIZE-1))
11055
11056 /* entries per page directory level: the i386 is
11057 * two-level, so we don't really have any PMD directory
11058 * physically. */
11059 #define PTRS_PER_PTE 1024
11060 #define PTRS_PER_PMD 1
11061 #define PTRS_PER_PGD 1024
11062 #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
11063
11064 /* pgd entries used up by user/kernel: */
11065
11066 #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
11067 #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
11068 #define __USER_PGD_PTRS \
11069 ((__PAGE_OFFSET >> PGDIR_SHIFT) & 0x3ff)
11070 #define __KERNEL_PGD_PTRS (PTRS_PER_PGD-__USER_PGD_PTRS)
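Assuming the usual 3GB/1GB split (PAGE_OFFSET = 0xC0000000, which this file does not define itself), these evaluate to 768 user and 256 kernel pgd entries. A standalone sketch of the arithmetic, with the constants duplicated for illustration:

#include <stdio.h>

/* Illustrative only: assumes the common PAGE_OFFSET of 0xC0000000. */
#define ILLUS_PAGE_OFFSET  0xC0000000UL
#define ILLUS_PGDIR_SHIFT  22
#define ILLUS_PTRS_PER_PGD 1024

int main(void)
{
        unsigned long user   = ILLUS_PAGE_OFFSET >> ILLUS_PGDIR_SHIFT;
        unsigned long kernel = ILLUS_PTRS_PER_PGD - user;

        /* Prints: 768 user pgd entries, 256 kernel pgd entries */
        printf("%lu user pgd entries, %lu kernel pgd entries\n",
               user, kernel);
        return 0;
}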
11071
11072 #ifndef __ASSEMBLY__
11073 /* Just any arbitrary offset to the start of the vmalloc
11074 * VM area: the current 8MB value just means that there
11075 * will be an 8MB "hole" after the physical memory until
11076 * the kernel virtual memory starts. That means that any
11077 * out-of-bounds memory accesses will hopefully be
11078 * caught. The vmalloc() routines leave a hole of 4kB
11079 * between each vmalloced area for the same reason. ;) */
11080 #define VMALLOC_OFFSET (8*1024*1024)
11081 #define VMALLOC_START (((unsigned long) high_memory + \
11082 VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
11083 #define VMALLOC_VMADDR(x) ((unsigned long)(x))
11084 #define VMALLOC_END (FIXADDR_START)
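A worked example of the rounding, assuming PAGE_OFFSET = 0xC0000000 and 64MB of RAM so that high_memory is 0xC4000000: VMALLOC_START = (0xC4000000 + 0x800000) & ~0x7FFFFF = 0xC4800000, i.e. 8MB past the end of mapped physical memory. A standalone sketch:

#include <stdio.h>

/* Illustrative only: recomputes VMALLOC_START for a hypothetical
 * machine with high_memory at 0xC4000000 (64MB RAM, 3GB PAGE_OFFSET). */
#define ILLUS_VMALLOC_OFFSET (8 * 1024 * 1024)

int main(void)
{
        unsigned long high_memory   = 0xC4000000UL;
        unsigned long vmalloc_start =
                (high_memory + ILLUS_VMALLOC_OFFSET) &
                ~(ILLUS_VMALLOC_OFFSET - 1UL);

        printf("VMALLOC_START = 0x%08lx\n", vmalloc_start);  /* 0xC4800000 */
        return 0;
}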
11085
11086 /* The 4MB page is guesswork.. Detailed in the infamous
11087 * "Chapter H" of the Pentium details, but assuming Intel
11088 * did the straightforward thing, this bit set in the
11089 * page directory entry just means that the page
11090 * directory entry points directly to a 4MB-aligned block
11091 * of memory. */
11092 #define _PAGE_PRESENT 0x001
11093 #define _PAGE_RW 0x002
11094 #define _PAGE_USER 0x004
11095 #define _PAGE_WT 0x008
11096 #define _PAGE_PCD 0x010
11097 #define _PAGE_ACCESSED 0x020
11098 #define _PAGE_DIRTY 0x040
11099 /* 4 MB page, Pentium+, if present.. */
11100 #define _PAGE_4M 0x080
11101 #define _PAGE_GLOBAL 0x100 /* Global TLB entry, PPro+ */
11102
11103 #define _PAGE_PROTNONE 0x080 /* If not present */
11104
11105 #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW |\
11106 _PAGE_USER | _PAGE_ACCESSED |\
11107 _PAGE_DIRTY)
11108 #define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW |\
11109 _PAGE_ACCESSED | _PAGE_DIRTY)
11110 #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED|\
11111 _PAGE_DIRTY)
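To make the bits concrete: a present, writable, user-accessible, accessed and dirty entry for physical frame number 0x1f3 (physical address 0x1F3000) has the value 0x001F3067, since the low 12 bits carry the flags (here exactly _PAGE_TABLE = 0x067) and the rest the frame address. A standalone decoding sketch with the flag values duplicated locally:

#include <stdio.h>

/* Illustrative copies of the flag bits defined above. */
#define P_PRESENT  0x001
#define P_RW       0x002
#define P_USER     0x004
#define P_ACCESSED 0x020
#define P_DIRTY    0x040

int main(void)
{
        unsigned long pte = 0x001F3067UL;    /* example entry */

        /* Prints: pfn 0x1f3: present rw user accessed dirty */
        printf("pfn 0x%lx:%s%s%s%s%s\n", pte >> 12,
               (pte & P_PRESENT)  ? " present"  : "",
               (pte & P_RW)       ? " rw"       : "",
               (pte & P_USER)     ? " user"     : "",
               (pte & P_ACCESSED) ? " accessed" : "",
               (pte & P_DIRTY)    ? " dirty"    : "");
        return 0;
}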
11112
11113 #define PAGE_NONE __pgprot(_PAGE_PROTNONE | \
11114 _PAGE_ACCESSED)
11115 #define PAGE_SHARED __pgprot(_PAGE_PRESENT | \
11116 _PAGE_RW | \
11117 _PAGE_USER | \
11118 _PAGE_ACCESSED)
11119 #define PAGE_COPY __pgprot(_PAGE_PRESENT | \
11120 _PAGE_USER | \
11121 _PAGE_ACCESSED)
11122 #define PAGE_READONLY __pgprot(_PAGE_PRESENT | \
11123 _PAGE_USER | \
11124 _PAGE_ACCESSED)
11125 #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | \
11126 _PAGE_RW | \
11127 _PAGE_DIRTY | \
11128 _PAGE_ACCESSED)
11129 #define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | \
11130 _PAGE_DIRTY | \
11131 _PAGE_ACCESSED)
11132
11133 /* The i386 can't do page protection for execute, and
11134 * treats that the same as a read. Also, write
11135 * permission implies read permission. This is the
11136 * closest we can get.. */
11137 #define __P000 PAGE_NONE
11138 #define __P001 PAGE_READONLY
11139 #define __P010 PAGE_COPY
11140 #define __P011 PAGE_COPY
11141 #define __P100 PAGE_READONLY
11142 #define __P101 PAGE_READONLY
11143 #define __P110 PAGE_COPY
11144 #define __P111 PAGE_COPY
11145
11146 #define __S000 PAGE_NONE
11147 #define __S001 PAGE_READONLY
11148 #define __S010 PAGE_SHARED
11149 #define __S011 PAGE_SHARED
11150 #define __S100 PAGE_READONLY
11151 #define __S101 PAGE_READONLY
11152 #define __S110 PAGE_SHARED
11153 #define __S111 PAGE_SHARED
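Elsewhere in the kernel (mm/mmap.c) these sixteen values are gathered into a protection_map[] array indexed by a vma's VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits; the sketch below rebuilds such a table locally only to show the indexing, and is not the kernel's own object. Note that the private writable cases map to PAGE_COPY, which is how copy-on-write gets triggered on the first write fault.

/* Illustrative only: a local copy of the usual protection table,
 * indexed by (VM_READ=1 | VM_WRITE=2 | VM_EXEC=4 | VM_SHARED=8). */
static pgprot_t illus_protection_map[16] = {
        __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
        __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
/* Index 3 (private read+write) yields PAGE_COPY: the page starts
 * read-only and the first write fault copies it.  Index 11 (shared
 * read+write) yields PAGE_SHARED and writes go straight through. */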
11154
11155 /* Define this if things work differently on an i386 and
11156 * an i486: it will (on an i486) warn about kernel memory
11157 * accesses that are done without a
11158 * 'verify_area(VERIFY_WRITE,..)' */
11159 #undef TEST_VERIFY_AREA
11160
11161 /* page table for 0-4MB for everybody */
11162 extern unsigned long pg0[1024];
11163 /* zero page used for uninitialized stuff */
11164 extern unsigned long empty_zero_page[1024];
11165
11166 /* BAD_PAGETABLE is used when we need a bogus page-table,
11167 * while BAD_PAGE is used for a bogus page. ZERO_PAGE is
11168 * a global shared page that is always zero: used for
11169 * zero-mapped memory areas etc.. */
11170 extern pte_t __bad_page(void);
11171 extern pte_t * __bad_pagetable(void);
11172
11173 #define BAD_PAGETABLE __bad_pagetable()
11174 #define BAD_PAGE __bad_page()
11175 #define ZERO_PAGE ((unsigned long) empty_zero_page)
11176
11177 /* number of bits that fit into a memory pointer */
11178 #define BITS_PER_PTR (8*sizeof(unsigned long))
11179
11180 /* to align the pointer to a pointer address */
11181 #define PTR_MASK (~(sizeof(void*)-1))
11182
11183 /* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
11184 /* 64-bit machines, beware! SRB. */
11185 #define SIZEOF_PTR_LOG2 2
11186
11187 /* to find an entry in a page-table */
11188 #define PAGE_PTR(address) \
11189 ((unsigned long)(address)>> \
11190 (PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
11191
11192 /* to set the page-dir */
11193 #define SET_PAGE_DIR(tsk,pgdir) \
11194 do { \
11195 unsigned long __pgdir = __pa(pgdir); \
11196 (tsk)->tss.cr3 = __pgdir; \
11197 if ((tsk) == current) \
11198 __asm__ \
11199 __volatile__("movl %0,%%cr3": :"r" (__pgdir)); \
11200 } while (0)
11201
11202 #define pte_none(x) (!pte_val(x))
11203 #define pte_present(x) (pte_val(x) & \
11204 (_PAGE_PRESENT|_PAGE_PROTNONE))
11205 #define pte_clear(xp) do { pte_val(*(xp)) = 0; } while(0)
11206
11207 #define pmd_none(x) (!pmd_val(x))
11208 #define pmd_bad(x) \
11209 ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != \
11210 _KERNPG_TABLE)
11211 #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
11212 #define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while(0)
11213
11214 /* The "pgd_xxx()" functions here are trivial for a
11215 * folded two-level setup: the pgd is never bad, and a
11216 * pmd always exists (as it's folded into the pgd entry)
11217 */
11218 extern inline int pgd_none(pgd_t pgd) { return 0; }
11219 extern inline int pgd_bad(pgd_t pgd) { return 0; }
11220 extern inline int pgd_present(pgd_t pgd) { return 1; }
11221 extern inline void pgd_clear(pgd_t * pgdp) { }
11222
11223 /* The following only work if pte_present() is true.
11224 * Undefined behaviour if not.. */
11225 extern inline int pte_read(pte_t pte) \
11226 { return pte_val(pte) & _PAGE_USER; }
11227 extern inline int pte_exec(pte_t pte) \
11228 { return pte_val(pte) & _PAGE_USER; }
11229 extern inline int pte_dirty(pte_t pte) \
11230 { return pte_val(pte) & _PAGE_DIRTY; }
11231 extern inline int pte_young(pte_t pte) \
11232 { return pte_val(pte) & _PAGE_ACCESSED; }
11233 extern inline int pte_write(pte_t pte) \
11234 { return pte_val(pte) & _PAGE_RW; }
11235
11236 extern inline pte_t pte_rdprotect(pte_t pte) \
11237 { pte_val(pte) &= ~_PAGE_USER; return pte; }
11238 extern inline pte_t pte_exprotect(pte_t pte) \
11239 { pte_val(pte) &= ~_PAGE_USER; return pte; }
11240 extern inline pte_t pte_mkclean(pte_t pte) \
11241 { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
11242 extern inline pte_t pte_mkold(pte_t pte) \
11243 { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
11244 extern inline pte_t pte_wrprotect(pte_t pte) \
11245 { pte_val(pte) &= ~_PAGE_RW; return pte; }
11246 extern inline pte_t pte_mkread(pte_t pte) \
11247 { pte_val(pte) |= _PAGE_USER; return pte; }
11248 extern inline pte_t pte_mkexec(pte_t pte) \
11249 { pte_val(pte) |= _PAGE_USER; return pte; }
11250 extern inline pte_t pte_mkdirty(pte_t pte) \
11251 { pte_val(pte) |= _PAGE_DIRTY; return pte; }
11252 extern inline pte_t pte_mkyoung(pte_t pte) \
11253 { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
11254 extern inline pte_t pte_mkwrite(pte_t pte) \
11255 { pte_val(pte) |= _PAGE_RW; return pte; }
11256
11257 /* Conversion functions: convert a page and protection to
11258 * a page entry, and a page entry and page directory to
11259 * the page they refer to. */
11260 #define mk_pte(page, pgprot) \
11261 ({ pte_t __pte; \
11262 pte_val(__pte) = __pa(page) + pgprot_val(pgprot); \
11263 __pte; })
11264
11265 /* This takes a physical page address that is used by the
11266 * remapping functions */
11267 #define mk_pte_phys(physpage, pgprot) \
11268 ({ pte_t __pte; \
11269 pte_val(__pte) = physpage + pgprot_val(pgprot); \
11270 __pte; })
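A hedged usage sketch (in-kernel context assumed; the helper and the choice of address are illustrative): mk_pte_phys() builds an entry from a raw physical address, as remapping code does, while mk_pte() takes the kernel virtual address of a page.

/* Hypothetical sketch: build a kernel read-write entry for the
 * legacy VGA frame buffer at physical 0xA0000 and install it. */
static inline void illus_map_vga(pte_t *ptep)
{
        set_pte(ptep, mk_pte_phys(0xA0000, PAGE_KERNEL));
}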
11271
11272 extern inline pte_t pte_modify(pte_t pte,
11273 pgprot_t newprot)
11274 { pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) |
11275 pgprot_val(newprot); return pte; }
11276
11277 #define pte_page(pte) \
11278 ((unsigned long) __va(pte_val(pte) & PAGE_MASK))
11279
11280 #define pmd_page(pmd) \
11281 ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
11282
11283 /* to find an entry in a page-table-directory */
11284 #define pgd_offset(mm, address) \
11285 ((mm)->pgd + ((address) >> PGDIR_SHIFT))
11286
11287 /* to find an entry in a kernel page-table-directory */
11288 #define pgd_offset_k(address) \
11289 pgd_offset(&init_mm, address)
11290
11291 /* Find an entry in the second-level page table.. */
11292 extern inline pmd_t * pmd_offset(pgd_t * dir,
11293 unsigned long address)
11294 {
11295 return (pmd_t *) dir;
11296 }
11297
11298 /* Find an entry in the third-level page table.. */
11299 #define pte_offset(pmd, address) \
11300 ((pte_t *) (pmd_page(*pmd) + ((address>>10) & \
11301 ((PTRS_PER_PTE-1)<<2))))
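Putting pgd_offset(), pmd_offset() and pte_offset() together, a software walk of a process's page tables follows the pattern below (an in-kernel sketch of the idiom used by the generic mm code; the helper itself is illustrative and skips the locking a real caller needs):

/* Sketch: return the kernel virtual address backing 'address' in
 * 'mm', or 0 if no page is present.  Pattern only. */
static inline unsigned long illus_follow_page(struct mm_struct *mm,
                                              unsigned long address)
{
        pgd_t *pgd = pgd_offset(mm, address);
        pmd_t *pmd = pmd_offset(pgd, address);  /* folded: same entry */
        pte_t *pte;

        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return 0;
        pte = pte_offset(pmd, address);
        if (!pte_present(*pte))
                return 0;
        return pte_page(*pte) + (address & ~PAGE_MASK);
}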
11302
11303 /* Allocate and free page tables. The xxx_kernel()
11304 * versions are used to allocate a kernel page table -
11305 * this turns on ASN bits if any (none on the i386). */
11306
11307 #define pgd_quicklist (current_cpu_data.pgd_quick)
11308 #define pmd_quicklist ((unsigned long *)0)
11309 #define pte_quicklist (current_cpu_data.pte_quick)
11310 #define pgtable_cache_size \
11311 (current_cpu_data.pgtable_cache_sz)
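The quicklists above are per-CPU free lists of page-table pages: each free page stores the address of the next free page in its own first word, so the push and pop in free_pgd_fast() and get_pgd_fast() below are just a couple of memory accesses. A comment-form illustration of the layout (not additional kernel code):

/*
 * pgd_quicklist --> [ free page A ] --> [ free page B ] --> NULL
 *                     word 0 = &B         word 0 = NULL
 *
 * free_pgd_fast() pushes by writing the old list head into the
 * page's first word; get_pgd_fast() pops the head and follows it.
 */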
11312
11313 extern __inline__ pgd_t *get_pgd_slow(void)
11314 {
11315 pgd_t *ret=(pgd_t *)__get_free_page(GFP_KERNEL), *init;
11316
11317 if (ret) {
11318 init = pgd_offset(&init_mm, 0);
11319 memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
11320 memcpy(ret + USER_PTRS_PER_PGD,
11321 init + USER_PTRS_PER_PGD,
11322 (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
11323 }
11324 return ret;
11325 }
11326
11327 extern __inline__ pgd_t *get_pgd_fast(void)
11328 {
11329 unsigned long *ret;
11330
11331 if((ret = pgd_quicklist) != NULL) {
11332 pgd_quicklist = (unsigned long *)(*ret);
11333 ret[0] = ret[1];
11334 pgtable_cache_size--;
11335 } else
11336 ret = (unsigned long *)get_pgd_slow();
11337 return (pgd_t *)ret;
11338 }
11339
11340 extern __inline__ void free_pgd_fast(pgd_t *pgd)
11341 {
11342 *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
11343 pgd_quicklist = (unsigned long *) pgd;
11344 pgtable_cache_size++;
11345 }
11346
11347 extern __inline__ void free_pgd_slow(pgd_t *pgd)
11348 {
11349 free_page((unsigned long)pgd);
11350 }
11351
11352 extern pte_t *get_pte_slow(pmd_t *pmd,
11353 unsigned long address_preadjusted);
11354 extern pte_t *get_pte_kernel_slow(pmd_t *pmd,
11355 unsigned long address_preadjusted);
11356
11357 extern __inline__ pte_t *get_pte_fast(void)
11358 {
11359 unsigned long *ret;
11360
11361 if((ret = (unsigned long *)pte_quicklist) != NULL) {
11362 pte_quicklist = (unsigned long *)(*ret);
11363 ret[0] = ret[1];
11364 pgtable_cache_size--;
11365 }
11366 return (pte_t *)ret;
11367 }
11368
11369 extern __inline__ void free_pte_fast(pte_t *pte)
11370 {
11371 *(unsigned long *)pte = (unsigned long) pte_quicklist;
11372 pte_quicklist = (unsigned long *) pte;
11373 pgtable_cache_size++;
11374 }
11375
11376 extern __inline__ void free_pte_slow(pte_t *pte)
11377 {
11378 free_page((unsigned long)pte);
11379 }
11380
11381 /* We don't use pmd cache, so these are dummy routines */
11382 extern __inline__ pmd_t *get_pmd_fast(void)
11383 {
11384 return (pmd_t *)0;
11385 }
11386
11387 extern __inline__ void free_pmd_fast(pmd_t *pmd)
11388 {
11389 }
11390
11391 extern __inline__ void free_pmd_slow(pmd_t *pmd)
11392 {
11393 }
11394
11395 extern void __bad_pte(pmd_t *pmd);
11396 extern void __bad_pte_kernel(pmd_t *pmd);
11397
11398 #define pte_free_kernel(pte) free_pte_fast(pte)
11399 #define pte_free(pte) free_pte_fast(pte)
11400 #define pgd_free(pgd) free_pgd_fast(pgd)
11401 #define pgd_alloc() get_pgd_fast()
11402
11403 extern inline pte_t * pte_alloc_kernel(
11404 pmd_t * pmd, unsigned long address)
11405 {
11406 address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
11407 if (pmd_none(*pmd)) {
11408 pte_t * page = (pte_t *) get_pte_fast();
11409
11410 if (!page)
11411 return get_pte_kernel_slow(pmd, address);
11412 pmd_val(*pmd) = _KERNPG_TABLE + __pa(page);
11413 return page + address;
11414 }
11415 if (pmd_bad(*pmd)) {
11416 __bad_pte_kernel(pmd);
11417 return NULL;
11418 }
11419 return (pte_t *) pmd_page(*pmd) + address;
11420 }
11421
11422 extern inline pte_t * pte_alloc(pmd_t * pmd,
11423 unsigned long address)
11424 {
11425 address = (address >> (PAGE_SHIFT-2)) &
11426 4*(PTRS_PER_PTE - 1);
11427
11428 if (pmd_none(*pmd))
11429 goto getnew;
11430 if (pmd_bad(*pmd))
11431 goto fix;
11432 return (pte_t *) (pmd_page(*pmd) + address);
11433 getnew:
11434 {
11435 unsigned long page = (unsigned long) get_pte_fast();
11436
11437 if (!page)
11438 return get_pte_slow(pmd, address);
11439 pmd_val(*pmd) = _PAGE_TABLE + __pa(page);
11440 return (pte_t *) (page + address);
11441 }
11442 fix:
11443 __bad_pte(pmd);
11444 return NULL;
11445 }
11446
11447 /* allocating and freeing a pmd is trivial: the 1-entry
11448 * pmd is inside the pgd, so has no extra memory
11449 * associated with it. */
11450 extern inline void pmd_free(pmd_t * pmd)
11451 {
11452 }
11453
11454 extern inline pmd_t * pmd_alloc(pgd_t * pgd,
11455 unsigned long address)
11456 {
11457 return (pmd_t *) pgd;
11458 }
11459
11460 #define pmd_free_kernel pmd_free
11461 #define pmd_alloc_kernel pmd_alloc
11462
11463 extern int do_check_pgt_cache(int, int);
11464
11465 extern inline void set_pgdir(unsigned long address,
11466 pgd_t entry)
11467 {
11468 struct task_struct * p;
11469 pgd_t *pgd;
11470 #ifdef __SMP__
11471 int i;
11472 #endif
11473
11474 read_lock(&tasklist_lock);
11475 for_each_task(p) {
11476 if (!p->mm)
11477 continue;
11478 *pgd_offset(p->mm,address) = entry;
11479 }
11480 read_unlock(&tasklist_lock);
11481 #ifndef __SMP__
11482 for (pgd = (pgd_t *)pgd_quicklist; pgd;
11483 pgd = (pgd_t *)*(unsigned long *)pgd)
11484 pgd[address >> PGDIR_SHIFT] = entry;
11485 #else
11486 /* To pgd_alloc/pgd_free, one holds master kernel lock
11487 * and so does our callee, so we can modify pgd caches
11488 * of other CPUs as well. -jj */
11489 for (i = 0; i < NR_CPUS; i++)
11490 for (pgd = (pgd_t *)cpu_data[i].pgd_quick; pgd;
11491 pgd = (pgd_t *)*(unsigned long *)pgd)
11492 pgd[address >> PGDIR_SHIFT] = entry;
11493 #endif
11494 }
11495
11496 extern pgd_t swapper_pg_dir[1024];
11497
11498 /* The i386 doesn't have any external MMU info: the
11499 * kernel page tables contain all the necessary
11500 * information. */
11501 extern inline void update_mmu_cache(
11502 struct vm_area_struct * vma,
11503 unsigned long address, pte_t pte)
11504 {
11505 }
11506
11507 #define SWP_TYPE(entry) (((entry) >> 1) & 0x3f)
11508 #define SWP_OFFSET(entry) ((entry) >> 8)
11509 #define SWP_ENTRY(type,offset) \
11510 (((type) << 1) | ((offset) << 8))
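With this encoding the present bit (bit 0) of a swapped-out entry is always clear, the swap type occupies bits 1-6, and the offset begins at bit 8; for example SWP_ENTRY(2, 1000) = (2 << 1) | (1000 << 8) = 0x3E804, which decodes back to type 2, offset 1000. A standalone check with the macros duplicated locally:

#include <stdio.h>

/* Illustrative copies of the swap-entry macros above. */
#define ILLUS_SWP_TYPE(e)        (((e) >> 1) & 0x3f)
#define ILLUS_SWP_OFFSET(e)      ((e) >> 8)
#define ILLUS_SWP_ENTRY(t, o)    (((t) << 1) | ((o) << 8))

int main(void)
{
        unsigned long e = ILLUS_SWP_ENTRY(2UL, 1000UL);

        /* Prints: entry 0x3e804: type 2, offset 1000 */
        printf("entry 0x%lx: type %lu, offset %lu\n",
               e, ILLUS_SWP_TYPE(e), ILLUS_SWP_OFFSET(e));
        return 0;
}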
11511
11512 #define module_map vmalloc
11513 #define module_unmap vfree
11514
11515 #endif /* !__ASSEMBLY__ */
11516
11517 /* Needs to be defined here and not in linux/mm.h, as it
11518 * is arch dependent */
11519 #define PageSkip(page) (0)
11520 #define kern_addr_valid(addr) (1)
11521
11522 #endif /* _I386_PGTABLE_H */