xen/arch/ia64/patch/linux-2.6.11/pgalloc.h @ 4615:58efb3448933 (debuggers.hg)

bitkeeper revision 1.1327.1.1 (426536d2PUqtjTi2v06bzD10RFwarg)

Merge bk://xen.bkbits.net/xeno-unstable.bk
into bkbits.net:/repos/x/xen-ia64/xeno-unstable-ia64.bk
author xen-ia64.adm@bkbits.net
date Tue Apr 19 16:50:26 2005 +0000 (2005-04-19)
parents f1c946e1226a
children 5b9e241131fb 593703cf4935
 pgalloc.h | 17 +++++++++++------
 1 files changed, 11 insertions(+), 6 deletions(-)

Index: linux-2.6.11-xendiffs/include/asm-ia64/pgalloc.h
===================================================================
--- linux-2.6.11-xendiffs.orig/include/asm-ia64/pgalloc.h	2005-04-08 11:57:30.909774800 -0500
+++ linux-2.6.11-xendiffs/include/asm-ia64/pgalloc.h	2005-04-08 11:58:08.102711219 -0500
@@ -18,6 +18,7 @@
 #include <linux/compiler.h>
 #include <linux/mm.h>
 #include <linux/page-flags.h>
+#include <linux/preempt.h>
 #include <linux/threads.h>

 #include <asm/mmu_context.h>
@@ -34,6 +35,10 @@
 #define pmd_quicklist (local_cpu_data->pmd_quick)
 #define pgtable_cache_size (local_cpu_data->pgtable_cache_sz)

+/* FIXME: Later, the 3-level page table handling should be reworked into a
+ * new interface on top of the Xen memory allocator. To simplify the first
+ * pass of moving to the Xen allocator, use xenheap pages temporarily.
+ */
 static inline pgd_t*
 pgd_alloc_one_fast (struct mm_struct *mm)
 {
@@ -61,7 +66,7 @@ pgd_alloc (struct mm_struct *mm)
 	pgd_t *pgd = pgd_alloc_one_fast(mm);

 	if (unlikely(pgd == NULL)) {
-		pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
+		pgd = (pgd_t *)alloc_xenheap_page();
 	}
 	return pgd;
 }
@@ -104,7 +109,7 @@ pmd_alloc_one_fast (struct mm_struct *mm
 static inline pmd_t*
 pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
 {
-	pmd_t *pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+	pmd_t *pmd = (pmd_t *)alloc_xenheap_page();

 	return pmd;
 }
@@ -136,7 +141,7 @@ pmd_populate_kernel (struct mm_struct *m
 static inline struct page *
 pte_alloc_one (struct mm_struct *mm, unsigned long addr)
 {
-	struct page *pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+	struct page *pte = alloc_xenheap_page();

 	return pte;
 }
@@ -144,7 +149,7 @@ pte_alloc_one (struct mm_struct *mm, uns
 static inline pte_t *
 pte_alloc_one_kernel (struct mm_struct *mm, unsigned long addr)
 {
-	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+	pte_t *pte = (pte_t *)alloc_xenheap_page();

 	return pte;
 }
@@ -152,13 +157,13 @@ pte_alloc_one_kernel (struct mm_struct *
 static inline void
 pte_free (struct page *pte)
 {
-	__free_page(pte);
+	free_xenheap_page(pte);
 }

 static inline void
 pte_free_kernel (pte_t *pte)
 {
-	free_page((unsigned long) pte);
+	free_xenheap_page((unsigned long) pte);
 }

 #define __pte_free_tlb(tlb, pte)	tlb_remove_page((tlb), (pte))
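
Aside from the new #include <linux/preempt.h> and the FIXME comment, every hunk above makes the same substitution: the Linux buddy-allocator calls (__get_free_page(), alloc_pages(), free_page(), __free_page()) are replaced by Xen heap calls (alloc_xenheap_page(), free_xenheap_page()). The C sketch below is not part of the patch; it only illustrates that pattern. The prototypes and stub types in it are assumptions chosen to keep the sketch self-contained, not declarations copied from the Xen or Linux headers of this era.

/* Illustrative sketch of the allocation substitution made by the patch above.
 * The two prototypes are assumptions for the sketch, not verbatim Xen
 * declarations from 2005. */
void *alloc_xenheap_page(void);    /* assumed: returns a page's virtual address, or NULL */
void  free_xenheap_page(void *v);  /* assumed: releases a page obtained above */

typedef struct { unsigned long pgd; } pgd_t;   /* stub type, for the sketch only */
struct mm_struct;                              /* opaque, for the sketch only */

/* Before the patch, the slow path of pgd_alloc() drew from the Linux buddy
 * allocator:  pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
 * After the patch, the same slot is filled from the Xen heap instead. */
static inline pgd_t *
sketch_pgd_slow_path (struct mm_struct *mm)
{
	(void)mm;                              /* unused; kept to mirror the original shape */
	return (pgd_t *)alloc_xenheap_page();
}

/* The free side mirrors it: free_page()/__free_page() become
 * free_xenheap_page() on the page's virtual address. */
static inline void
sketch_pte_free_kernel (void *pte)
{
	free_xenheap_page(pte);
}

One detail visible in the diff itself: the removed Linux calls requested zeroed memory (__GFP_ZERO), while the added alloc_xenheap_page() calls pass no such flag, so whether the replacement pages come back zeroed depends on the Xen allocator rather than on anything in this patch.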