xenbits.xen.org Git - xenclient/kernel.git/commitdiff
maps2-patches/maps2-propagate-errors-from-callback-in-page-walker.patch
author: Matt Mackall <mpm@selenic.com>
Tue, 6 Jan 2009 12:06:05 +0000 (12:06 +0000)
committer: Matt Mackall <mpm@selenic.com>
Tue, 6 Jan 2009 12:06:05 +0000 (12:06 +0000)

This allows iterating over all levels of the page tables. Recursion continues
to the depth of the lowest supplied callback.

This makes the page walker nearly completely generic and should allow it to
replace some other hand-rolled page table walkers.

Signed-off-by: Matt Mackall <mpm@selenic.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 fs/proc/task_mmu.c |   89 +++++++++++++++++++++++++++++++------------
 1 file changed, 66 insertions(+), 23 deletions(-)

fs/proc/task_mmu.c

index 17d3bcd82b1d634fa5cbbeaf2ada0910f3327aad..0add187b60f21f8098b37289edcf6f506fe7ee27 100644 (file)
@@ -304,10 +304,35 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
        return 0;
 }
 
+/*
+ * mm_walk - callbacks invoked while walking a range of page tables.
+ *
+ * Any callback may be NULL; the walker descends only as deep as the
+ * lowest level for which a callback is supplied (see walk_pmd_range /
+ * walk_pud_range / walk_page_range below).  Each callback receives the
+ * table entry, the [addr, end) range it covers, and the caller's private
+ * cookie.  A non-zero return from any callback aborts the walk and is
+ * propagated back to the walk_page_range() caller.
+ */
+struct mm_walk {
+       int (*pgd_entry)(pgd_t *, unsigned long, unsigned long, void *);
+       int (*pud_entry)(pud_t *, unsigned long, unsigned long, void *);
+       int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, void *);
+       int (*pte_entry)(pte_t *, unsigned long, unsigned long, void *);
+};
+
+/*
+ * Walk the ptes of one pmd over [addr, end), invoking walk->pte_entry
+ * for each populated entry.  Returns the first non-zero callback result
+ * (after unmapping the pte page), or 0 once the whole range is covered.
+ * Callers guarantee walk->pte_entry is non-NULL (see walk_pmd_range).
+ */
+static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+                         struct mm_walk *walk, void *private)
+{
+       pte_t *pte;
+       int err;
+
+       for (pte = pte_offset_map(pmd, addr); addr != end;
+            addr += PAGE_SIZE, pte++) {
+               /* unpopulated entries are skipped without a callback */
+               if (pte_none(*pte))
+                       continue;
+               /* NOTE(review): both range arguments are 'addr' here; the
+                * mainline page walker passes addr + PAGE_SIZE as the end
+                * of a single pte's range — confirm this is intended. */
+               err = walk->pte_entry(pte, addr, addr, private);
+               if (err) {
+                       /* drop the kmap before propagating the error */
+                       pte_unmap(pte);
+                       return err;
+               }
+       }
+       pte_unmap(pte);
+       return 0;
+}
+
 static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
-                         int (*action)(pmd_t *, unsigned long,
-                                       unsigned long, void *),
-                         void *private)
+                         struct mm_walk *walk, void *private)
 {
        pmd_t *pmd;
        unsigned long next;
@@ -318,18 +343,22 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(pmd))
                        continue;
-               err = action(pmd, addr, next, private);
-               if (err)
-                       return err;
+               if (walk->pmd_entry) {
+                       err = walk->pmd_entry(pmd, addr, next, private);
+                       if (err)
+                               return err;
+               }
+               if (walk->pte_entry) {
+                       err = walk_pte_range(pmd, addr, next, walk, private);
+                       if (err)
+                               return err;
+               }
        }
-
        return 0;
 }
 
 static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
-                         int (*action)(pmd_t *, unsigned long,
-                                       unsigned long, void *),
-                         void *private)
+                         struct mm_walk *walk, void *private)
 {
        pud_t *pud;
        unsigned long next;
@@ -340,11 +369,17 @@ static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
-               err = walk_pmd_range(pud, addr, next, action, private);
-               if (err)
-                       return err;
+               if (walk->pud_entry) {
+                       err = walk->pud_entry(pud, addr, next, private);
+                       if (err)
+                               return err;
+               }
+               if (walk->pmd_entry || walk->pte_entry) {
+                       err = walk_pmd_range(pud, addr, next, walk, private);
+                       if (err)
+                               return err;
+               }
        }
-
        return 0;
 }
 
@@ -361,9 +396,7 @@ static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
  */
 static int walk_page_range(struct mm_struct *mm,
                           unsigned long addr, unsigned long end,
-                          int (*action)(pmd_t *, unsigned long,
-                                        unsigned long, void *),
-                          void *private)
+                          struct mm_walk *walk, void *private)
 {
        pgd_t *pgd;
        unsigned long next;
@@ -374,14 +407,22 @@ static int walk_page_range(struct mm_struct *mm,
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
-               err = walk_pud_range(pgd, addr, next, action, private);
-               if (err)
-                       return err;
+               if (walk->pgd_entry) {
+                       err = walk->pgd_entry(pgd, addr, next, private);
+                       if (err)
+                               return err;
+               }
+               if (walk->pud_entry || walk->pmd_entry || walk->pte_entry) {
+                       err = walk_pud_range(pgd, addr, next, walk, private);
+                       if (err)
+                               return err;
+               }
        }
-
        return 0;
 }
 
+/* Callback set for /proc/<pid>/smaps: accounting is done per pmd only. */
+static struct mm_walk smaps_walk = { .pmd_entry = smaps_pte_range };
+
 static int show_smap(struct seq_file *m, void *v)
 {
        struct vm_area_struct *vma = v;
@@ -391,10 +432,12 @@ static int show_smap(struct seq_file *m, void *v)
        mss.vma = vma;
        if (vma->vm_mm && !is_vm_hugetlb_page(vma))
                walk_page_range(vma->vm_mm, vma->vm_start, vma->vm_end,
-                               smaps_pte_range, &mss);
+                               &smaps_walk, &mss);
        return show_map_internal(m, v, &mss);
 }
 
+/* Callback set for clear_refs_smap(): only a pmd-level handler is needed. */
+static struct mm_walk clear_refs_walk = { .pmd_entry = clear_refs_pte_range };
+
 void clear_refs_smap(struct mm_struct *mm)
 {
        struct vm_area_struct *vma;
@@ -403,7 +446,7 @@ void clear_refs_smap(struct mm_struct *mm)
        for (vma = mm->mmap; vma; vma = vma->vm_next)
                if (vma->vm_mm && !is_vm_hugetlb_page(vma))
                        walk_page_range(vma->vm_mm, vma->vm_start, vma->vm_end,
-                                       clear_refs_pte_range, vma);
+                                       &clear_refs_walk, vma);
        flush_tlb_mm(mm);
        up_read(&mm->mmap_sem);
 }