xenbits.xen.org Git - xenclient/kernel.git/commitdiff
This makes the walker more generic.
maps2-patches/maps2-eliminate-the-pmd_walker-struct-in-the-page-walker.patch
author: Matt Mackall <mpm@selenic.com>
Tue, 6 Jan 2009 12:06:05 +0000 (12:06 +0000)
committer: Matt Mackall <mpm@selenic.com>
Tue, 6 Jan 2009 12:06:05 +0000 (12:06 +0000)
Signed-off-by: Matt Mackall <mpm@selenic.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 fs/proc/task_mmu.c |   19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

fs/proc/task_mmu.c

index c875f92a43c71214c112f3b4e2c7747cf0422a40..922e19f79b6382ba98eb7ca490c56c5db84990bb 100644 (file)
@@ -337,25 +337,26 @@ static void walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
 }
 
 /*
- * walk_page_range - walk the page tables of a VMA with a callback
- * @vma - VMA to walk
+ * walk_page_range - walk a memory map's page tables with a callback
+ * @mm - memory map to walk
+ * @addr - starting address
+ * @end - ending address
  * @action - callback invoked for every bottom-level (PTE) page table
  * @private - private data passed to the callback function
  *
  * Recursively walk the page table for the memory area in a VMA, calling
  * a callback for every bottom-level (PTE) page table.
  */
-static void walk_page_range(struct vm_area_struct *vma,
+static void walk_page_range(struct mm_struct *mm,
+                           unsigned long addr, unsigned long end,
                            void (*action)(pmd_t *, unsigned long,
                                           unsigned long, void *),
                            void *private)
 {
-       unsigned long addr = vma->vm_start;
-       unsigned long end = vma->vm_end;
        pgd_t *pgd;
        unsigned long next;
 
-       for (pgd = pgd_offset(vma->vm_mm, addr); addr != end;
+       for (pgd = pgd_offset(mm, addr); addr != end;
             pgd++, addr = next) {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
@@ -372,7 +373,8 @@ static int show_smap(struct seq_file *m, void *v)
        memset(&mss, 0, sizeof mss);
        mss.vma = vma;
        if (vma->vm_mm && !is_vm_hugetlb_page(vma))
-               walk_page_range(vma, smaps_pte_range, &mss);
+               walk_page_range(vma->vm_mm, vma->vm_start, vma->vm_end,
+                               smaps_pte_range, &mss);
        return show_map_internal(m, v, &mss);
 }
 
@@ -383,7 +385,8 @@ void clear_refs_smap(struct mm_struct *mm)
        down_read(&mm->mmap_sem);
        for (vma = mm->mmap; vma; vma = vma->vm_next)
                if (vma->vm_mm && !is_vm_hugetlb_page(vma))
-                       walk_page_range(vma, clear_refs_pte_range, vma);
+                       walk_page_range(vma->vm_mm, vma->vm_start, vma->vm_end,
+                                       clear_refs_pte_range, vma);
        flush_tlb_mm(mm);
        up_read(&mm->mmap_sem);
 }