xenbits.xen.org Git - xenclient/kernel.git/commitdiff
Eliminate the pmd_walker struct in the page walker.

maps2-patches/maps2-uninline-some-functions-in-the-page-walker.patch
author     Matt Mackall <mpm@selenic.com>
           Tue, 6 Jan 2009 12:06:05 +0000 (12:06 +0000)
committer  Matt Mackall <mpm@selenic.com>
           Tue, 6 Jan 2009 12:06:05 +0000 (12:06 +0000)
This slightly simplifies things for the next few cleanups.

Signed-off-by: Matt Mackall <mpm@selenic.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 fs/proc/task_mmu.c |   47 ++++++++++++++++++-------------------------
 1 file changed, 20 insertions(+), 27 deletions(-)
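
Review note: a condensed sketch of the walker interface after this patch, pieced together from the hunks below (the *_sketch helper names are illustrative only, not code from the patch). The per-PMD callback loses its vm_area_struct argument, so callers that still need the vma pass it through the private cookie: show_smap() stashes it in struct mem_size_stats, while clear_refs_smap() hands the vma itself to walk_page_range().

/*
 * Sketch only: the walker interface after this patch, condensed from the
 * hunks below; not a buildable excerpt of fs/proc/task_mmu.c.
 */
static void walk_page_range(struct vm_area_struct *vma,
			    void (*action)(pmd_t *, unsigned long,
					   unsigned long, void *),
			    void *private);

/* show_smap(): the vma now travels inside the private stats structure. */
static void smaps_walk_sketch(struct vm_area_struct *vma,
			      struct mem_size_stats *mss)
{
	memset(mss, 0, sizeof *mss);
	mss->vma = vma;			/* field added by this patch */
	walk_page_range(vma, smaps_pte_range, mss);
}

/* clear_refs_smap(): the vma needs no wrapper, it is the cookie itself. */
static void clear_refs_walk_sketch(struct vm_area_struct *vma)
{
	walk_page_range(vma, clear_refs_pte_range, vma);
}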

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 5691dfee720c500cec234f8e9f8b5ac33a21781d..c875f92a43c71214c112f3b4e2c7747cf0422a40 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -125,6 +125,7 @@ static void pad_len_spaces(struct seq_file *m, int len)
 
 struct mem_size_stats
 {
+       struct vm_area_struct *vma;
        unsigned long resident;
        unsigned long shared_clean;
        unsigned long shared_dirty;
@@ -133,13 +134,6 @@ struct mem_size_stats
        unsigned long referenced;
 };
 
-struct pmd_walker {
-       struct vm_area_struct *vma;
-       void *private;
-       void (*action)(struct vm_area_struct *, pmd_t *, unsigned long,
-                      unsigned long, void *);
-};
-
 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
 {
        return NULL;
@@ -242,11 +236,11 @@ static int show_map(struct seq_file *m, void *v)
        return show_map_internal(m, v, NULL);
 }
 
-static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
-                           unsigned long addr, unsigned long end,
+static void smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                            void *private)
 {
        struct mem_size_stats *mss = private;
+       struct vm_area_struct *vma = mss->vma;
        pte_t *pte, ptent;
        spinlock_t *ptl;
        struct page *page;
@@ -282,10 +276,10 @@ static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
        cond_resched();
 }
 
-static void clear_refs_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
-                                unsigned long addr, unsigned long end,
-                                void *private)
+static void clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
+                                unsigned long end, void *private)
 {
+       struct vm_area_struct *vma = private;
        pte_t *pte, ptent;
        spinlock_t *ptl;
        struct page *page;
@@ -308,8 +302,10 @@ static void clear_refs_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
        cond_resched();
 }
 
-static void walk_pmd_range(struct pmd_walker *walker, pud_t *pud,
-                                 unsigned long addr, unsigned long end)
+static void walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
+                          void (*action)(pmd_t *, unsigned long,
+                                         unsigned long, void *),
+                          void *private)
 {
        pmd_t *pmd;
        unsigned long next;
@@ -319,12 +315,14 @@ static void walk_pmd_range(struct pmd_walker *walker, pud_t *pud,
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(pmd))
                        continue;
-               walker->action(walker->vma, pmd, addr, next, walker->private);
+               action(pmd, addr, next, private);
        }
 }
 
-static void walk_pud_range(struct pmd_walker *walker, pgd_t *pgd,
-                                 unsigned long addr, unsigned long end)
+static void walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
+                          void (*action)(pmd_t *, unsigned long,
+                                         unsigned long, void *),
+                          void *private)
 {
        pud_t *pud;
        unsigned long next;
@@ -334,7 +332,7 @@ static void walk_pud_range(struct pmd_walker *walker, pgd_t *pgd,
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
-               walk_pmd_range(walker, pud, addr, next);
+               walk_pmd_range(pud, addr, next, action, private);
        }
 }
 
@@ -348,18 +346,12 @@ static void walk_pud_range(struct pmd_walker *walker, pgd_t *pgd,
  * a callback for every bottom-level (PTE) page table.
  */
 static void walk_page_range(struct vm_area_struct *vma,
-                           void (*action)(struct vm_area_struct *,
-                                          pmd_t *, unsigned long,
+                           void (*action)(pmd_t *, unsigned long,
                                           unsigned long, void *),
                            void *private)
 {
        unsigned long addr = vma->vm_start;
        unsigned long end = vma->vm_end;
-       struct pmd_walker walker = {
-               .vma            = vma,
-               .private        = private,
-               .action         = action,
-       };
        pgd_t *pgd;
        unsigned long next;
 
@@ -368,7 +360,7 @@ static void walk_page_range(struct vm_area_struct *vma,
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
-               walk_pud_range(&walker, pgd, addr, next);
+               walk_pud_range(pgd, addr, next, action, private);
        }
 }
 
@@ -378,6 +370,7 @@ static int show_smap(struct seq_file *m, void *v)
        struct mem_size_stats mss;
 
        memset(&mss, 0, sizeof mss);
+       mss.vma = vma;
        if (vma->vm_mm && !is_vm_hugetlb_page(vma))
                walk_page_range(vma, smaps_pte_range, &mss);
        return show_map_internal(m, v, &mss);
@@ -390,7 +383,7 @@ void clear_refs_smap(struct mm_struct *mm)
        down_read(&mm->mmap_sem);
        for (vma = mm->mmap; vma; vma = vma->vm_next)
                if (vma->vm_mm && !is_vm_hugetlb_page(vma))
-                       walk_page_range(vma, clear_refs_pte_range, NULL);
+                       walk_page_range(vma, clear_refs_pte_range, vma);
        flush_tlb_mm(mm);
        up_read(&mm->mmap_sem);
 }
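
Usage note: with struct pmd_walker gone, a future walker user inside fs/proc/task_mmu.c only needs a pmd-level callback plus a private cookie. A hypothetical example, not part of this patch (count_present(), count_present_range() and struct present_count are invented names for illustration):

/*
 * Hypothetical example: count the present PTEs in a VMA using the
 * simplified walker.  The cookie carries both the vma (needed to take the
 * page-table lock) and the running count.
 */
struct present_count {
	struct vm_area_struct *vma;
	unsigned long present;
};

static void count_present_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, void *private)
{
	struct present_count *pc = private;
	spinlock_t *ptl;
	pte_t *pte;

	pte = pte_offset_map_lock(pc->vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		if (pte_present(*pte))
			pc->present++;
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
}

static unsigned long count_present(struct vm_area_struct *vma)
{
	struct present_count pc = { .vma = vma, .present = 0 };

	walk_page_range(vma, count_present_range, &pc);
	return pc.present;
}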