xenbits.xen.org Git - xenclient/kernel.git/commitdiff
maps2-patches/00-backport-smaps-page-walker
author: Matt Mackall <mpm@selenic.com>
  Tue, 6 Jan 2009 12:06:05 +0000 (12:06 +0000)
committer: Matt Mackall <mpm@selenic.com>
  Tue, 6 Jan 2009 12:06:05 +0000 (12:06 +0000)

This patch series introduces /proc/pid/pagemap and /proc/kpagemap, which allow
detailed run-time examination of process memory usage at a page granularity.

The first several patches whip the page-walking code introduced for
/proc/pid/smaps and clear_refs into a more generic form, the next couple make
those interfaces optional, and the last two introduce the new interfaces, also
optional.

This respin adds simple, expandable headers to both pagemap and kpagemap as
suggested by Nikita.  I haven't moved pagewalk.c from lib/ to mm/ as suggested
by Nick as I still think lib is a better fit for its automatic conditional
linking.

This patch:

Uninline some functions in the page walker

Signed-off-by: Matt Mackall <mpm@selenic.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 fs/proc/task_mmu.c |   14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

fs/proc/task_mmu.c

index 6f470972abe83a76a049827d5bc614131af1a813..5691dfee720c500cec234f8e9f8b5ac33a21781d 100644 (file)
@@ -308,7 +308,7 @@ static void clear_refs_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
        cond_resched();
 }
 
-static inline void walk_pmd_range(struct pmd_walker *walker, pud_t *pud,
+static void walk_pmd_range(struct pmd_walker *walker, pud_t *pud,
                                  unsigned long addr, unsigned long end)
 {
        pmd_t *pmd;
@@ -323,7 +323,7 @@ static inline void walk_pmd_range(struct pmd_walker *walker, pud_t *pud,
        }
 }
 
-static inline void walk_pud_range(struct pmd_walker *walker, pgd_t *pgd,
+static void walk_pud_range(struct pmd_walker *walker, pgd_t *pgd,
                                  unsigned long addr, unsigned long end)
 {
        pud_t *pud;
@@ -347,11 +347,11 @@ static inline void walk_pud_range(struct pmd_walker *walker, pgd_t *pgd,
  * Recursively walk the page table for the memory area in a VMA, calling
  * a callback for every bottom-level (PTE) page table.
  */
-static inline void walk_page_range(struct vm_area_struct *vma,
-                                  void (*action)(struct vm_area_struct *,
-                                                 pmd_t *, unsigned long,
-                                                 unsigned long, void *),
-                                  void *private)
+static void walk_page_range(struct vm_area_struct *vma,
+                           void (*action)(struct vm_area_struct *,
+                                          pmd_t *, unsigned long,
+                                          unsigned long, void *),
+                           void *private)
 {
        unsigned long addr = vma->vm_start;
        unsigned long end = vma->vm_end;