xenbits.xen.org Git - xenclient/kernel.git/commitdiff
imported patch maps2-patches/00-backport-smaps-page-walker
author    t_jeang <devnull@localhost>
Tue, 6 Jan 2009 12:06:05 +0000 (12:06 +0000)
committer t_jeang <devnull@localhost>
Tue, 6 Jan 2009 12:06:05 +0000 (12:06 +0000)
fs/proc/base.c
fs/proc/internal.h
fs/proc/task_mmu.c
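
This commit backports the upstream smaps page-walker rework and the
/proc/<pid>/clear_refs interface: writing a non-zero value to clear_refs
resets the accessed/referenced bits on the process's pages, so the new
"Referenced:" field in /proc/<pid>/smaps afterwards reports only memory
touched since the clear. A minimal userspace sketch of that usage (not
part of this commit; error handling trimmed):

#include <stdio.h>
#include <sys/types.h>

/* Reset the accessed/referenced bits for every page of <pid>. */
static int clear_refs(pid_t pid)
{
        char path[64];
        FILE *f;

        snprintf(path, sizeof(path), "/proc/%d/clear_refs", (int)pid);
        f = fopen(path, "w");
        if (!f)
                return -1;
        fputs("1\n", f);        /* any non-zero value triggers the clear */
        return fclose(f);
}

/* Sum the "Referenced:" fields of /proc/<pid>/smaps, in kB. */
static long referenced_kb(pid_t pid)
{
        char path[64], line[256];
        long kb, total = 0;
        FILE *f;

        snprintf(path, sizeof(path), "/proc/%d/smaps", (int)pid);
        f = fopen(path, "r");
        if (!f)
                return -1;
        while (fgets(line, sizeof(line), f))
                if (sscanf(line, "Referenced: %ld kB", &kb) == 1)
                        total += kb;
        fclose(f);
        return total;
}

Clearing, letting the task run for an interval, and then summing the
Referenced fields gives a rough working-set estimate, which is the use
case the upstream patch was written for.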

diff --git a/fs/proc/base.c b/fs/proc/base.c
index 5227dbbae6b14308ef4d9396c44c9c9e307f1eb5..d930b04594a80b33bcb28fe8057ad7522bb08486 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -118,6 +118,7 @@ enum pid_directory_inos {
        PROC_TGID_MOUNTSTATS,
        PROC_TGID_WCHAN,
 #ifdef CONFIG_MMU
+       PROC_TGID_CLEAR_REFS,
        PROC_TGID_SMAPS,
 #endif
 #ifdef CONFIG_SCHEDSTATS
@@ -164,6 +165,7 @@ enum pid_directory_inos {
        PROC_TID_MOUNTSTATS,
        PROC_TID_WCHAN,
 #ifdef CONFIG_MMU
+       PROC_TID_CLEAR_REFS,
        PROC_TID_SMAPS,
 #endif
 #ifdef CONFIG_SCHEDSTATS
@@ -227,6 +229,7 @@ static struct pid_entry tgid_base_stuff[] = {
        E(PROC_TGID_MOUNTS,    "mounts",  S_IFREG|S_IRUGO),
        E(PROC_TGID_MOUNTSTATS, "mountstats", S_IFREG|S_IRUSR),
 #ifdef CONFIG_MMU
+       E(PROC_TGID_CLEAR_REFS, "clear_refs", S_IFREG|S_IWUSR),
        E(PROC_TGID_SMAPS,     "smaps",   S_IFREG|S_IRUSR),
 #endif
 #ifdef CONFIG_SECURITY
@@ -276,6 +279,7 @@ static struct pid_entry tid_base_stuff[] = {
        E(PROC_TID_EXE,        "exe",     S_IFLNK|S_IRWXUGO),
        E(PROC_TID_MOUNTS,     "mounts",  S_IFREG|S_IRUGO),
 #ifdef CONFIG_MMU
+       E(PROC_TID_CLEAR_REFS, "clear_refs", S_IFREG|S_IWUSR),
        E(PROC_TID_SMAPS,      "smaps",   S_IFREG|S_IRUSR),
 #endif
 #ifdef CONFIG_SECURITY
@@ -1969,6 +1973,10 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
                        inode->i_fop = &proc_mounts_operations;
                        break;
 #ifdef CONFIG_MMU
+               case PROC_TID_CLEAR_REFS:
+               case PROC_TGID_CLEAR_REFS:
+                       inode->i_fop = &proc_clear_refs_operations;
+                       break;
                case PROC_TID_SMAPS:
                case PROC_TGID_SMAPS:
                        inode->i_fop = &proc_smaps_operations;
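
The lookup hunk above routes clear_refs to proc_clear_refs_operations,
whose definition lies outside the hunks shown here. In the upstream
patch it is a write-only handler that parses the user buffer and, for a
non-zero value, calls clear_refs_smap() on the task's mm. A sketch along
those lines, assuming the 2.6-era base.c helpers (get_proc_task(),
get_task_mm(), PROC_NUMBUF):

static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                                size_t count, loff_t *ppos)
{
        struct task_struct *task;
        char buffer[PROC_NUMBUF], *end;
        struct mm_struct *mm;

        memset(buffer, 0, sizeof(buffer));
        if (count > sizeof(buffer) - 1)
                count = sizeof(buffer) - 1;
        if (copy_from_user(buffer, buf, count))
                return -EFAULT;
        if (!simple_strtol(buffer, &end, 0))
                return -EINVAL;         /* only non-zero values clear */
        if (*end == '\n')
                end++;
        task = get_proc_task(file->f_dentry->d_inode);
        if (!task)
                return -ESRCH;
        mm = get_task_mm(task);
        if (mm) {
                clear_refs_smap(mm);    /* added to task_mmu.c below */
                mmput(mm);
        }
        put_task_struct(task);
        return end - buffer;
}

struct file_operations proc_clear_refs_operations = {
        .write          = clear_refs_write,
};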
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index a8368654e0623cfee431176b4779d0c114d2cfaf..23a09ba794a45234018db13b6d11f209a7cd7ba8 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -43,6 +43,7 @@ extern int proc_pid_limits(struct task_struct *, char *);
 extern struct file_operations proc_maps_operations;
 extern struct file_operations proc_numa_maps_operations;
 extern struct file_operations proc_smaps_operations;
+extern struct file_operations proc_clear_refs_operations;
 
 extern struct file_operations proc_maps_operations;
 extern struct file_operations proc_numa_maps_operations;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index cdfef56983917ce232d00134c8a372ac49763bd3..6f470972abe83a76a049827d5bc614131af1a813 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -130,6 +130,14 @@ struct mem_size_stats
        unsigned long shared_dirty;
        unsigned long private_clean;
        unsigned long private_dirty;
+       unsigned long referenced;
+};
+
+struct pmd_walker {
+       struct vm_area_struct *vma;
+       void *private;
+       void (*action)(struct vm_area_struct *, pmd_t *, unsigned long,
+                      unsigned long, void *);
 };
 
 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
@@ -215,12 +223,15 @@ static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats
                           "Shared_Dirty:  %8lu kB\n"
                           "Private_Clean: %8lu kB\n"
-                          "Private_Dirty: %8lu kB\n",
+                          "Private_Dirty: %8lu kB\n"
+                          "Referenced:    %8lu kB\n",
                           (vma->vm_end - vma->vm_start) >> 10,
                           mss->resident >> 10,
                           mss->shared_clean  >> 10,
                           mss->shared_dirty  >> 10,
                           mss->private_clean >> 10,
-                          mss->private_dirty >> 10);
+                          mss->private_dirty >> 10,
+                          mss->referenced >> 10);
 
        if (m->count < m->size)  /* vma is copied successfully */
                m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
@@ -233,15 +244,16 @@ static int show_map(struct seq_file *m, void *v)
 }
 
 static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
-                               unsigned long addr, unsigned long end,
-                               struct mem_size_stats *mss)
+                           unsigned long addr, unsigned long end,
+                           void *private)
 {
+       struct mem_size_stats *mss = private;
        pte_t *pte, ptent;
        spinlock_t *ptl;
        struct page *page;
 
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
-       do {
+       for (; addr != end; pte++, addr += PAGE_SIZE) {
                ptent = *pte;
                if (!pte_present(ptent))
                        continue;
@@ -252,6 +264,9 @@ static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                if (!page)
                        continue;
 
+               /* Accumulate the size in pages that have been accessed. */
+               if (pte_young(ptent) || PageReferenced(page))
+                       mss->referenced += PAGE_SIZE;
                if (page_mapcount(page) >= 2) {
                        if (pte_dirty(ptent))
                                mss->shared_dirty += PAGE_SIZE;
@@ -263,57 +278,99 @@ static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                        else
                                mss->private_clean += PAGE_SIZE;
                }
-       } while (pte++, addr += PAGE_SIZE, addr != end);
+       }
        pte_unmap_unlock(pte - 1, ptl);
        cond_resched();
 }
 
-static inline void smaps_pmd_range(struct vm_area_struct *vma, pud_t *pud,
-                               unsigned long addr, unsigned long end,
-                               struct mem_size_stats *mss)
+static void clear_refs_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+                                unsigned long addr, unsigned long end,
+                                void *private)
+{
+       pte_t *pte, ptent;
+       spinlock_t *ptl;
+       struct page *page;
+
+       pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+       for (; addr != end; pte++, addr += PAGE_SIZE) {
+               ptent = *pte;
+               if (!pte_present(ptent))
+                       continue;
+
+               page = vm_normal_page(vma, addr, ptent);
+               if (!page)
+                       continue;
+
+               /* Clear accessed and referenced bits. */
+               ptep_test_and_clear_young(vma, addr, pte);
+               ClearPageReferenced(page);
+       }
+       pte_unmap_unlock(pte - 1, ptl);
+       cond_resched();
+}
+
+static inline void walk_pmd_range(struct pmd_walker *walker, pud_t *pud,
+                                 unsigned long addr, unsigned long end)
 {
        pmd_t *pmd;
        unsigned long next;
 
-       pmd = pmd_offset(pud, addr);
-       do {
+       for (pmd = pmd_offset(pud, addr); addr != end;
+            pmd++, addr = next) {
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(pmd))
                        continue;
-               smaps_pte_range(vma, pmd, addr, next, mss);
-       } while (pmd++, addr = next, addr != end);
+               walker->action(walker->vma, pmd, addr, next, walker->private);
+       }
 }
 
-static inline void smaps_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
-                               unsigned long addr, unsigned long end,
-                               struct mem_size_stats *mss)
+static inline void walk_pud_range(struct pmd_walker *walker, pgd_t *pgd,
+                                 unsigned long addr, unsigned long end)
 {
        pud_t *pud;
        unsigned long next;
 
-       pud = pud_offset(pgd, addr);
-       do {
+       for (pud = pud_offset(pgd, addr); addr != end;
+            pud++, addr = next) {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
-               smaps_pmd_range(vma, pud, addr, next, mss);
-       } while (pud++, addr = next, addr != end);
+               walk_pmd_range(walker, pud, addr, next);
+       }
 }
 
-static inline void smaps_pgd_range(struct vm_area_struct *vma,
-                               unsigned long addr, unsigned long end,
-                               struct mem_size_stats *mss)
+/*
+ * walk_page_range - walk the page tables of a VMA with a callback
+ * @vma - VMA to walk
+ * @action - callback invoked for every bottom-level (PTE) page table
+ * @private - private data passed to the callback function
+ *
+ * Recursively walk the page table for the memory area in a VMA, calling
+ * a callback for every bottom-level (PTE) page table.
+ */
+static inline void walk_page_range(struct vm_area_struct *vma,
+                                  void (*action)(struct vm_area_struct *,
+                                                 pmd_t *, unsigned long,
+                                                 unsigned long, void *),
+                                  void *private)
 {
+       unsigned long addr = vma->vm_start;
+       unsigned long end = vma->vm_end;
+       struct pmd_walker walker = {
+               .vma            = vma,
+               .private        = private,
+               .action         = action,
+       };
        pgd_t *pgd;
        unsigned long next;
 
-       pgd = pgd_offset(vma->vm_mm, addr);
-       do {
+       for (pgd = pgd_offset(vma->vm_mm, addr); addr != end;
+            pgd++, addr = next) {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
-               smaps_pud_range(vma, pgd, addr, next, mss);
-       } while (pgd++, addr = next, addr != end);
+               walk_pud_range(&walker, pgd, addr, next);
+       }
 }
 
 static int show_smap(struct seq_file *m, void *v)
@@ -323,10 +380,22 @@ static int show_smap(struct seq_file *m, void *v)
 
        memset(&mss, 0, sizeof mss);
        if (vma->vm_mm && !is_vm_hugetlb_page(vma))
-               smaps_pgd_range(vma, vma->vm_start, vma->vm_end, &mss);
+               walk_page_range(vma, smaps_pte_range, &mss);
        return show_map_internal(m, v, &mss);
 }
 
+void clear_refs_smap(struct mm_struct *mm)
+{
+       struct vm_area_struct *vma;
+
+       down_read(&mm->mmap_sem);
+       for (vma = mm->mmap; vma; vma = vma->vm_next)
+               if (vma->vm_mm && !is_vm_hugetlb_page(vma))
+                       walk_page_range(vma, clear_refs_pte_range, NULL);
+       flush_tlb_mm(mm);
+       up_read(&mm->mmap_sem);
+}
+
 static void *m_start(struct seq_file *m, loff_t *pos)
 {
        struct proc_maps_private *priv = m->private;
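
The payoff of the pmd_walker refactoring above is that smaps accounting
and reference clearing now share a single set of pgd/pud/pmd loops; any
further per-VMA statistic only has to supply a PTE-level callback to
walk_page_range(). A hypothetical third user (not part of this patch)
that counts the pages currently present in a VMA:

/* PTE-level callback matching the pmd_walker action signature. */
static void count_present_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                                    unsigned long addr, unsigned long end,
                                    void *private)
{
        unsigned long *present = private;
        pte_t *pte;
        spinlock_t *ptl;

        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE)
                if (pte_present(*pte))
                        (*present)++;
        pte_unmap_unlock(pte - 1, ptl);
        cond_resched();
}

static unsigned long count_present(struct vm_area_struct *vma)
{
        unsigned long present = 0;

        if (vma->vm_mm && !is_vm_hugetlb_page(vma))
                walk_page_range(vma, count_present_pte_range, &present);
        return present;
}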