smaps: break out smaps_pte_entry() from smaps_pte_range()
We will use smaps_pte_entry() in a moment to handle both small pages
and transparent huge pages. But first, we must break it out of
smaps_pte_range().
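
For reference, here is the direction the helper is headed once it is
broken out. This is a hypothetical sketch, not part of this patch,
and the ptent_size parameter name is illustrative: the idea is to
pass the mapped size in explicitly so one copy of the accounting can
cover a 4k pte and a huge pmd alike:

	static void smaps_pte_entry(pte_t ptent, unsigned long addr,
			unsigned long ptent_size, struct mm_walk *walk)
	{
		struct mem_size_stats *mss = walk->private;
		struct vm_area_struct *vma = mss->vma;
		struct page *page;
		int mapcount;

		if (is_swap_pte(ptent)) {
			/* ptent_size instead of PAGE_SIZE, here and below */
			mss->swap += ptent_size;
			return;
		}

		if (!pte_present(ptent))
			return;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			return;

		if (PageAnon(page))
			mss->anonymous += ptent_size;

		mss->resident += ptent_size;
		/* Accumulate the size in pages that have been accessed. */
		if (pte_young(ptent) || PageReferenced(page))
			mss->referenced += ptent_size;
		mapcount = page_mapcount(page);
		if (mapcount >= 2) {
			if (pte_dirty(ptent) || PageDirty(page))
				mss->shared_dirty += ptent_size;
			else
				mss->shared_clean += ptent_size;
			mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
		} else {
			if (pte_dirty(ptent) || PageDirty(page))
				mss->private_dirty += ptent_size;
			else
				mss->private_clean += ptent_size;
			mss->pss += (ptent_size << PSS_SHIFT);
		}
	}
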
Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: David Rientjes <rientjes@google.com>
Reviewed-by: Eric B Munson <emunson@mgebm.net>
Tested-by: Eric B Munson <emunson@mgebm.net>
Cc: Michael J Wolf <mjwolf@us.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Matt Mackall <mpm@selenic.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 78fd362..5cd06fa 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -333,56 +333,63 @@
 	u64 pss;
 };
 
+
+static void smaps_pte_entry(pte_t ptent, unsigned long addr,
+		struct mm_walk *walk)
+{
+	struct mem_size_stats *mss = walk->private;
+	struct vm_area_struct *vma = mss->vma;
+	struct page *page;
+	int mapcount;
+
+	if (is_swap_pte(ptent)) {
+		mss->swap += PAGE_SIZE;
+		return;
+	}
+
+	if (!pte_present(ptent))
+		return;
+
+	page = vm_normal_page(vma, addr, ptent);
+	if (!page)
+		return;
+
+	if (PageAnon(page))
+		mss->anonymous += PAGE_SIZE;
+
+	mss->resident += PAGE_SIZE;
+	/* Accumulate the size in pages that have been accessed. */
+	if (pte_young(ptent) || PageReferenced(page))
+		mss->referenced += PAGE_SIZE;
+	mapcount = page_mapcount(page);
+	if (mapcount >= 2) {
+		if (pte_dirty(ptent) || PageDirty(page))
+			mss->shared_dirty += PAGE_SIZE;
+		else
+			mss->shared_clean += PAGE_SIZE;
+		mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
+	} else {
+		if (pte_dirty(ptent) || PageDirty(page))
+			mss->private_dirty += PAGE_SIZE;
+		else
+			mss->private_clean += PAGE_SIZE;
+		mss->pss += (PAGE_SIZE << PSS_SHIFT);
+	}
+}
+
 static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 			   struct mm_walk *walk)
 {
 	struct mem_size_stats *mss = walk->private;
 	struct vm_area_struct *vma = mss->vma;
-	pte_t *pte, ptent;
+	pte_t *pte;
 	spinlock_t *ptl;
-	struct page *page;
-	int mapcount;
 
 	split_huge_page_pmd(walk->mm, pmd);
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
-	for (; addr != end; pte++, addr += PAGE_SIZE) {
-		ptent = *pte;
-
-		if (is_swap_pte(ptent)) {
-			mss->swap += PAGE_SIZE;
-			continue;
-		}
-
-		if (!pte_present(ptent))
-			continue;
-
-		page = vm_normal_page(vma, addr, ptent);
-		if (!page)
-			continue;
-
-		if (PageAnon(page))
-			mss->anonymous += PAGE_SIZE;
-
-		mss->resident += PAGE_SIZE;
-		/* Accumulate the size in pages that have been accessed. */
-		if (pte_young(ptent) || PageReferenced(page))
-			mss->referenced += PAGE_SIZE;
-		mapcount = page_mapcount(page);
-		if (mapcount >= 2) {
-			if (pte_dirty(ptent) || PageDirty(page))
-				mss->shared_dirty += PAGE_SIZE;
-			else
-				mss->shared_clean += PAGE_SIZE;
-			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
-		} else {
-			if (pte_dirty(ptent) || PageDirty(page))
-				mss->private_dirty += PAGE_SIZE;
-			else
-				mss->private_clean += PAGE_SIZE;
-			mss->pss += (PAGE_SIZE << PSS_SHIFT);
-		}
-	}
+	for (; addr != end; pte++, addr += PAGE_SIZE)
+		smaps_pte_entry(*pte, addr, walk);
 	pte_unmap_unlock(pte - 1, ptl);
 	cond_resched();
 	return 0;
 }
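
Follow-up sketch (hypothetical, building on the size-parameterized
helper sketched above; not part of this patch): with the accounting
factored out, smaps_pte_range() can eventually account a transparent
huge pmd in one call rather than splitting it first, along the lines
of:

	if (pmd_trans_huge(*pmd)) {
		/*
		 * A huge pmd maps HPAGE_PMD_SIZE worth of memory, so a
		 * single smaps_pte_entry() call accounts the whole
		 * mapping.  Locking against a concurrent huge-page
		 * split is elided from this sketch.
		 */
		smaps_pte_entry(*(pte_t *)pmd, addr, HPAGE_PMD_SIZE, walk);
		return 0;
	}
	/* otherwise fall through to the per-pte loop as before */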