Skip to content

Commit

Permalink
mm-softdirty-unmapped-addresses-between-vmas-are-clean-v2
Browse files Browse the repository at this point in the history
Restructured patch to make the logic clearer.

Signed-off-by: Peter Feiner <pfeiner@google.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Jamie Liu <jamieliu@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
  • Loading branch information
peterfeiner authored and sfrothwell committed Sep 26, 2014
1 parent 705ce78 commit 4c2f0f9
Showing 1 changed file with 20 additions and 20 deletions.
40 changes: 20 additions & 20 deletions fs/proc/task_mmu.c
Original file line number Diff line number Diff line change
Expand Up @@ -1058,36 +1058,36 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
return 0;

while (1) {
unsigned long vm_start = end;
unsigned long vm_end = end;
unsigned long vm_flags = 0;
/* End of address space hole, which we mark as non-present. */
unsigned long hole_end;

if (vma) {
/*
* We can't possibly be in a hugetlb VMA. In general,
* for a mm_walk with a pmd_entry and a hugetlb_entry,
* the pmd_entry can only be called on addresses in a
* hugetlb if the walk starts in a non-hugetlb VMA and
* spans a hugepage VMA. Since pagemap_read walks are
* PMD-sized and PMD-aligned, this will never be true.
*/
BUG_ON(is_vm_hugetlb_page(vma));
vm_start = vma->vm_start;
vm_end = min(end, vma->vm_end);
vm_flags = vma->vm_flags;
}
if (vma)
hole_end = min(end, vma->vm_start);
else
hole_end = end;

/* Addresses before the VMA. */
for (; addr < vm_start; addr += PAGE_SIZE) {
for (; addr < hole_end; addr += PAGE_SIZE) {
pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));

err = add_to_pagemap(addr, &pme, pm);
if (err)
return err;
}

if (!vma)
break;
/*
* We can't possibly be in a hugetlb VMA. In general,
* for a mm_walk with a pmd_entry and a hugetlb_entry,
* the pmd_entry can only be called on addresses in a
* hugetlb if the walk starts in a non-hugetlb VMA and
* spans a hugepage VMA. Since pagemap_read walks are
* PMD-sized and PMD-aligned, this will never be true.
*/
BUG_ON(is_vm_hugetlb_page(vma));

/* Addresses in the VMA. */
for (; addr < vm_end; addr += PAGE_SIZE) {
for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
pagemap_entry_t pme;
pte = pte_offset_map(pmd, addr);
pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
Expand Down

0 comments on commit 4c2f0f9

Please sign in to comment.