mm/page_alloc: protect PCP lists with a spinlock
Currently the PCP lists are protected by using local_lock_irqsave to
prevent migration and IRQ reentrancy, but this is inconvenient.  Remote
draining of the lists is impossible, so a workqueue is required, and every
task allocation/free must disable then enable interrupts, which is
expensive.

As preparation for dealing with both of those problems, protect the lists
with a spinlock.  The IRQ-unsafe version of the lock is used because IRQs
are already disabled by local_lock_irqsave.  spin_trylock is used in
preparation for a time when local_lock could be used instead of
local_lock_irqsave.
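
As a rough sketch (illustrative only, using the names introduced by this
patch), the fast path changes from

        local_lock_irqsave(&pagesets.lock, flags);
        /* ... manipulate pcp->lists ... */
        local_unlock_irqrestore(&pagesets.lock, flags);

to

        local_lock_irqsave(&pagesets.lock, flags);
        spin_lock(&pcp->lock);  /* IRQ-unsafe: IRQs already disabled */
        /* ... manipulate pcp->lists ... */
        spin_unlock(&pcp->lock);
        local_unlock_irqrestore(&pagesets.lock, flags);

while the trylock-based paths use spin_trylock and fall back to the buddy
allocator if the lock cannot be taken.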

struct per_cpu_pages still fits within the same number of cache lines
after this patch as it did before the series:

struct per_cpu_pages {
        spinlock_t                 lock;                 /*     0     4 */
        int                        count;                /*     4     4 */
        int                        high;                 /*     8     4 */
        int                        batch;                /*    12     4 */
        short int                  free_factor;          /*    16     2 */
        short int                  expire;               /*    18     2 */

        /* XXX 4 bytes hole, try to pack */

        struct list_head           lists[13];            /*    24   208 */

        /* size: 256, cachelines: 4, members: 7 */
        /* sum members: 228, holes: 1, sum holes: 4 */
        /* padding: 24 */
} __attribute__((__aligned__(64)));
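
To make that expectation explicit at build time, a compile-time check
along the following lines could be added.  This is a hypothetical sketch,
not part of the patch, and it assumes the 64-byte cache lines implied by
the layout above (via SMP_CACHE_BYTES):

        #include <linux/build_bug.h>
        #include <linux/cache.h>

        /* Hypothetical guard: fail the build if struct per_cpu_pages
         * ever grows beyond the four cache lines reported above.
         */
        static_assert(sizeof(struct per_cpu_pages) <= 4 * SMP_CACHE_BYTES,
                      "struct per_cpu_pages no longer fits in 4 cache lines");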

There is overhead in the fast path due to acquiring the spinlock even
though the spinlock is per-cpu and uncontended in the common case.  Page
Fault Test (PFT) reported the following results on a 1-socket machine.

                                     5.18.0-rc1               5.18.0-rc1
                                        vanilla         mm-pcpdrain-v2r1
Hmean     faults/sec-1   886331.5718 (   0.00%)   885462.7479 (  -0.10%)
Hmean     faults/sec-3  2337706.1583 (   0.00%)  2332130.4909 *  -0.24%*
Hmean     faults/sec-5  2851594.2897 (   0.00%)  2844123.9307 (  -0.26%)
Hmean     faults/sec-7  3543251.5507 (   0.00%)  3516889.0442 *  -0.74%*
Hmean     faults/sec-8  3947098.0024 (   0.00%)  3916162.8476 *  -0.78%*
Stddev    faults/sec-1     2302.9105 (   0.00%)     2065.0845 (  10.33%)
Stddev    faults/sec-3     7275.2442 (   0.00%)     6033.2620 (  17.07%)
Stddev    faults/sec-5    24726.0328 (   0.00%)    12525.1026 (  49.34%)
Stddev    faults/sec-7     9974.2542 (   0.00%)     9543.9627 (   4.31%)
Stddev    faults/sec-8     9468.0191 (   0.00%)     7958.2607 (  15.95%)
CoeffVar  faults/sec-1        0.2598 (   0.00%)        0.2332 (  10.24%)
CoeffVar  faults/sec-3        0.3112 (   0.00%)        0.2587 (  16.87%)
CoeffVar  faults/sec-5        0.8670 (   0.00%)        0.4404 (  49.21%)
CoeffVar  faults/sec-7        0.2815 (   0.00%)        0.2714 (   3.60%)
CoeffVar  faults/sec-8        0.2399 (   0.00%)        0.2032 (  15.28%)

There is a small hit in the number of faults per second but, given that
the results are more stable, it's borderline noise.

Link: https://lkml.kernel.org/r/20220512085043.5234-6-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Tested-by: Minchan Kim <minchan@kernel.org>
Acked-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
gormanm authored and akpm00 committed May 27, 2022
1 parent c9534a8 commit 3dfdcac
Showing 2 changed files with 149 additions and 21 deletions.
1 change: 1 addition & 0 deletions include/linux/mmzone.h
@@ -382,6 +382,7 @@ enum zone_watermarks {

/* Fields and list protected by pagesets local_lock in page_alloc.c */
struct per_cpu_pages {
spinlock_t lock; /* Protects lists field */
int count; /* number of pages in the list */
int high; /* high watermark, emptying needed */
int batch; /* chunk size for buddy add/remove */
169 changes: 148 additions & 21 deletions mm/page_alloc.c
Expand Up @@ -133,6 +133,20 @@ static DEFINE_PER_CPU(struct pagesets, pagesets) = {
.lock = INIT_LOCAL_LOCK(lock),
};

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/*
* On SMP, spin_trylock is sufficient protection.
* On PREEMPT_RT, spin_trylock is equivalent on both SMP and UP.
*/
#define pcp_trylock_prepare(flags) do { } while (0)
#define pcp_trylock_finish(flags) do { } while (0)
#else

/* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */
#define pcp_trylock_prepare(flags) local_irq_save(flags)
#define pcp_trylock_finish(flags) local_irq_restore(flags)
#endif

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
@@ -3093,15 +3107,22 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
*/
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
unsigned long flags;
int to_drain, batch;

local_lock_irqsave(&pagesets.lock, flags);
batch = READ_ONCE(pcp->batch);
to_drain = min(pcp->count, batch);
if (to_drain > 0)
if (to_drain > 0) {
unsigned long flags;

/*
* free_pcppages_bulk expects IRQs disabled for zone->lock
* so even though pcp->lock is not intended to be IRQ-safe,
* it's needed in this context.
*/
spin_lock_irqsave(&pcp->lock, flags);
free_pcppages_bulk(zone, to_drain, pcp, 0);
local_unlock_irqrestore(&pagesets.lock, flags);
spin_unlock_irqrestore(&pcp->lock, flags);
}
}
#endif

@@ -3114,16 +3135,17 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
*/
static void drain_pages_zone(unsigned int cpu, struct zone *zone)
{
unsigned long flags;
struct per_cpu_pages *pcp;

local_lock_irqsave(&pagesets.lock, flags);

pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
if (pcp->count)
free_pcppages_bulk(zone, pcp->count, pcp, 0);
if (pcp->count) {
unsigned long flags;

local_unlock_irqrestore(&pagesets.lock, flags);
/* See drain_zone_pages on why this is disabling IRQs */
spin_lock_irqsave(&pcp->lock, flags);
free_pcppages_bulk(zone, pcp->count, pcp, 0);
spin_unlock_irqrestore(&pcp->lock, flags);
}
}

/*
@@ -3391,18 +3413,30 @@ static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone,
return min(READ_ONCE(pcp->batch) << 2, high);
}

static void free_unref_page_commit(struct page *page, int migratetype,
unsigned int order)
/* Returns true if the page was committed to the per-cpu list. */
static bool free_unref_page_commit(struct page *page, int migratetype,
unsigned int order, bool locked)
{
struct zone *zone = page_zone(page);
struct per_cpu_pages *pcp;
int high;
int pindex;
bool free_high;
unsigned long __maybe_unused UP_flags;

__count_vm_event(PGFREE);
pcp = this_cpu_ptr(zone->per_cpu_pageset);
pindex = order_to_pindex(migratetype, order);

if (!locked) {
/* Protect against a parallel drain. */
pcp_trylock_prepare(UP_flags);
if (!spin_trylock(&pcp->lock)) {
pcp_trylock_finish(UP_flags);
return false;
}
}

list_add(&page->pcp_list, &pcp->lists[pindex]);
pcp->count += 1 << order;

@@ -3420,6 +3454,13 @@ static void free_unref_page_commit(struct page *page, int migratetype,

free_pcppages_bulk(zone, nr_pcp_free(pcp, high, batch, free_high), pcp, pindex);
}

if (!locked) {
spin_unlock(&pcp->lock);
pcp_trylock_finish(UP_flags);
}

return true;
}

/*
@@ -3430,6 +3471,7 @@ void free_unref_page(struct page *page, unsigned int order)
unsigned long flags;
unsigned long pfn = page_to_pfn(page);
int migratetype;
bool freed_pcp = false;

if (!free_unref_page_prepare(page, pfn, order))
return;
Expand All @@ -3451,8 +3493,11 @@ void free_unref_page(struct page *page, unsigned int order)
}

local_lock_irqsave(&pagesets.lock, flags);
free_unref_page_commit(page, migratetype, order);
freed_pcp = free_unref_page_commit(page, migratetype, order, false);
local_unlock_irqrestore(&pagesets.lock, flags);

if (unlikely(!freed_pcp))
free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE);
}

/*
@@ -3461,10 +3506,19 @@ void free_unref_page(struct page *page, unsigned int order)
void free_unref_page_list(struct list_head *list)
{
struct page *page, *next;
struct per_cpu_pages *pcp;
struct zone *locked_zone;
unsigned long flags;
int batch_count = 0;
int migratetype;

/*
* An empty list is possible. Check early so that the later
* lru_to_page() does not potentially read garbage.
*/
if (list_empty(list))
return;

/* Prepare pages for freeing */
list_for_each_entry_safe(page, next, list, lru) {
unsigned long pfn = page_to_pfn(page);
@@ -3485,8 +3539,33 @@ void free_unref_page_list(struct list_head *list)
}
}

/*
* Preparation could have drained the list, either because pages
* failed to prepare or because all of them are being isolated.
*/
if (list_empty(list))
return;

VM_BUG_ON(in_hardirq());

local_lock_irqsave(&pagesets.lock, flags);

page = lru_to_page(list);
locked_zone = page_zone(page);
pcp = this_cpu_ptr(locked_zone->per_cpu_pageset);
spin_lock(&pcp->lock);

list_for_each_entry_safe(page, next, list, lru) {
struct zone *zone = page_zone(page);

/* Different zone, different pcp lock. */
if (zone != locked_zone) {
spin_unlock(&pcp->lock);
locked_zone = zone;
pcp = this_cpu_ptr(zone->per_cpu_pageset);
spin_lock(&pcp->lock);
}

/*
* Non-isolated types over MIGRATE_PCPTYPES get added
* to the MIGRATE_MOVABLE pcp list.
@@ -3496,18 +3575,33 @@ void free_unref_page_list(struct list_head *list)
migratetype = MIGRATE_MOVABLE;

trace_mm_page_free_batched(page);
free_unref_page_commit(page, migratetype, 0);

/*
* If there is a parallel drain in progress, free to the buddy
* allocator directly. This is expensive as the zone lock will
* be acquired multiple times but if a drain is in progress
* then an expensive operation is already taking place.
*
* TODO: Always false at the moment due to local_lock_irqsave
* and is preparation for converting to local_lock.
*/
if (unlikely(!free_unref_page_commit(page, migratetype, 0, true)))
free_one_page(page_zone(page), page, page_to_pfn(page), 0, migratetype, FPI_NONE);

/*
* Guard against excessive IRQ disabled times when we get
* a large list of pages to free.
*/
if (++batch_count == SWAP_CLUSTER_MAX) {
spin_unlock(&pcp->lock);
local_unlock_irqrestore(&pagesets.lock, flags);
batch_count = 0;
local_lock_irqsave(&pagesets.lock, flags);
pcp = this_cpu_ptr(locked_zone->per_cpu_pageset);
spin_lock(&pcp->lock);
}
}
spin_unlock(&pcp->lock);
local_unlock_irqrestore(&pagesets.lock, flags);
}

@@ -3676,9 +3770,28 @@ struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
int migratetype,
unsigned int alloc_flags,
struct per_cpu_pages *pcp,
struct list_head *list)
struct list_head *list,
bool locked)
{
struct page *page;
unsigned long __maybe_unused UP_flags;

/*
* spin_trylock is not necessary right now due to
* local_lock_irqsave and is a preparation step for
* a conversion to local_lock using the trylock to prevent
* IRQ re-entrancy. If pcp->lock cannot be acquired, the caller
* uses rmqueue_buddy.
*
* TODO: Convert local_lock_irqsave to local_lock.
*/
if (unlikely(!locked)) {
pcp_trylock_prepare(UP_flags);
if (!spin_trylock(&pcp->lock)) {
pcp_trylock_finish(UP_flags);
return NULL;
}
}

do {
if (list_empty(list)) {
@@ -3699,15 +3812,23 @@ struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
migratetype, alloc_flags);

pcp->count += alloced << order;
if (unlikely(list_empty(list)))
return NULL;
if (unlikely(list_empty(list))) {
page = NULL;
goto out;
}
}

page = list_first_entry(list, struct page, pcp_list);
list_del(&page->pcp_list);
pcp->count -= 1 << order;
} while (check_new_pcp(page, order));

out:
if (!locked) {
spin_unlock(&pcp->lock);
pcp_trylock_finish(UP_flags);
}

return page;
}

@@ -3732,7 +3853,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
pcp = this_cpu_ptr(zone->per_cpu_pageset);
pcp->free_factor >>= 1;
list = &pcp->lists[order_to_pindex(migratetype, order)];
page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list, false);
local_unlock_irqrestore(&pagesets.lock, flags);
if (page) {
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
@@ -3767,7 +3888,8 @@ struct page *rmqueue(struct zone *preferred_zone,
migratetype != MIGRATE_MOVABLE) {
page = rmqueue_pcplist(preferred_zone, zone, order,
gfp_flags, migratetype, alloc_flags);
goto out;
if (likely(page))
goto out;
}
}

@@ -5341,6 +5463,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
local_lock_irqsave(&pagesets.lock, flags);
pcp = this_cpu_ptr(zone->per_cpu_pageset);
pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)];
spin_lock(&pcp->lock);

while (nr_populated < nr_pages) {

@@ -5351,11 +5474,13 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
}

page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
pcp, pcp_list);
pcp, pcp_list, true);
if (unlikely(!page)) {
/* Try and allocate at least one page */
if (!nr_account)
if (!nr_account) {
spin_unlock(&pcp->lock);
goto failed_irq;
}
break;
}
nr_account++;
@@ -5368,6 +5493,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
nr_populated++;
}

spin_unlock(&pcp->lock);
local_unlock_irqrestore(&pagesets.lock, flags);

__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
@@ -7017,6 +7143,7 @@ static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonesta
memset(pcp, 0, sizeof(*pcp));
memset(pzstats, 0, sizeof(*pzstats));

spin_lock_init(&pcp->lock);
for (pindex = 0; pindex < NR_PCP_LISTS; pindex++)
INIT_LIST_HEAD(&pcp->lists[pindex]);

