// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/mmzone.c
 *
 * management code for pgdats, zones and page flags
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/mmzone.h>

/* Return the pgdat of the first (lowest-numbered) online NUMA node. */
struct pglist_data *first_online_pgdat(void)
{
	return NODE_DATA(first_online_node);
}

/* Return the pgdat of the next online node after @pgdat, or NULL at the end. */
struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
{
	int nid = next_online_node(pgdat->node_id);

	if (nid == MAX_NUMNODES)
		return NULL;
	return NODE_DATA(nid);
}
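
/*
 * Callers normally do not use the two helpers above directly; they iterate
 * with the for_each_online_pgdat() macro from include/linux/mmzone.h, which
 * is built on them roughly as follows (a sketch; the header has the
 * authoritative definition):
 *
 *	#define for_each_online_pgdat(pgdat)		\
 *		for (pgdat = first_online_pgdat();	\
 *		     pgdat;				\
 *		     pgdat = next_online_pgdat(pgdat))
 */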

/*
 * next_zone - helper for for_each_zone(): advance to the next zone of the
 * current node, then on to the first zone of the next online node, and
 * return NULL once every zone has been visited.
 */
struct zone *next_zone(struct zone *zone)
{
	pg_data_t *pgdat = zone->zone_pgdat;

	if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
		zone++;
	else {
		pgdat = next_online_pgdat(pgdat);
		if (pgdat)
			zone = pgdat->node_zones;
		else
			zone = NULL;
	}
	return zone;
}
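
/*
 * for_each_zone() in include/linux/mmzone.h drives this helper; it walks
 * every zone of every online node, roughly (a sketch; see the header for
 * the authoritative definition):
 *
 *	#define for_each_zone(zone)				\
 *		for (zone = (first_online_pgdat())->node_zones;	\
 *		     zone;					\
 *		     zone = next_zone(zone))
 */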

/* Return 1 if the zoneref's node is set in *nodes (always 1 on !NUMA). */
static inline int zref_in_nodemask(struct zoneref *zref, nodemask_t *nodes)
{
#ifdef CONFIG_NUMA
	return node_isset(zonelist_node_idx(zref), *nodes);
#else
	return 1;
#endif /* CONFIG_NUMA */
}

/* Returns the next zone at or below highest_zoneidx in a zonelist */
struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	/*
	 * Find the next suitable zone to use for the allocation.
	 * Only filter based on nodemask if it's set
	 */
	if (unlikely(nodes == NULL))
		while (zonelist_zone_idx(z) > highest_zoneidx)
			z++;
	else
		while (zonelist_zone_idx(z) > highest_zoneidx ||
				(z->zone && !zref_in_nodemask(z, nodes)))
			z++;

	return z;
}
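
/*
 * The page allocator does not call this directly either; it goes through
 * the first_zones_zonelist()/next_zones_zonelist() wrappers, typically via
 * the for_each_zone_zonelist_nodemask() iterator, whose loop looks roughly
 * like this (a sketch; see include/linux/mmzone.h for the exact macro):
 *
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for (z = first_zones_zonelist(zlist, highidx, nodemask),
 *	     zone = zonelist_zone(z);
 *	     zone;
 *	     z = next_zones_zonelist(++z, highidx, nodemask),
 *	     zone = zonelist_zone(z))
 */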

/* Initialise an lruvec: zero it, init its spinlock and empty each LRU list. */
void lruvec_init(struct lruvec *lruvec)
{
	enum lru_list lru;

	memset(lruvec, 0, sizeof(struct lruvec));
	spin_lock_init(&lruvec->lru_lock);

	for_each_lru(lru)
		INIT_LIST_HEAD(&lruvec->lists[lru]);
}

#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
/*
 * Atomically record the cpupid of the last access in page->flags and
 * return the previous value.  The cmpxchg() loop retries if another CPU
 * updated the flags word concurrently.
 */
int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	unsigned long old_flags, flags;
	int last_cpupid;

	do {
		old_flags = flags = page->flags;
		last_cpupid = page_cpupid_last(page);

		flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
		flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
	} while (unlikely(cmpxchg(&page->flags, old_flags, flags) != old_flags));

	return last_cpupid;
}
#endif