blob: 8a959f20662d55f7ff9b49f0edaf37bcdbbca617 [file] [log] [blame]
Thomas Gleixner1a59d1b82019-05-27 08:55:05 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Tony Luck8d7e3512005-07-06 18:18:10 -07002/*
Tony Luck8d7e3512005-07-06 18:18:10 -07003 *
4 * ia64 kernel NUMA specific stuff
5 *
6 * Copyright (C) 2002 Erich Focht <efocht@ess.nec.de>
7 * Copyright (C) 2004 Silicon Graphics, Inc.
8 * Jesse Barnes <jbarnes@sgi.com>
9 */
Tony Luck8d7e3512005-07-06 18:18:10 -070010#include <linux/topology.h>
11#include <linux/module.h>
12#include <asm/processor.h>
13#include <asm/smp.h>
14
/* Physical-cpu -> NUMA-node lookup table; filled in by build_cpu_to_node_map(). */
u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_to_node_map);

/* Per-node mask of the cpus currently mapped to that node (inverse of the above). */
cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
EXPORT_SYMBOL(node_to_cpu_mask);
Paul Gortmakerccce9bb2013-06-17 15:51:20 -040021void map_cpu_to_node(int cpu, int nid)
KAMEZAWA Hiroyuki3212fe12006-09-25 16:25:31 -070022{
23 int oldnid;
24 if (nid < 0) { /* just initialize by zero */
25 cpu_to_node_map[cpu] = 0;
26 return;
27 }
28 /* sanity check first */
29 oldnid = cpu_to_node_map[cpu];
Rusty Russell5d2068d2015-03-05 10:49:16 +103030 if (cpumask_test_cpu(cpu, &node_to_cpu_mask[oldnid])) {
KAMEZAWA Hiroyuki3212fe12006-09-25 16:25:31 -070031 return; /* nothing to do */
32 }
33 /* we don't have cpu-driven node hot add yet...
34 In usual case, node is created from SRAT at boot time. */
35 if (!node_online(nid))
36 nid = first_online_node;
37 cpu_to_node_map[cpu] = nid;
Rusty Russell5d2068d2015-03-05 10:49:16 +103038 cpumask_set_cpu(cpu, &node_to_cpu_mask[nid]);
KAMEZAWA Hiroyuki3212fe12006-09-25 16:25:31 -070039 return;
40}
41
/*
 * unmap_cpu_from_node - drop the cpu -> node association from both tables
 * @cpu: logical cpu number
 * @nid: node the cpu is expected to be mapped to
 *
 * Warns (but proceeds) if the two tables disagree with @nid, then resets
 * the cpu's entry to node 0 and clears it from the node's cpu mask.
 */
void unmap_cpu_from_node(int cpu, int nid)
{
	WARN_ON(!cpumask_test_cpu(cpu, &node_to_cpu_mask[nid]));
	WARN_ON(cpu_to_node_map[cpu] != nid);
	cpu_to_node_map[cpu] = 0;
	cpumask_clear_cpu(cpu, &node_to_cpu_mask[nid]);
}
49
50
Tony Luck8d7e3512005-07-06 18:18:10 -070051/**
52 * build_cpu_to_node_map - setup cpu to node and node to cpumask arrays
53 *
54 * Build cpu to node mapping and initialize the per node cpu masks using
55 * info from the node_cpuid array handed to us by ACPI.
56 */
57void __init build_cpu_to_node_map(void)
58{
59 int cpu, i, node;
60
61 for(node=0; node < MAX_NUMNODES; node++)
Rusty Russell5d2068d2015-03-05 10:49:16 +103062 cpumask_clear(&node_to_cpu_mask[node]);
Tony Luck8d7e3512005-07-06 18:18:10 -070063
holt@sgi.com2c6e6db2008-04-03 15:17:13 -050064 for_each_possible_early_cpu(cpu) {
Anshuman Khandual98fa15f2019-03-05 15:42:58 -080065 node = NUMA_NO_NODE;
Tony Luck8d7e3512005-07-06 18:18:10 -070066 for (i = 0; i < NR_CPUS; ++i)
67 if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
68 node = node_cpuid[i].nid;
69 break;
70 }
KAMEZAWA Hiroyuki3212fe12006-09-25 16:25:31 -070071 map_cpu_to_node(cpu, node);
Tony Luck8d7e3512005-07-06 18:18:10 -070072 }
73}