/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * ia64 kernel NUMA specific stuff
 *
 * Copyright (C) 2002 Erich Focht <efocht@ess.nec.de>
 * Copyright (C) 2004 Silicon Graphics, Inc.
 *	Jesse Barnes <jbarnes@sgi.com>
 */
#include <linux/topology.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <asm/smp.h>

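/*
 * cpu_to_node_map[] records the NUMA node of each logical CPU;
 * node_to_cpu_mask[] holds the reverse mapping, one cpumask per node.
 */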
u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_to_node_map);

cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
EXPORT_SYMBOL(node_to_cpu_mask);

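/*
 * Associate @cpu with NUMA node @nid: update cpu_to_node_map[] and the
 * per-node CPU mask.  A negative @nid simply resets the CPU to node 0.
 */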
void map_cpu_to_node(int cpu, int nid)
{
	int oldnid;

	if (nid < 0) { /* just initialize by zero */
		cpu_to_node_map[cpu] = 0;
		return;
	}
	/* sanity check first */
	oldnid = cpu_to_node_map[cpu];
	if (cpumask_test_cpu(cpu, &node_to_cpu_mask[oldnid]))
		return; /* nothing to do */
	/*
	 * We don't have CPU-driven node hot-add yet; in the usual case the
	 * node is created from the SRAT at boot time.
	 */
	if (!node_online(nid))
		nid = first_online_node;
	cpu_to_node_map[cpu] = nid;
	cpumask_set_cpu(cpu, &node_to_cpu_mask[nid]);
}

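/*
 * Remove @cpu from NUMA node @nid: reset its cpu_to_node_map[] entry to 0
 * and clear it from the node's CPU mask.  Warns if the CPU was not
 * actually mapped to @nid.
 */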
void unmap_cpu_from_node(int cpu, int nid)
{
	WARN_ON(!cpumask_test_cpu(cpu, &node_to_cpu_mask[nid]));
	WARN_ON(cpu_to_node_map[cpu] != nid);
	cpu_to_node_map[cpu] = 0;
	cpumask_clear_cpu(cpu, &node_to_cpu_mask[nid]);
}

/**
 * build_cpu_to_node_map - set up the cpu-to-node and node-to-cpumask arrays
 *
 * Build the cpu to node mapping and initialize the per-node CPU masks using
 * the info from the node_cpuid array handed to us by ACPI.
 */
void __init build_cpu_to_node_map(void)
{
	int cpu, i, node;

	for (node = 0; node < MAX_NUMNODES; node++)
		cpumask_clear(&node_to_cpu_mask[node]);

	for_each_possible_early_cpu(cpu) {
		/* look up this CPU's node in the ACPI-provided node_cpuid[] table */
		node = -1;
		for (i = 0; i < NR_CPUS; ++i)
			if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
				node = node_cpuid[i].nid;
				break;
			}
		map_cpu_to_node(cpu, node);
	}
}
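
/*
 * Note: node_cpuid[] is populated earlier from the ACPI SRAT, as described
 * above; build_cpu_to_node_map() then runs once at boot to fill in
 * cpu_to_node_map[] and node_to_cpu_mask[].
 */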