/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>

#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>

cpumask_t phys_cpu_present_map;		/* Bitmask of available CPUs */
volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */
cpumask_t cpu_online_map;		/* Bitmask of currently online CPUs */
int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

EXPORT_SYMBOL(phys_cpu_present_map);
EXPORT_SYMBOL(cpu_online_map);

static void smp_tune_scheduling(void)
{
	struct cache_desc *cd = &current_cpu_data.scache;
	unsigned long cachesize;	/* bytes */
	unsigned long bandwidth = 350;	/* MB/s */
	unsigned long cpu_khz;

	/*
	 * Crude estimate until we actually measure ...
	 */
	cpu_khz = loops_per_jiffy * 2 * HZ / 1000;
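	/*
	 * (loops_per_jiffy * HZ gives delay-loop iterations per second;
	 * the factor of 2 presumably reflects the two instructions per
	 * iteration of the MIPS delay loop, and the division by 1000
	 * converts Hz to kHz.  Purely as an illustrative figure, a
	 * loops_per_jiffy of 2495000 with HZ = 100 would come out to
	 * roughly 499000 kHz, i.e. a ~500 MHz core.)
	 */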

	/*
	 * Rough estimation for SMP scheduling; this is the number of
	 * cycles it takes for a fully memory-limited process to flush
	 * the SMP-local cache.
	 *
	 * (For a P5 this pretty much means we will choose another idle
	 * CPU almost always at wakeup time (this is due to the small
	 * L1 cache), on PIIs it's around 50-100 usecs, depending on
	 * the cache size)
	 */
	if (!cpu_khz)
		return;

	cachesize = cd->linesz * cd->sets * cd->ways;
}

extern void __init calibrate_delay(void);
extern ATTRIB_NORET void cpu_idle(void);

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu = smp_processor_id();

	cpu_probe();
	cpu_report();
	per_cpu_trap_init();
	prom_init_secondary();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	prom_smp_finish();

	cpu_set(cpu, cpu_callin_map);

	cpu_idle();
}

DEFINE_SPINLOCK(smp_call_lock);

struct call_data_struct *call_data;

/*
 * Run a function on all other CPUs.
 *  <func>    The function to run. This must be fast and non-blocking.
 *  <info>    An arbitrary pointer to pass to the function.
 *  <retry>   If true, keep retrying until ready.
 *  <wait>    If true, wait until function has completed on other CPUs.
 *  [RETURNS] 0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>,
 * are executing it, or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler:
 *
 * CPU A                                CPU B
 * Disable interrupts
 *                                      smp_call_function()
 *                                      Take call_lock
 *                                      Send IPIs
 *                                      Wait for all cpus to acknowledge IPI
 *                                      CPU A has not responded, spin waiting
 *                                      for cpu A to respond, holding call_lock
 * smp_call_function()
 * Spin waiting for call_lock
 * Deadlock                             Deadlock
 */
int smp_call_function(void (*func) (void *info), void *info, int retry,
	int wait)
{
	struct call_data_struct data;
	int i, cpus = num_online_cpus() - 1;
	int cpu = smp_processor_id();

	if (!cpus)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&smp_call_lock);
	call_data = &data;
	mb();

	/* Send a message to all other CPUs and wait for them to respond */
	for (i = 0; i < NR_CPUS; i++)
		if (cpu_online(i) && i != cpu)
			core_send_ipi(i, SMP_CALL_FUNCTION);

	/* Wait for response */
	/* FIXME: lock-up detection, backtrace on lock-up */
	while (atomic_read(&data.started) != cpus)
		barrier();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			barrier();
	spin_unlock(&smp_call_lock);

	return 0;
}
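
/*
 * Illustrative usage sketch (my_func, my_arg and do_something are
 * hypothetical names, not part of this file or of any kernel API):
 *
 *	static void my_func(void *info)
 *	{
 *		do_something(info);
 *	}
 *
 *	...
 *	smp_call_function(my_func, my_arg, 1, 1);
 *	my_func(my_arg);
 *
 * Note that smp_call_function() only runs <func> on the *other* online
 * CPUs; a caller that wants it on every CPU either invokes it locally as
 * well, as flush_tlb_one() below does, or uses on_each_cpu().
 */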
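/*
 * IPI handler for SMP_CALL_FUNCTION: runs on each target CPU, picks up
 * the function and argument published in call_data, acknowledges via
 * data.started, executes the function in interrupt context and, if the
 * caller asked to wait, signals completion via data.finished.
 */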
void smp_call_function_interrupt(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function.
	 */
	mb();
	atomic_inc(&call_data->started);

	/*
	 * At this point the info structure may be out of scope unless wait==1.
	 */
	irq_enter();
	(*func)(info);
	irq_exit();

	if (wait) {
		mb();
		atomic_inc(&call_data->finished);
	}
}

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_enable();	/* May need to service _machine_restart IPI */
	for (;;);		/* Wait if available. */
}

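/*
 * Ask all other CPUs to take themselves out of cpu_online_map and spin,
 * so that only the calling CPU keeps running; typically used on the
 * restart/halt paths (see the _machine_restart note above).
 */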
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	prom_cpus_done();
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	smp_tune_scheduling();
	prom_prepare_cpus(max_cpus);
}

/* preload SMP state for boot cpu */
void __devinit smp_prepare_boot_cpu(void)
{
	/*
	 * This assumes that bootup is always handled by the processor
	 * with the logical and physical number 0.
	 */
	__cpu_number_map[0] = 0;
	__cpu_logical_map[0] = 0;
	cpu_set(0, phys_cpu_present_map);
	cpu_set(0, cpu_online_map);
	cpu_set(0, cpu_callin_map);
}

/*
 * Called once for each "cpu_possible(cpu)".  Needs to spin up the cpu
 * and keep control until "cpu_online(cpu)" is set.  Note: cpu is
 * physical, not logical.
 */
int __devinit __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;

	/*
	 * The new processor ends up executing start_secondary() and signals
	 * that it is up; the following code is purely to make sure Linux
	 * can schedule processes on this slave CPU.
	 */
	idle = fork_idle(cpu);
	if (IS_ERR(idle))
		panic(KERN_ERR "Fork failed for CPU %d", cpu);

	prom_boot_secondary(cpu, idle);

	/*
	 * Trust is futile.  We should really have timeouts ...
	 */
	while (!cpu_isset(cpu, cpu_callin_map))
		udelay(100);
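	/*
	 * A bounded wait, as hinted at above, could look roughly like the
	 * following sketch (the iteration limit is an arbitrary,
	 * illustrative value, not something this code implements):
	 *
	 *	int timeout = 100000;
	 *	while (!cpu_isset(cpu, cpu_callin_map) && --timeout)
	 *		udelay(100);
	 *	if (!timeout)
	 *		panic("CPU %d failed to start", cpu);
	 */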

	cpu_set(cpu, cpu_online_map);

	return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * contexts on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */
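
/*
 * A rough sketch of the cheap single-threaded path described above,
 * assuming the usual MIPS ASID scheme (illustrative, not literal code
 * from this file):
 *
 *	cpu_context(other_cpu, mm) = 0;
 *		...
 *	switch_mm() on other_cpu later sees the cleared context, calls
 *	get_new_mmu_context() and hands the mm a fresh ASID, so any stale
 *	TLB entries still tagged with the old ASID can no longer match.
 */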

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

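/*
 * Argument block used to hand a (vma, address range) pair to the TLB
 * flush IPI handlers below.
 */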
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_call_function(flush_tlb_one_ipi, (void *) vaddr, 1, 1);
	local_flush_tlb_one(vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);
EXPORT_SYMBOL(cpu_data);
EXPORT_SYMBOL(synchronize_irq);