#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>

DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
			= { &init_mm, 0, };

/*
 *	Smarter SMP flushing macros.
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway.)
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	To avoid global state use 8 different call vectors.
 *	Each CPU uses a specific vector to trigger flushes on other
 *	CPUs. Depending on the received vector the target CPUs look into
 *	the right array slot for the flush data.
 *
 *	With more than 8 CPUs they are hashed to the 8 available
 *	vectors. The limited global vector space forces us to this right now.
 *	In future when interrupts are split into per CPU domains this could be
 *	fixed, at the cost of triggering multiple IPIs in some cases.
 */

union smp_flush_state {
	struct {
		struct mm_struct *flush_mm;
		unsigned long flush_start;
		unsigned long flush_end;
		raw_spinlock_t tlbstate_lock;
		DECLARE_BITMAP(flush_cpumask, NR_CPUS);
	};
	char pad[INTERNODE_CACHE_BYTES];
} ____cacheline_internodealigned_in_smp;

/* State is put into the per CPU data section, but padded
   to a full cache line because other CPUs can access it and we don't
   want false sharing in the per cpu data segment. */
static union smp_flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS];

static DEFINE_PER_CPU_READ_MOSTLY(int, tlb_vector_offset);

/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead, update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
		load_cr3(swapper_pg_dir);
	}
}
EXPORT_SYMBOL_GPL(leave_mm);

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *	for the wrong mm, and in the worst case we perform a superfluous
 *	tlb flush.
 * 1a2) set cpu mmu_state to TLBSTATE_OK
 *	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles
 *	flush ipis.
 * 1b1) set cpu mmu_state to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu mmu_state is local to each cpu, no
 * write/read ordering problems.
 */

/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 *
 * Interrupts are disabled.
 */

/*
 * FIXME: use of asmlinkage is not consistent. On x86_64 it's a noop,
 * kept only for documentation purposes, but the usage is slightly
 * inconsistent. On x86_32, asmlinkage is regparm(0) but interrupt
 * entry calls in with the first parameter in %eax. Maybe define
 * intrlinkage?
 */
#ifdef CONFIG_X86_64
asmlinkage
#endif
void smp_invalidate_interrupt(struct pt_regs *regs)
{
	unsigned int cpu;
	unsigned int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();
	/*
	 * orig_rax contains the negated interrupt vector.
	 * Use that to determine where the sender put the data.
	 */
	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
	f = &flush_state[sender];

	if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
		goto out;
	/*
	 * This was a BUG() but until someone can quote me the
	 * line from the intel manual that guarantees an IPI to
	 * multiple CPUs is retried _only_ on the erroring CPUs
	 * it's staying as a return
	 *
	 * BUG();
	 */

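	/*
	 * flush_end encodes the flush type: TLB_FLUSH_ALL (or a CPU
	 * without INVLPG) flushes the whole TLB, 0 means a single page
	 * at flush_start, anything else is a [flush_start, flush_end)
	 * range.
	 */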
	if (f->flush_mm == this_cpu_read(cpu_tlbstate.active_mm)) {
		if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
			if (f->flush_end == TLB_FLUSH_ALL
					|| !cpu_has_invlpg)
				local_flush_tlb();
			else if (!f->flush_end)
				__flush_tlb_single(f->flush_start);
			else {
				unsigned long addr;
				addr = f->flush_start;
				while (addr < f->flush_end) {
					__flush_tlb_single(addr);
					addr += PAGE_SIZE;
				}
			}
		} else
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
	smp_mb__before_clear_bit();
	cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
	smp_mb__after_clear_bit();
	inc_irq_stat(irq_tlb_count);
}

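/*
 * Sender side of the flush IPI: pick this CPU's invalidate vector,
 * publish the flush data in that vector's slot, IPI every CPU in
 * @cpumask but ourselves, then spin until each target acknowledges by
 * clearing its bit in flush_cpumask. The spinlock only matters when
 * there are more CPUs than vectors and a slot can be shared.
 */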
static void flush_tlb_others_ipi(const struct cpumask *cpumask,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long end)
{
	unsigned int sender;
	union smp_flush_state *f;

	/* Caller has disabled preemption */
	sender = this_cpu_read(tlb_vector_offset);
	f = &flush_state[sender];

	if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
		raw_spin_lock(&f->tlbstate_lock);

	f->flush_mm = mm;
	f->flush_start = start;
	f->flush_end = end;
	if (cpumask_andnot(to_cpumask(f->flush_cpumask), cpumask, cpumask_of(smp_processor_id()))) {
		/*
		 * We have to send the IPI only to
		 * CPUs affected.
		 */
		apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
			      INVALIDATE_TLB_VECTOR_START + sender);

		while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
			cpu_relax();
	}

	f->flush_mm = NULL;
	f->flush_start = 0;
	f->flush_end = 0;
	if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
		raw_spin_unlock(&f->tlbstate_lock);
}

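/*
 * Flush the TLB on all CPUs in @cpumask other than the sender. On UV
 * systems the BAU hardware can broadcast the flush for us;
 * uv_flush_tlb_others() returns the subset of CPUs (if any) that still
 * need a conventional IPI.
 */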
void native_flush_tlb_others(const struct cpumask *cpumask,
			     struct mm_struct *mm, unsigned long start,
			     unsigned long end)
{
	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
		if (cpumask)
			flush_tlb_others_ipi(cpumask, mm, start, end);
		return;
	}
	flush_tlb_others_ipi(cpumask, mm, start, end);
}

static void __cpuinit calculate_tlb_offset(void)
{
	int cpu, node, nr_node_vecs, idx = 0;
	/*
	 * We are changing tlb_vector_offset for each CPU at runtime, but this
	 * will not cause inconsistency, as the write is atomic under x86. We
	 * might see more lock contention in a short time, but after all CPUs'
	 * tlb_vector_offset are changed, everything should go normal.
	 *
	 * Note: if NUM_INVALIDATE_TLB_VECTORS % nr_online_nodes != 0, we might
	 * waste some vectors.
	 */
	if (nr_online_nodes > NUM_INVALIDATE_TLB_VECTORS)
		nr_node_vecs = 1;
	else
		nr_node_vecs = NUM_INVALIDATE_TLB_VECTORS/nr_online_nodes;

	for_each_online_node(node) {
		int node_offset = (idx % NUM_INVALIDATE_TLB_VECTORS) *
			nr_node_vecs;
		int cpu_offset = 0;
		for_each_cpu(cpu, cpumask_of_node(node)) {
			per_cpu(tlb_vector_offset, cpu) = node_offset +
				cpu_offset;
			cpu_offset++;
			cpu_offset = cpu_offset % nr_node_vecs;
		}
		idx++;
	}
}

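/* Rebalance the CPU->vector assignment when a CPU comes online or dies. */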
static int __cpuinit tlb_cpuhp_notify(struct notifier_block *n,
		unsigned long action, void *hcpu)
{
	switch (action & 0xf) {
	case CPU_ONLINE:
	case CPU_DEAD:
		calculate_tlb_offset();
	}
	return NOTIFY_OK;
}

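/*
 * Initialise the per-vector spinlocks, compute the initial CPU->vector
 * mapping and keep it up to date across CPU hotplug.
 */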
static int __cpuinit init_smp_flush(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(flush_state); i++)
		raw_spin_lock_init(&flush_state[i].tlbstate_lock);

	calculate_tlb_offset();
	hotcpu_notifier(tlb_cpuhp_notify, 0);
	return 0;
}
core_initcall(init_smp_flush);

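/* Flush every mapping of the current task's mm, locally and remotely. */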
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;

	preempt_disable();

	local_flush_tlb();
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}

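/*
 * Flush every mapping of @mm. A CPU that only holds @mm lazily (a
 * kernel thread) drops it via leave_mm() rather than flushing.
 */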
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);

	preempt_enable();
}

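/*
 * flush_tlb_range() flushes 4K pages one by one, which is wasteful for
 * a huge page (and a single INVLPG may not cover its TLB entry on all
 * CPUs), so ranges containing one fall back to a full mm flush.
 * has_large_page() returns the first huge-page address in [start, end),
 * or 0 if there is none.
 */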
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline unsigned long has_large_page(struct mm_struct *mm,
				 unsigned long start, unsigned long end)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr = ALIGN(start, HPAGE_SIZE);
	for (; addr < end; addr += HPAGE_SIZE) {
		pgd = pgd_offset(mm, addr);
		if (likely(!pgd_none(*pgd))) {
			pud = pud_offset(pgd, addr);
			if (likely(!pud_none(*pud))) {
				pmd = pmd_offset(pud, addr);
				if (likely(!pmd_none(*pmd)))
					if (pmd_large(*pmd))
						return addr;
			}
		}
	}
	return 0;
}
#else
static inline unsigned long has_large_page(struct mm_struct *mm,
				 unsigned long start, unsigned long end)
{
	return 0;
}
#endif
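
/*
 * Flush a virtual address range. If the range spans more pages than the
 * TLB is estimated to hold (act_entries scaled down by
 * tlb_flushall_shift), flushing page by page costs more than a full
 * flush, so flush everything instead.
 */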
void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	struct mm_struct *mm;

	if (vma->vm_flags & VM_HUGETLB || tlb_flushall_shift == -1) {
flush_all:
		flush_tlb_mm(vma->vm_mm);
		return;
	}

	preempt_disable();
	mm = vma->vm_mm;
	if (current->active_mm == mm) {
		if (current->mm) {
			unsigned long addr, vmflag = vma->vm_flags;
			unsigned act_entries, tlb_entries = 0;

			if (vmflag & VM_EXEC)
				tlb_entries = tlb_lli_4k[ENTRIES];
			else
				tlb_entries = tlb_lld_4k[ENTRIES];

			act_entries = tlb_entries > mm->total_vm ?
					mm->total_vm : tlb_entries;

			if ((end - start) >> PAGE_SHIFT >
					act_entries >> tlb_flushall_shift)
				local_flush_tlb();
			else {
				if (has_large_page(mm, start, end)) {
					preempt_enable();
					goto flush_all;
				}
				for (addr = start; addr < end;
						addr += PAGE_SIZE)
					__flush_tlb_single(addr);

				if (cpumask_any_but(mm_cpumask(mm),
					smp_processor_id()) < nr_cpu_ids)
					flush_tlb_others(mm_cpumask(mm), mm,
						start, end);
				preempt_enable();
				return;
			}
		} else {
			leave_mm(smp_processor_id());
		}
	}
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}

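/*
 * Flush one page. Passing a flush_end of 0 to the other CPUs marks this
 * as a single-page flush (see smp_invalidate_interrupt()).
 */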
void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(start);
		else
			leave_mm(smp_processor_id());
	}

	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);

	preempt_enable();
}

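/*
 * IPI worker for flush_tlb_all(): flush everything on this CPU, and
 * drop the mm entirely if we were only holding it in lazy TLB mode.
 */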
static void do_flush_tlb_all(void *info)
{
	__flush_tlb_all();
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}

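/*
 * Debugfs knob for tuning tlb_flushall_shift at runtime. Writing -1
 * disables the range-flush heuristic entirely (flush_tlb_range() then
 * always falls back to flushing the whole mm). Assuming debugfs is
 * mounted at /sys/kernel/debug, the file appears as
 * /sys/kernel/debug/x86/tlb_flushall_shift, e.g.:
 *
 *	echo 5 > /sys/kernel/debug/x86/tlb_flushall_shift
 */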
#ifdef CONFIG_DEBUG_TLBFLUSH
static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	char buf[32];
	unsigned int len;

	len = sprintf(buf, "%hd\n", tlb_flushall_shift);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
		 const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t len;
	s8 shift;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtos8(buf, 0, &shift))
		return -EINVAL;

	/* a shift count >= the width of the type is undefined */
	if (shift > 63)
		return -EINVAL;

	tlb_flushall_shift = shift;
	return count;
}

static const struct file_operations fops_tlbflush = {
	.read = tlbflush_read_file,
	.write = tlbflush_write_file,
	.llseek = default_llseek,
};

static int __cpuinit create_tlb_flushall_shift(void)
{
	if (cpu_has_invlpg) {
		debugfs_create_file("tlb_flushall_shift", S_IRUSR | S_IWUSR,
			arch_debugfs_dir, NULL, &fops_tlbflush);
	}
	return 0;
}
late_initcall(create_tlb_flushall_shift);
#endif