/*
 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) "xive-kvm: " fmt

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/debug.h>
#include <asm/debugfs.h>
#include <asm/time.h>
#include <asm/opal.h>

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "book3s_xive.h"

/*
 * Virtual mode variants of the hcalls for use on radix/radix
 * with AIL. They require the VCPU's VP to be "pushed".
 *
 * We still instantiate them here because we use some of the
 * generated utility functions in this file as well.
 */
#define XIVE_RUNTIME_CHECKS
#define X_PFX xive_vm_
#define X_STATIC static
#define X_STAT_PFX stat_vm_
#define __x_tima		xive_tima
#define __x_eoi_page(xd)	((void __iomem *)((xd)->eoi_mmio))
#define __x_trig_page(xd)	((void __iomem *)((xd)->trig_mmio))
#define __x_writeb	__raw_writeb
#define __x_readw	__raw_readw
#define __x_readq	__raw_readq
#define __x_writeq	__raw_writeq

#include "book3s_xive_template.c"
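
/*
 * Note: with the X_PFX/X_STATIC definitions above, the template expands
 * into the virtual mode handlers xive_vm_h_xirr(), xive_vm_h_ipoll(),
 * xive_vm_h_ipi(), xive_vm_h_cppr() and xive_vm_h_eoi() (hooked up in
 * kvmppc_xive_init_module() at the bottom of this file), plus helpers
 * such as xive_vm_esb_load() and xive_vm_source_eoi() used throughout.
 */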

/*
 * We leave a gap of a couple of interrupts in the queue to
 * account for the IPI and additional safety guard.
 */
#define XIVE_Q_GAP	2

/*
 * Push a vcpu's context to the XIVE on guest entry.
 * This assumes we are in virtual mode (MMU on)
 */
void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu)
{
        void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt;
        u64 pq;

        if (!tima)
                return;
        eieio();
        __raw_writeq(vcpu->arch.xive_saved_state.w01, tima + TM_QW1_OS);
        __raw_writel(vcpu->arch.xive_cam_word, tima + TM_QW1_OS + TM_WORD2);
        vcpu->arch.xive_pushed = 1;
        eieio();

        /*
         * We clear the irq_pending flag. There is a small chance of a
         * race vs. the escalation interrupt happening on another
         * processor setting it again, but the only consequence is to
         * cause a spurious wakeup on the next H_CEDE, which is not an
         * issue.
         */
        vcpu->arch.irq_pending = 0;

        /*
         * In single escalation mode, if the escalation interrupt is
         * on, we mask it.
         */
        if (vcpu->arch.xive_esc_on) {
                pq = __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
                                                  XIVE_ESB_SET_PQ_01));
                mb();

                /*
                 * We have a possible subtle race here: The escalation
                 * interrupt might have fired and be on its way to the
                 * host queue while we mask it, and if we unmask it
                 * early enough (re-cede right away), there is a
                 * theoretical possibility that it fires again, thus
                 * landing in the target queue more than once which is
                 * a big no-no.
                 *
                 * Fortunately, solving this is rather easy. If the
                 * above load setting PQ to 01 returns a previous
                 * value where P is set, then we know the escalation
                 * interrupt is somewhere on its way to the host. In
                 * that case we simply don't clear the xive_esc_on
                 * flag below. It will be eventually cleared by the
                 * handler for the escalation interrupt.
                 *
                 * Then, when doing a cede, we check that flag again
                 * before re-enabling the escalation interrupt, and if
                 * set, we abort the cede.
                 */
                if (!(pq & XIVE_ESB_VAL_P))
                        /* Now P is 0, we can clear the flag */
                        vcpu->arch.xive_esc_on = 0;
        }
}
EXPORT_SYMBOL_GPL(kvmppc_xive_push_vcpu);

/*
 * This is a simple trigger for a generic XIVE IRQ. This must
 * only be called for interrupts that support a trigger page
 */
static bool xive_irq_trigger(struct xive_irq_data *xd)
{
        /* This should be only for MSIs */
        if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
                return false;

        /* Those interrupts should always have a trigger page */
        if (WARN_ON(!xd->trig_mmio))
                return false;

        out_be64(xd->trig_mmio, 0);

        return true;
}

static irqreturn_t xive_esc_irq(int irq, void *data)
{
        struct kvm_vcpu *vcpu = data;

        vcpu->arch.irq_pending = 1;
        smp_mb();
        if (vcpu->arch.ceded)
                kvmppc_fast_vcpu_kick(vcpu);

        /* Since we have the no-EOI flag, the interrupt is effectively
         * disabled now. Clearing xive_esc_on means we won't bother
         * doing so on the next entry.
         *
         * This also allows the entry code to know that if a PQ combination
         * of 10 is observed while xive_esc_on is true, it means the queue
         * contains an unprocessed escalation interrupt. We don't make use of
         * that knowledge today but might (see comment in book3s_hv_rmhandlers.S)
         */
        vcpu->arch.xive_esc_on = false;

        return IRQ_HANDLED;
}

static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        struct xive_q *q = &xc->queues[prio];
        char *name = NULL;
        int rc;

        /* Already there ? */
        if (xc->esc_virq[prio])
                return 0;

        /* Hook up the escalation interrupt */
        xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);
        if (!xc->esc_virq[prio]) {
                pr_err("Failed to map escalation interrupt for queue %d of VCPU %d\n",
                       prio, xc->server_num);
                return -EIO;
        }

        if (xc->xive->single_escalation)
                name = kasprintf(GFP_KERNEL, "kvm-%d-%d",
                                 vcpu->kvm->arch.lpid, xc->server_num);
        else
                name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
                                 vcpu->kvm->arch.lpid, xc->server_num, prio);
        if (!name) {
                pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
                       prio, xc->server_num);
                rc = -ENOMEM;
                goto error;
        }

        pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio);

        rc = request_irq(xc->esc_virq[prio], xive_esc_irq,
                         IRQF_NO_THREAD, name, vcpu);
        if (rc) {
                pr_err("Failed to request escalation interrupt for queue %d of VCPU %d\n",
                       prio, xc->server_num);
                goto error;
        }
        xc->esc_virq_names[prio] = name;

        /* In single escalation mode, we grab the ESB MMIO of the
         * interrupt and mask it. Also populate the VCPU v/raddr
         * of the ESB page for use by asm entry/exit code. Finally
         * set the XIVE_IRQ_NO_EOI flag which will prevent the
         * core code from performing an EOI on the escalation
         * interrupt, thus leaving it effectively masked after
         * it fires once.
         */
        if (xc->xive->single_escalation) {
                struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]);
                struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

                xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
                vcpu->arch.xive_esc_raddr = xd->eoi_page;
                vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio;
                xd->flags |= XIVE_IRQ_NO_EOI;
        }

        return 0;
error:
        irq_dispose_mapping(xc->esc_virq[prio]);
        xc->esc_virq[prio] = 0;
        kfree(name);
        return rc;
}
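
/*
 * Note on single escalation mode: the VP then has a single escalation
 * interrupt covering all of its queues (hence the shorter irq name
 * above, without the priority suffix). Because it is set to PQ=01 and
 * flagged XIVE_IRQ_NO_EOI, it fires at most once and then stays
 * effectively masked; the guest entry/exit code tracks whether it is
 * currently enabled via vcpu->arch.xive_esc_on.
 */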

static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        struct kvmppc_xive *xive = xc->xive;
        struct xive_q *q = &xc->queues[prio];
        void *qpage;
        int rc;

        if (WARN_ON(q->qpage))
                return 0;

        /* Allocate the queue; for now the memory comes from the current node */
        qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order);
        if (!qpage) {
                pr_err("Failed to allocate queue %d for VCPU %d\n",
                       prio, xc->server_num);
                return -ENOMEM;
        }
        memset(qpage, 0, 1 << xive->q_order);

        /*
         * Reconfigure the queue. This will set q->qpage only once the
         * queue is fully configured. This is a requirement for prio 0
         * as we will stop doing EOIs for every IPI as soon as we observe
         * qpage being non-NULL, and instead will only EOI when we receive
         * corresponding queue 0 entries
         */
        rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage,
                                         xive->q_order, true);
        if (rc)
                pr_err("Failed to configure queue %d for VCPU %d\n",
                       prio, xc->server_num);
        return rc;
}

/* Called with kvm_lock held */
static int xive_check_provisioning(struct kvm *kvm, u8 prio)
{
        struct kvmppc_xive *xive = kvm->arch.xive;
        struct kvm_vcpu *vcpu;
        int i, rc;

        lockdep_assert_held(&kvm->lock);

        /* Already provisioned ? */
        if (xive->qmap & (1 << prio))
                return 0;

        pr_devel("Provisioning prio... %d\n", prio);

        /* Provision each VCPU and enable escalations if needed */
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (!vcpu->arch.xive_vcpu)
                        continue;
                rc = xive_provision_queue(vcpu, prio);
                if (rc == 0 && !xive->single_escalation)
                        xive_attach_escalation(vcpu, prio);
                if (rc)
                        return rc;
        }

        /* Order previous stores and mark it as provisioned */
        mb();
        xive->qmap |= (1 << prio);
        return 0;
}
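
/*
 * Note: xive->qmap is a bitmap with one bit per priority. A set bit
 * means every connected vCPU has a queue provisioned for that priority,
 * so sources can subsequently be targeted at it without re-checking.
 */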

static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio)
{
        struct kvm_vcpu *vcpu;
        struct kvmppc_xive_vcpu *xc;
        struct xive_q *q;

        /* Locate target server */
        vcpu = kvmppc_xive_find_server(kvm, server);
        if (!vcpu) {
                pr_warn("%s: Can't find server %d\n", __func__, server);
                return;
        }
        xc = vcpu->arch.xive_vcpu;
        if (WARN_ON(!xc))
                return;

        q = &xc->queues[prio];
        atomic_inc(&q->pending_count);
}

static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        struct xive_q *q;
        u32 max;

        if (WARN_ON(!xc))
                return -ENXIO;
        if (!xc->valid)
                return -ENXIO;

        q = &xc->queues[prio];
        if (WARN_ON(!q->qpage))
                return -ENXIO;

        /* Calculate max number of interrupts in that queue. */
        max = (q->msk + 1) - XIVE_Q_GAP;
        return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
}
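
/*
 * Worked example of the capacity check above (illustrative numbers):
 * assuming a 64kB queue page holding 16384 4-byte EQ entries, q->msk is
 * 16383, so max = 16384 - XIVE_Q_GAP = 16382. The atomic counter then
 * rejects further targeting with -EBUSY, keeping the gap described at
 * the XIVE_Q_GAP definition free for the IPI plus a safety margin.
 */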

static int xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
{
        struct kvm_vcpu *vcpu;
        int i, rc;

        /* Locate target server */
        vcpu = kvmppc_xive_find_server(kvm, *server);
        if (!vcpu) {
                pr_devel("Can't find server %d\n", *server);
                return -EINVAL;
        }

        pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio);

        /* Try pick it */
        rc = xive_try_pick_queue(vcpu, prio);
        if (rc == 0)
                return rc;

        pr_devel(" .. failed, looking up candidate...\n");

        /* Failed, pick another VCPU */
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (!vcpu->arch.xive_vcpu)
                        continue;
                rc = xive_try_pick_queue(vcpu, prio);
                if (rc == 0) {
                        *server = vcpu->arch.xive_vcpu->server_num;
                        pr_devel(" found on 0x%x/%d\n", *server, prio);
                        return rc;
                }
        }
        pr_devel(" no available target !\n");

        /* No available target ! */
        return -EBUSY;
}

static u32 xive_vp(struct kvmppc_xive *xive, u32 server)
{
        return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server);
}

static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
                             struct kvmppc_xive_src_block *sb,
                             struct kvmppc_xive_irq_state *state)
{
        struct xive_irq_data *xd;
        u32 hw_num;
        u8 old_prio;
        u64 val;

        /*
         * Take the lock, set masked, try again if racing
         * with H_EOI
         */
        for (;;) {
                arch_spin_lock(&sb->lock);
                old_prio = state->guest_priority;
                state->guest_priority = MASKED;
                mb();
                if (!state->in_eoi)
                        break;
                state->guest_priority = old_prio;
                arch_spin_unlock(&sb->lock);
        }

        /* No change ? Bail */
        if (old_prio == MASKED)
                return old_prio;

        /* Get the right irq */
        kvmppc_xive_select_irq(state, &hw_num, &xd);

        /*
         * If the interrupt is marked as needing masking via
         * firmware, we do it here. Firmware masking however
         * is "lossy", it won't return the old p and q bits
         * and won't set the interrupt to a state where it will
         * record queued ones. If this is an issue we should do
         * lazy masking instead.
         *
         * For now, we work around this in unmask by forcing
         * an interrupt whenever we unmask a non-LSI via FW
         * (if ever).
         */
        if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
                xive_native_configure_irq(hw_num,
                                          xive_vp(xive, state->act_server),
                                          MASKED, state->number);
                /* set old_p so we can track if an H_EOI was done */
                state->old_p = true;
                state->old_q = false;
        } else {
                /* Set PQ to 10, return old P and old Q and remember them */
                val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
                state->old_p = !!(val & 2);
                state->old_q = !!(val & 1);

                /*
                 * Synchronize hardware to ensure the queues are updated
                 * when masking
                 */
                xive_native_sync_source(hw_num);
        }

        return old_prio;
}

static void xive_lock_for_unmask(struct kvmppc_xive_src_block *sb,
                                 struct kvmppc_xive_irq_state *state)
{
        /*
         * Take the lock, try again if racing with H_EOI
         */
        for (;;) {
                arch_spin_lock(&sb->lock);
                if (!state->in_eoi)
                        break;
                arch_spin_unlock(&sb->lock);
        }
}

static void xive_finish_unmask(struct kvmppc_xive *xive,
                               struct kvmppc_xive_src_block *sb,
                               struct kvmppc_xive_irq_state *state,
                               u8 prio)
{
        struct xive_irq_data *xd;
        u32 hw_num;

        /* If we aren't changing a thing, move on */
        if (state->guest_priority != MASKED)
                goto bail;

        /* Get the right irq */
        kvmppc_xive_select_irq(state, &hw_num, &xd);

        /*
         * See comment in xive_lock_and_mask() concerning masking
         * via firmware.
         */
        if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
                xive_native_configure_irq(hw_num,
                                          xive_vp(xive, state->act_server),
                                          state->act_priority, state->number);
                /* If an EOI is needed, do it here */
                if (!state->old_p)
                        xive_vm_source_eoi(hw_num, xd);
                /* If this is not an LSI, force a trigger */
                if (!(xd->flags & OPAL_XIVE_IRQ_LSI))
                        xive_irq_trigger(xd);
                goto bail;
        }

        /* Old Q set, set PQ to 11 */
        if (state->old_q)
                xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);

        /*
         * If not old P, then perform an "effective" EOI,
         * on the source. This will handle the cases where
         * FW EOI is needed.
         */
        if (!state->old_p)
                xive_vm_source_eoi(hw_num, xd);

        /* Synchronize ordering and mark unmasked */
        mb();
bail:
        state->guest_priority = prio;
}

/*
 * Target an interrupt to a given server/prio, this will fall back
 * to another server if necessary and perform the HW targeting
 * updates as needed
 *
 * NOTE: Must be called with the state lock held
 */
static int xive_target_interrupt(struct kvm *kvm,
                                 struct kvmppc_xive_irq_state *state,
                                 u32 server, u8 prio)
{
        struct kvmppc_xive *xive = kvm->arch.xive;
        u32 hw_num;
        int rc;

        /*
         * This will return a tentative server and actual
         * priority. The count for that new target will have
         * already been incremented.
         */
        rc = xive_select_target(kvm, &server, prio);

        /*
         * We failed to find a target ? Not much we can do
         * at least until we support the GIQ.
         */
        if (rc)
                return rc;

        /*
         * Increment the old queue pending count if there
         * was one so that the old queue count gets adjusted later
         * when observed to be empty.
         */
        if (state->act_priority != MASKED)
                xive_inc_q_pending(kvm,
                                   state->act_server,
                                   state->act_priority);
        /*
         * Update state and HW
         */
        state->act_priority = prio;
        state->act_server = server;

        /* Get the right irq */
        kvmppc_xive_select_irq(state, &hw_num, NULL);

        return xive_native_configure_irq(hw_num,
                                         xive_vp(xive, server),
                                         prio, state->number);
}

/*
 * Targeting rules: In order to avoid losing track of
 * pending interrupts across mask and unmask, which would
 * allow queue overflows, we implement the following rules:
 *
 * - Unless it was never enabled (or we run out of capacity)
 *   an interrupt is always targeted at a valid server/queue
 *   pair even when "masked" by the guest. This pair tends to
 *   be the last one used but it can be changed under some
 *   circumstances. That allows us to separate targeting
 *   from masking, we only handle accounting during (re)targeting,
 *   this also allows us to let an interrupt drain into its target
 *   queue after masking, avoiding complex schemes to remove
 *   interrupts out of remote processor queues.
 *
 * - When masking, we set PQ to 10 and save the previous value
 *   of P and Q.
 *
 * - When unmasking, if saved Q was set, we set PQ to 11
 *   otherwise we leave PQ to the HW state which will be either
 *   10 if nothing happened or 11 if the interrupt fired while
 *   masked. Effectively we are OR'ing the previous Q into the
 *   HW Q.
 *
 *   Then if saved P is clear, we do an effective EOI (Q->P->Trigger)
 *   which will unmask the interrupt and shoot a new one if Q was
 *   set.
 *
 *   Otherwise (saved P is set) we leave PQ unchanged (so 10 or 11,
 *   effectively meaning an H_EOI from the guest is still expected
 *   for that interrupt).
 *
 * - If H_EOI occurs while masked, we clear the saved P.
 *
 * - When changing target, we account on the new target and
 *   increment a separate "pending" counter on the old one.
 *   This pending counter will be used to decrement the old
 *   target's count when its queue has been observed empty.
 */
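
/*
 * For reference, a compact summary of the ESB PQ state machine the
 * rules above rely on (matching the usage in this file):
 *
 *   PQ = 00  enabled: a trigger presents the interrupt and moves to 10
 *   PQ = 10  P set: one occurrence is pending/in a queue; a further
 *            trigger sets Q (-> 11) and is coalesced
 *   PQ = 11  pending with at least one more occurrence recorded; an
 *            EOI causes a resend
 *   PQ = 01  "hard off": triggers are discarded
 */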

int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
                         u32 priority)
{
        struct kvmppc_xive *xive = kvm->arch.xive;
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        u8 new_act_prio;
        int rc = 0;
        u16 idx;

        if (!xive)
                return -ENODEV;

        pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n",
                 irq, server, priority);

        /* First, check provisioning of queues */
        if (priority != MASKED)
                rc = xive_check_provisioning(xive->kvm,
                                             xive_prio_from_guest(priority));
        if (rc) {
                pr_devel(" provisioning failure %d !\n", rc);
                return rc;
        }

        sb = kvmppc_xive_find_source(xive, irq, &idx);
        if (!sb)
                return -EINVAL;
        state = &sb->irq_state[idx];

        /*
         * We first handle masking/unmasking since the locking
         * might need to be retried due to EOIs, we'll handle
         * targeting changes later. These functions will return
         * with the SB lock held.
         *
         * xive_lock_and_mask() will also set state->guest_priority
         * but won't otherwise change other fields of the state.
         *
         * xive_lock_for_unmask will not actually unmask, this will
         * be done later by xive_finish_unmask() once the targeting
         * has been done, so we don't try to unmask an interrupt
         * that hasn't yet been targeted.
         */
        if (priority == MASKED)
                xive_lock_and_mask(xive, sb, state);
        else
                xive_lock_for_unmask(sb, state);

        /*
         * Then we handle targeting.
         *
         * First calculate a new "actual priority"
         */
        new_act_prio = state->act_priority;
        if (priority != MASKED)
                new_act_prio = xive_prio_from_guest(priority);

        pr_devel(" new_act_prio=%x act_server=%x act_prio=%x\n",
                 new_act_prio, state->act_server, state->act_priority);

        /*
         * Then check if we actually need to change anything,
         *
         * The condition for re-targeting the interrupt is that
         * we have a valid new priority (new_act_prio is not 0xff)
         * and either the server or the priority changed.
         *
         * Note: If act_priority was ff and the new priority is
         *       also ff, we don't do anything and leave the interrupt
         *       untargeted. An attempt of doing an int_on on an
         *       untargeted interrupt will fail. If that is a problem
         *       we could initialize interrupts with valid defaults.
         */
        if (new_act_prio != MASKED &&
            (state->act_server != server ||
             state->act_priority != new_act_prio))
                rc = xive_target_interrupt(kvm, state, server, new_act_prio);

        /*
         * Perform the final unmasking of the interrupt source
         * if necessary
         */
        if (priority != MASKED)
                xive_finish_unmask(xive, sb, state, priority);

        /*
         * Finally update saved_priority to match. Only int_on/off
         * set this field to a different value.
         */
        state->saved_priority = priority;

        arch_spin_unlock(&sb->lock);
        return rc;
}

int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
                         u32 *priority)
{
        struct kvmppc_xive *xive = kvm->arch.xive;
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        u16 idx;

        if (!xive)
                return -ENODEV;

        sb = kvmppc_xive_find_source(xive, irq, &idx);
        if (!sb)
                return -EINVAL;
        state = &sb->irq_state[idx];
        arch_spin_lock(&sb->lock);
        *server = state->act_server;
        *priority = state->guest_priority;
        arch_spin_unlock(&sb->lock);

        return 0;
}

int kvmppc_xive_int_on(struct kvm *kvm, u32 irq)
{
        struct kvmppc_xive *xive = kvm->arch.xive;
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        u16 idx;

        if (!xive)
                return -ENODEV;

        sb = kvmppc_xive_find_source(xive, irq, &idx);
        if (!sb)
                return -EINVAL;
        state = &sb->irq_state[idx];

        pr_devel("int_on(irq=0x%x)\n", irq);

        /*
         * Check if interrupt was not targeted
         */
        if (state->act_priority == MASKED) {
                pr_devel("int_on on untargeted interrupt\n");
                return -EINVAL;
        }

        /* If saved_priority is 0xff, do nothing */
        if (state->saved_priority == MASKED)
                return 0;

        /*
         * Lock and unmask it.
         */
        xive_lock_for_unmask(sb, state);
        xive_finish_unmask(xive, sb, state, state->saved_priority);
        arch_spin_unlock(&sb->lock);

        return 0;
}

int kvmppc_xive_int_off(struct kvm *kvm, u32 irq)
{
        struct kvmppc_xive *xive = kvm->arch.xive;
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        u16 idx;

        if (!xive)
                return -ENODEV;

        sb = kvmppc_xive_find_source(xive, irq, &idx);
        if (!sb)
                return -EINVAL;
        state = &sb->irq_state[idx];

        pr_devel("int_off(irq=0x%x)\n", irq);

        /*
         * Lock and mask
         */
        state->saved_priority = xive_lock_and_mask(xive, sb, state);
        arch_spin_unlock(&sb->lock);

        return 0;
}

static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq)
{
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        u16 idx;

        sb = kvmppc_xive_find_source(xive, irq, &idx);
        if (!sb)
                return false;
        state = &sb->irq_state[idx];
        if (!state->valid)
                return false;

        /*
         * Trigger the IPI. This assumes we never restore a pass-through
         * interrupt which should be safe enough
         */
        xive_irq_trigger(&state->ipi_data);

        return true;
}

u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

        if (!xc)
                return 0;

        /* Return the per-cpu state for state saving/migration */
        return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
               (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT |
               (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT;
}
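
/*
 * The 64-bit icpval packed above (and unpacked in kvmppc_xive_set_icp()
 * below) follows the XICS KVM_REG_PPC_ICP layout defined by the
 * KVM_REG_PPC_ICP_*_SHIFT/_MASK uapi constants: CPPR in bits 63..56,
 * XISR in bits 55..32, MFRR in bits 31..24 and the pending priority
 * (PPRI) in bits 23..16.
 */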

int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
        u8 cppr, mfrr;
        u32 xisr;

        if (!xc || !xive)
                return -ENOENT;

        /* Grab individual state fields. We don't use pending_pri */
        cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
        xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
                KVM_REG_PPC_ICP_XISR_MASK;
        mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;

        pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n",
                 xc->server_num, cppr, mfrr, xisr);

        /*
         * We can't update the state of a "pushed" VCPU, but that
         * shouldn't happen.
         */
        if (WARN_ON(vcpu->arch.xive_pushed))
                return -EIO;

        /* Update VCPU HW saved state */
        vcpu->arch.xive_saved_state.cppr = cppr;
        xc->hw_cppr = xc->cppr = cppr;

        /*
         * Update MFRR state. If it's not 0xff, we mark the VCPU as
         * having a pending MFRR change, which will re-evaluate the
         * target. The VCPU will thus potentially get a spurious
         * interrupt but that's not a big deal.
         */
        xc->mfrr = mfrr;
        if (mfrr < cppr)
                xive_irq_trigger(&xc->vp_ipi_data);

        /*
         * Now saved XIRR is "interesting". It means there's something in
         * the legacy "1 element" queue... for an IPI we simply ignore it,
         * as the MFRR restore will handle that. For anything else we need
         * to force a resend of the source.
         * However the source may not have been setup yet. If that's the
         * case, we keep that info and increment a counter in the xive to
         * tell subsequent xive_set_source() to go look.
         */
        if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) {
                xc->delayed_irq = xisr;
                xive->delayed_irqs++;
                pr_devel(" xisr restore delayed\n");
        }

        return 0;
}

int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
                           struct irq_desc *host_desc)
{
        struct kvmppc_xive *xive = kvm->arch.xive;
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        struct irq_data *host_data = irq_desc_get_irq_data(host_desc);
        unsigned int host_irq = irq_desc_get_irq(host_desc);
        unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data);
        u16 idx;
        u8 prio;
        int rc;

        if (!xive)
                return -ENODEV;

        pr_devel("set_mapped girq 0x%lx host HW irq 0x%x...\n", guest_irq, hw_irq);

        sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
        if (!sb)
                return -EINVAL;
        state = &sb->irq_state[idx];

        /*
         * Mark the passed-through interrupt as going to a VCPU,
         * this will prevent further EOIs and similar operations
         * from the XIVE code. It will also mask the interrupt
         * to either PQ=10 or 11 state, the latter if the interrupt
         * is pending. This will allow us to unmask or retrigger it
         * after routing it to the guest with a simple EOI.
         *
         * The "state" argument is a "token", all it needs is to be
         * non-NULL to switch to passed-through or NULL for the
         * other way around. We may not yet have an actual VCPU
         * target here and we don't really care.
         */
        rc = irq_set_vcpu_affinity(host_irq, state);
        if (rc) {
                pr_err("Failed to set VCPU affinity for irq %d\n", host_irq);
                return rc;
        }

        /*
         * Mask and read state of IPI. We need to know if its P bit
         * is set as that means it's potentially already using a
         * queue entry in the target
         */
        prio = xive_lock_and_mask(xive, sb, state);
        pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio,
                 state->old_p, state->old_q);

        /* Turn the IPI hard off */
        xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);

        /* Grab info about irq */
        state->pt_number = hw_irq;
        state->pt_data = irq_data_get_irq_handler_data(host_data);

        /*
         * Configure the IRQ to match the existing configuration of
         * the IPI if it was already targeted. Otherwise this will
         * mask the interrupt in a lossy way (act_priority is 0xff)
         * which is fine for a never started interrupt.
         */
        xive_native_configure_irq(hw_irq,
                                  xive_vp(xive, state->act_server),
                                  state->act_priority, state->number);

        /*
         * We do an EOI to enable the interrupt (and retrigger if needed)
         * if the guest has the interrupt unmasked and the P bit was *not*
         * set in the IPI. If it was set, we know a slot may still be in
         * use in the target queue thus we have to wait for a guest
         * originated EOI
         */
        if (prio != MASKED && !state->old_p)
                xive_vm_source_eoi(hw_irq, state->pt_data);

        /* Clear old_p/old_q as they are no longer relevant */
        state->old_p = state->old_q = false;

        /* Restore guest prio (unlocks EOI) */
        mb();
        state->guest_priority = prio;
        arch_spin_unlock(&sb->lock);

        return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped);

int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
                           struct irq_desc *host_desc)
{
        struct kvmppc_xive *xive = kvm->arch.xive;
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        unsigned int host_irq = irq_desc_get_irq(host_desc);
        u16 idx;
        u8 prio;
        int rc;

        if (!xive)
                return -ENODEV;

        pr_devel("clr_mapped girq 0x%lx...\n", guest_irq);

        sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
        if (!sb)
                return -EINVAL;
        state = &sb->irq_state[idx];

        /*
         * Mask and read state of IRQ. We need to know if its P bit
         * is set as that means it's potentially already using a
         * queue entry in the target
         */
        prio = xive_lock_and_mask(xive, sb, state);
        pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio,
                 state->old_p, state->old_q);

        /*
         * If old_p is set, the interrupt is pending, we switch it to
         * PQ=11. This will force a resend in the host so the interrupt
         * isn't lost to whatever host driver may pick it up
         */
        if (state->old_p)
                xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);

        /* Release the passed-through interrupt to the host */
        rc = irq_set_vcpu_affinity(host_irq, NULL);
        if (rc) {
                pr_err("Failed to clr VCPU affinity for irq %d\n", host_irq);
                return rc;
        }

        /* Forget about the IRQ */
        state->pt_number = 0;
        state->pt_data = NULL;

        /* Reconfigure the IPI */
        xive_native_configure_irq(state->ipi_number,
                                  xive_vp(xive, state->act_server),
                                  state->act_priority, state->number);

        /*
         * If old_p is set (we have a queue entry potentially
         * occupied) or the interrupt is masked, we set the IPI
         * to PQ=10 state. Otherwise we just re-enable it (PQ=00).
         */
        if (prio == MASKED || state->old_p)
                xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10);
        else
                xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00);

        /* Restore guest prio (unlocks EOI) */
        mb();
        state->guest_priority = prio;
        arch_spin_unlock(&sb->lock);

        return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped);
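
/*
 * Taken together, kvmppc_xive_set_mapped() and kvmppc_xive_clr_mapped()
 * swap the source the guest sees between the internal IPI and the real
 * HW interrupt while preserving the P/Q accounting, so a queue entry
 * that may still be in flight is never lost or duplicated across the
 * switch.
 */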

static void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        struct kvm *kvm = vcpu->kvm;
        struct kvmppc_xive *xive = kvm->arch.xive;
        int i, j;

        for (i = 0; i <= xive->max_sbid; i++) {
                struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

                if (!sb)
                        continue;
                for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
                        struct kvmppc_xive_irq_state *state = &sb->irq_state[j];

                        if (!state->valid)
                                continue;
                        if (state->act_priority == MASKED)
                                continue;
                        if (state->act_server != xc->server_num)
                                continue;

                        /* Clean it up */
                        arch_spin_lock(&sb->lock);
                        state->act_priority = MASKED;
                        xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
                        xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
                        if (state->pt_number) {
                                xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
                                xive_native_configure_irq(state->pt_number, 0, MASKED, 0);
                        }
                        arch_spin_unlock(&sb->lock);
                }
        }
}

void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        struct kvmppc_xive *xive = xc->xive;
        int i;

        pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);

        /* Ensure no interrupt is still routed to that VP */
        xc->valid = false;
        kvmppc_xive_disable_vcpu_interrupts(vcpu);

        /* Mask the VP IPI */
        xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);

        /* Disable the VP */
        xive_native_disable_vp(xc->vp_id);

        /* Free the queues & associated interrupts */
        for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
                struct xive_q *q = &xc->queues[i];

                /* Free the escalation irq */
                if (xc->esc_virq[i]) {
                        free_irq(xc->esc_virq[i], vcpu);
                        irq_dispose_mapping(xc->esc_virq[i]);
                        kfree(xc->esc_virq_names[i]);
                }
                /* Free the queue */
                xive_native_disable_queue(xc->vp_id, q, i);
                if (q->qpage) {
                        free_pages((unsigned long)q->qpage,
                                   xive->q_page_order);
                        q->qpage = NULL;
                }
        }

        /* Free the IPI */
        if (xc->vp_ipi) {
                xive_cleanup_irq_data(&xc->vp_ipi_data);
                xive_native_free_irq(xc->vp_ipi);
        }
        /* Free the VP */
        kfree(xc);
}

int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
                             struct kvm_vcpu *vcpu, u32 cpu)
{
        struct kvmppc_xive *xive = dev->private;
        struct kvmppc_xive_vcpu *xc;
        int i, r = -EBUSY;

        pr_devel("connect_vcpu(cpu=%d)\n", cpu);

        if (dev->ops != &kvm_xive_ops) {
                pr_devel("Wrong ops !\n");
                return -EPERM;
        }
        if (xive->kvm != vcpu->kvm)
                return -EPERM;
        if (vcpu->arch.irq_type)
                return -EBUSY;
        if (kvmppc_xive_find_server(vcpu->kvm, cpu)) {
                pr_devel("Duplicate !\n");
                return -EEXIST;
        }
        if (cpu >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
                pr_devel("Out of bounds !\n");
                return -EINVAL;
        }
        xc = kzalloc(sizeof(*xc), GFP_KERNEL);
        if (!xc)
                return -ENOMEM;

        /* We need to synchronize with queue provisioning */
        mutex_lock(&vcpu->kvm->lock);
        vcpu->arch.xive_vcpu = xc;
        xc->xive = xive;
        xc->vcpu = vcpu;
        xc->server_num = cpu;
        xc->vp_id = xive_vp(xive, cpu);
        xc->mfrr = 0xff;
        xc->valid = true;

        r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
        if (r)
                goto bail;

        /* Configure VCPU fields for use by assembly push/pull */
        vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
        vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);

        /* Allocate IPI */
        xc->vp_ipi = xive_native_alloc_irq();
        if (!xc->vp_ipi) {
                pr_err("Failed to allocate xive irq for VCPU IPI\n");
                r = -EIO;
                goto bail;
        }
        pr_devel(" IPI=0x%x\n", xc->vp_ipi);

        r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data);
        if (r)
                goto bail;

        /*
         * Enable the VP first as the single escalation mode will
         * affect the numbering of the escalation interrupts
         */
        r = xive_native_enable_vp(xc->vp_id, xive->single_escalation);
        if (r) {
                pr_err("Failed to enable VP in OPAL, err %d\n", r);
                goto bail;
        }

        /*
         * Initialize queues. Initially we set them all for no queueing
         * and we enable escalation for queue 0 only which we'll use for
         * our mfrr change notifications. If the VCPU is hot-plugged, we
         * do handle provisioning however based on the existing "map"
         * of enabled queues.
         */
        for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
                struct xive_q *q = &xc->queues[i];

                /* Single escalation, no queue 7 */
                if (i == 7 && xive->single_escalation)
                        break;

                /* Is queue already enabled ? Provision it */
                if (xive->qmap & (1 << i)) {
                        r = xive_provision_queue(vcpu, i);
                        if (r == 0 && !xive->single_escalation)
                                xive_attach_escalation(vcpu, i);
                        if (r)
                                goto bail;
                } else {
                        r = xive_native_configure_queue(xc->vp_id,
                                                        q, i, NULL, 0, true);
                        if (r) {
                                pr_err("Failed to configure queue %d for VCPU %d\n",
                                       i, cpu);
                                goto bail;
                        }
                }
        }

        /* If not done above, attach priority 0 escalation */
        r = xive_attach_escalation(vcpu, 0);
        if (r)
                goto bail;

        /* Route the IPI */
        r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI);
        if (!r)
                xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);

bail:
        mutex_unlock(&vcpu->kvm->lock);
        if (r) {
                kvmppc_xive_cleanup_vcpu(vcpu);
                return r;
        }

        vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
        return 0;
}

/*
 * Scanning of queues before/after migration save
 */
static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq)
{
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        u16 idx;

        sb = kvmppc_xive_find_source(xive, irq, &idx);
        if (!sb)
                return;

        state = &sb->irq_state[idx];

        /* Some sanity checking */
        if (!state->valid) {
                pr_err("invalid irq 0x%x in cpu queue!\n", irq);
                return;
        }

        /*
         * If the interrupt is in a queue it should have P set.
         * We warn so that it gets reported. A backtrace isn't useful
         * so no need to use a WARN_ON.
         */
        if (!state->saved_p)
                pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq);

        /* Set flag */
        state->in_queue = true;
}

static void xive_pre_save_mask_irq(struct kvmppc_xive *xive,
                                   struct kvmppc_xive_src_block *sb,
                                   u32 irq)
{
        struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];

        if (!state->valid)
                return;

        /* Mask and save state, this will also sync HW queues */
        state->saved_scan_prio = xive_lock_and_mask(xive, sb, state);

        /* Transfer P and Q */
        state->saved_p = state->old_p;
        state->saved_q = state->old_q;

        /* Unlock */
        arch_spin_unlock(&sb->lock);
}

static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive,
                                     struct kvmppc_xive_src_block *sb,
                                     u32 irq)
{
        struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];

        if (!state->valid)
                return;

        /*
         * Lock / exclude EOI (not technically necessary if the
         * guest isn't running concurrently). If this becomes a
         * performance issue we can probably remove the lock.
         */
        xive_lock_for_unmask(sb, state);

        /* Restore mask/prio if it wasn't masked */
        if (state->saved_scan_prio != MASKED)
                xive_finish_unmask(xive, sb, state, state->saved_scan_prio);

        /* Unlock */
        arch_spin_unlock(&sb->lock);
}

static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
{
        u32 idx = q->idx;
        u32 toggle = q->toggle;
        u32 irq;

        do {
                irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle);
                if (irq > XICS_IPI)
                        xive_pre_save_set_queued(xive, irq);
        } while (irq);
}

static void xive_pre_save_scan(struct kvmppc_xive *xive)
{
        struct kvm_vcpu *vcpu = NULL;
        int i, j;

        /*
         * See comment in xive_get_source() about how this
         * works. Collect a stable state for all interrupts
         */
        for (i = 0; i <= xive->max_sbid; i++) {
                struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
                if (!sb)
                        continue;
                for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
                        xive_pre_save_mask_irq(xive, sb, j);
        }

        /* Then scan the queues and update the "in_queue" flag */
        kvm_for_each_vcpu(i, vcpu, xive->kvm) {
                struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
                if (!xc)
                        continue;
                for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
                        if (xc->queues[j].qpage)
                                xive_pre_save_queue(xive, &xc->queues[j]);
                }
        }

        /* Finally restore interrupt states */
        for (i = 0; i <= xive->max_sbid; i++) {
                struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
                if (!sb)
                        continue;
                for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
                        xive_pre_save_unmask_irq(xive, sb, j);
        }
}

static void xive_post_save_scan(struct kvmppc_xive *xive)
{
        u32 i, j;

        /* Clear all the in_queue flags */
        for (i = 0; i <= xive->max_sbid; i++) {
                struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
                if (!sb)
                        continue;
                for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
                        sb->irq_state[j].in_queue = false;
        }

        /* Next get_source() will do a new scan */
        xive->saved_src_count = 0;
}

/*
 * This returns the source configuration and state to user space.
 */
static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
{
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        u64 __user *ubufp = (u64 __user *) addr;
        u64 val, prio;
        u16 idx;

        sb = kvmppc_xive_find_source(xive, irq, &idx);
        if (!sb)
                return -ENOENT;

        state = &sb->irq_state[idx];

        if (!state->valid)
                return -ENOENT;

        pr_devel("get_source(%ld)...\n", irq);

        /*
         * So to properly save the state into something that looks like a
         * XICS migration stream we cannot treat interrupts individually.
         *
         * We need, instead, mask them all (& save their previous PQ state)
         * to get a stable state in the HW, then sync them to ensure that
         * any interrupt that had already fired hits its queue, and finally
         * scan all the queues to collect which interrupts are still present
         * in the queues, so we can set the "pending" flag on them and
         * they can be resent on restore.
         *
         * So we do it all when the "first" interrupt gets saved, all the
         * state is collected at that point, the rest of xive_get_source()
         * will merely collect and convert that state to the expected
         * userspace bit mask.
         */
        if (xive->saved_src_count == 0)
                xive_pre_save_scan(xive);
        xive->saved_src_count++;

        /* Convert saved state into something compatible with xics */
        val = state->act_server;
        prio = state->saved_scan_prio;

        if (prio == MASKED) {
                val |= KVM_XICS_MASKED;
                prio = state->saved_priority;
        }
        val |= prio << KVM_XICS_PRIORITY_SHIFT;
        if (state->lsi) {
                val |= KVM_XICS_LEVEL_SENSITIVE;
                if (state->saved_p)
                        val |= KVM_XICS_PENDING;
        } else {
                if (state->saved_p)
                        val |= KVM_XICS_PRESENTED;

                if (state->saved_q)
                        val |= KVM_XICS_QUEUED;

                /*
                 * We mark it pending (which will attempt a re-delivery)
                 * if we are in a queue *or* we were masked and had
                 * Q set which is equivalent to the XICS "masked pending"
                 * state
                 */
                if (state->in_queue || (prio == MASKED && state->saved_q))
                        val |= KVM_XICS_PENDING;
        }

        /*
         * If that was the last interrupt saved, reset the
         * in_queue flags
         */
        if (xive->saved_src_count == xive->src_count)
                xive_post_save_scan(xive);

        /* Copy the result to userspace */
        if (put_user(val, ubufp))
                return -EFAULT;

        return 0;
}

static struct kvmppc_xive_src_block *xive_create_src_block(struct kvmppc_xive *xive,
                                                           int irq)
{
        struct kvm *kvm = xive->kvm;
        struct kvmppc_xive_src_block *sb;
        int i, bid;

        bid = irq >> KVMPPC_XICS_ICS_SHIFT;

        mutex_lock(&kvm->lock);

        /* block already exists - somebody else got here first */
        if (xive->src_blocks[bid])
                goto out;

        /* Create the ICS */
        sb = kzalloc(sizeof(*sb), GFP_KERNEL);
        if (!sb)
                goto out;

        sb->id = bid;

        for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
                sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i;
                sb->irq_state[i].guest_priority = MASKED;
                sb->irq_state[i].saved_priority = MASKED;
                sb->irq_state[i].act_priority = MASKED;
        }
        smp_wmb();
        xive->src_blocks[bid] = sb;

        if (bid > xive->max_sbid)
                xive->max_sbid = bid;

out:
        mutex_unlock(&kvm->lock);
        return xive->src_blocks[bid];
}

static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
{
        struct kvm *kvm = xive->kvm;
        struct kvm_vcpu *vcpu = NULL;
        int i;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

                if (!xc)
                        continue;

                if (xc->delayed_irq == irq) {
                        xc->delayed_irq = 0;
                        xive->delayed_irqs--;
                        return true;
                }
        }
        return false;
}

static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
{
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        u64 __user *ubufp = (u64 __user *) addr;
        u16 idx;
        u64 val;
        u8 act_prio, guest_prio;
        u32 server;
        int rc = 0;

        if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
                return -ENOENT;

        pr_devel("set_source(irq=0x%lx)\n", irq);

        /* Find the source */
        sb = kvmppc_xive_find_source(xive, irq, &idx);
        if (!sb) {
                pr_devel("No source, creating source block...\n");
                sb = xive_create_src_block(xive, irq);
                if (!sb) {
                        pr_devel("Failed to create block...\n");
                        return -ENOMEM;
                }
        }
        state = &sb->irq_state[idx];

        /* Read user passed data */
        if (get_user(val, ubufp)) {
                pr_devel("fault getting user info !\n");
                return -EFAULT;
        }

        server = val & KVM_XICS_DESTINATION_MASK;
        guest_prio = val >> KVM_XICS_PRIORITY_SHIFT;

        pr_devel(" val=0x%016llx (server=0x%x, guest_prio=%d)\n",
                 val, server, guest_prio);

        /*
         * If the source doesn't already have an IPI, allocate
         * one and get the corresponding data
         */
        if (!state->ipi_number) {
                state->ipi_number = xive_native_alloc_irq();
                if (state->ipi_number == 0) {
                        pr_devel("Failed to allocate IPI !\n");
                        return -ENOMEM;
                }
                xive_native_populate_irq_data(state->ipi_number, &state->ipi_data);
                pr_devel(" src_ipi=0x%x\n", state->ipi_number);
        }

        /*
         * We use lock_and_mask() to set us in the right masked
         * state. We will override that state from the saved state
         * further down, but this will handle the cases of interrupts
         * that need FW masking. We set the initial guest_priority to
         * 0 before calling it to ensure it actually performs the masking.
         */
        state->guest_priority = 0;
        xive_lock_and_mask(xive, sb, state);

        /*
         * Now, we select a target if we have one. If we don't we
         * leave the interrupt untargeted. It means that an interrupt
         * can become "untargeted" across migration if it was masked
         * by set_xive() but there is little we can do about it.
         */

        /* First convert prio and mark interrupt as untargeted */
        act_prio = xive_prio_from_guest(guest_prio);
        state->act_priority = MASKED;

        /*
         * We need to drop the lock due to the mutex below. Hopefully
         * nothing is touching that interrupt yet since it hasn't been
         * advertised to a running guest yet
         */
        arch_spin_unlock(&sb->lock);

        /* If we have a priority target the interrupt */
        if (act_prio != MASKED) {
                /* First, check provisioning of queues */
                mutex_lock(&xive->kvm->lock);
                rc = xive_check_provisioning(xive->kvm, act_prio);
                mutex_unlock(&xive->kvm->lock);

                /* Target interrupt */
                if (rc == 0)
                        rc = xive_target_interrupt(xive->kvm, state,
                                                   server, act_prio);
                /*
                 * If provisioning or targeting failed, leave it
                 * alone and masked. It will remain disabled until
                 * the guest re-targets it.
                 */
        }

        /*
         * Find out if this was a delayed irq stashed in an ICP,
         * in which case, treat it as pending
         */
        if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) {
                val |= KVM_XICS_PENDING;
                pr_devel(" Found delayed ! forcing PENDING !\n");
        }

        /* Cleanup the SW state */
        state->old_p = false;
        state->old_q = false;
        state->lsi = false;
        state->asserted = false;

        /* Restore LSI state */
        if (val & KVM_XICS_LEVEL_SENSITIVE) {
                state->lsi = true;
                if (val & KVM_XICS_PENDING)
                        state->asserted = true;
                pr_devel(" LSI ! Asserted=%d\n", state->asserted);
        }

        /*
         * Restore P and Q. If the interrupt was pending, we
         * force Q and !P, which will trigger a resend.
         *
         * That means that a guest that had both an interrupt
         * pending (queued) and Q set will restore with only
         * one instance of that interrupt instead of 2, but that
         * is perfectly fine as coalescing interrupts that haven't
         * been presented yet is always allowed.
         */
        if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING))
                state->old_p = true;
        if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
                state->old_q = true;

        pr_devel(" P=%d, Q=%d\n", state->old_p, state->old_q);

        /*
         * If the interrupt was unmasked, update guest priority and
         * perform the appropriate state transition and do a
         * re-trigger if necessary.
         */
        if (val & KVM_XICS_MASKED) {
                pr_devel(" masked, saving prio\n");
                state->guest_priority = MASKED;
                state->saved_priority = guest_prio;
        } else {
                pr_devel(" unmasked, restoring to prio %d\n", guest_prio);
                xive_finish_unmask(xive, sb, state, guest_prio);
                state->saved_priority = guest_prio;
        }

        /* Increment the number of valid sources and mark this one valid */
        if (!state->valid)
                xive->src_count++;
        state->valid = true;

        return 0;
}

int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
                        bool line_status)
{
        struct kvmppc_xive *xive = kvm->arch.xive;
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        u16 idx;

        if (!xive)
                return -ENODEV;

        sb = kvmppc_xive_find_source(xive, irq, &idx);
        if (!sb)
                return -EINVAL;

        /* Perform locklessly .... (we need to do some RCUisms here...) */
        state = &sb->irq_state[idx];
        if (!state->valid)
                return -EINVAL;

        /* We don't allow a trigger on a passed-through interrupt */
        if (state->pt_number)
                return -EINVAL;

        if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
                state->asserted = 1;
        else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
                state->asserted = 0;
                return 0;
        }

        /* Trigger the IPI */
        xive_irq_trigger(&state->ipi_data);

        return 0;
}

static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        struct kvmppc_xive *xive = dev->private;

        /* We honor the existing XICS ioctl */
        switch (attr->group) {
        case KVM_DEV_XICS_GRP_SOURCES:
                return xive_set_source(xive, attr->attr, attr->addr);
        }
        return -ENXIO;
}

static int xive_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        struct kvmppc_xive *xive = dev->private;

        /* We honor the existing XICS ioctl */
        switch (attr->group) {
        case KVM_DEV_XICS_GRP_SOURCES:
                return xive_get_source(xive, attr->attr, attr->addr);
        }
        return -ENXIO;
}

static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        /* We honor the same limits as XICS, at least for now */
        switch (attr->group) {
        case KVM_DEV_XICS_GRP_SOURCES:
                if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
                    attr->attr < KVMPPC_XICS_NR_IRQS)
                        return 0;
                break;
        }
        return -ENXIO;
}

static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
{
        xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
        xive_native_configure_irq(hw_num, 0, MASKED, 0);
        xive_cleanup_irq_data(xd);
}

static void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
{
        int i;

        for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
                struct kvmppc_xive_irq_state *state = &sb->irq_state[i];

                if (!state->valid)
                        continue;

                kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data);
                xive_native_free_irq(state->ipi_number);

                /* Pass-through, cleanup too */
                if (state->pt_number)
                        kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data);

                state->valid = false;
        }
}

static void kvmppc_xive_free(struct kvm_device *dev)
{
        struct kvmppc_xive *xive = dev->private;
        struct kvm *kvm = xive->kvm;
        int i;

        debugfs_remove(xive->dentry);

        if (kvm)
                kvm->arch.xive = NULL;

        /* Mask and free interrupts */
        for (i = 0; i <= xive->max_sbid; i++) {
                if (xive->src_blocks[i])
                        kvmppc_xive_free_sources(xive->src_blocks[i]);
                kfree(xive->src_blocks[i]);
                xive->src_blocks[i] = NULL;
        }

        if (xive->vp_base != XIVE_INVALID_VP)
                xive_native_free_vp_block(xive->vp_base);

        kfree(xive);
        kfree(dev);
}

static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
{
        struct kvmppc_xive *xive;
        struct kvm *kvm = dev->kvm;
        int ret = 0;

        pr_devel("Creating xive for partition\n");

        xive = kzalloc(sizeof(*xive), GFP_KERNEL);
        if (!xive)
                return -ENOMEM;

        dev->private = xive;
        xive->dev = dev;
        xive->kvm = kvm;

        /* Already there ? */
        if (kvm->arch.xive)
                ret = -EEXIST;
        else
                kvm->arch.xive = xive;

        /* We use the default queue size set by the host */
        xive->q_order = xive_native_default_eq_shift();
        if (xive->q_order < PAGE_SHIFT)
                xive->q_page_order = 0;
        else
                xive->q_page_order = xive->q_order - PAGE_SHIFT;
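
        /*
         * For example (illustrative numbers): with a default EQ shift of
         * 16 (64kB queues) and 64kB kernel pages, q_page_order is 0, i.e.
         * each queue is a single page; with 4kB pages it would be 4
         * (an order-4, 16-page allocation in xive_provision_queue()).
         */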

        /* Allocate a bunch of VPs */
        xive->vp_base = xive_native_alloc_vp_block(KVM_MAX_VCPUS);
        pr_devel("VP_Base=%x\n", xive->vp_base);

        if (xive->vp_base == XIVE_INVALID_VP)
                ret = -ENOMEM;

        xive->single_escalation = xive_native_has_single_escalation();

        if (ret) {
                kfree(xive);
                return ret;
        }

        return 0;
}

static int xive_debug_show(struct seq_file *m, void *private)
{
        struct kvmppc_xive *xive = m->private;
        struct kvm *kvm = xive->kvm;
        struct kvm_vcpu *vcpu;
        u64 t_rm_h_xirr = 0;
        u64 t_rm_h_ipoll = 0;
        u64 t_rm_h_cppr = 0;
        u64 t_rm_h_eoi = 0;
        u64 t_rm_h_ipi = 0;
        u64 t_vm_h_xirr = 0;
        u64 t_vm_h_ipoll = 0;
        u64 t_vm_h_cppr = 0;
        u64 t_vm_h_eoi = 0;
        u64 t_vm_h_ipi = 0;
        unsigned int i;

        if (!kvm)
                return 0;

        seq_printf(m, "=========\nVCPU state\n=========\n");

        kvm_for_each_vcpu(i, vcpu, kvm) {
                struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
                unsigned int q_idx;

                if (!xc)
                        continue;

                seq_printf(m, "cpu server %#x CPPR:%#x HWCPPR:%#x"
                           " MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n",
                           xc->server_num, xc->cppr, xc->hw_cppr,
                           xc->mfrr, xc->pending,
                           xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);
                for (q_idx = 0; q_idx < KVMPPC_XIVE_Q_COUNT; q_idx++) {
                        struct xive_q *q = &xc->queues[q_idx];
                        u32 i0, i1, idx;

                        if (!q->qpage && !xc->esc_virq[q_idx])
                                continue;

                        seq_printf(m, " [q%d]: ", q_idx);

                        if (q->qpage) {
                                idx = q->idx;
                                i0 = be32_to_cpup(q->qpage + idx);
                                idx = (idx + 1) & q->msk;
                                i1 = be32_to_cpup(q->qpage + idx);
                                seq_printf(m, "T=%d %08x %08x...\n", q->toggle, i0, i1);
                        }
                        if (xc->esc_virq[q_idx]) {
                                struct irq_data *d = irq_get_irq_data(xc->esc_virq[q_idx]);
                                struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
                                u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);
                                seq_printf(m, "E:%c%c I(%d:%llx:%llx)",
                                           (pq & XIVE_ESB_VAL_P) ? 'P' : 'p',
                                           (pq & XIVE_ESB_VAL_Q) ? 'Q' : 'q',
                                           xc->esc_virq[q_idx], pq, xd->eoi_page);
                                seq_printf(m, "\n");
                        }
                }

                t_rm_h_xirr += xc->stat_rm_h_xirr;
                t_rm_h_ipoll += xc->stat_rm_h_ipoll;
                t_rm_h_cppr += xc->stat_rm_h_cppr;
                t_rm_h_eoi += xc->stat_rm_h_eoi;
                t_rm_h_ipi += xc->stat_rm_h_ipi;
                t_vm_h_xirr += xc->stat_vm_h_xirr;
                t_vm_h_ipoll += xc->stat_vm_h_ipoll;
                t_vm_h_cppr += xc->stat_vm_h_cppr;
                t_vm_h_eoi += xc->stat_vm_h_eoi;
                t_vm_h_ipi += xc->stat_vm_h_ipi;
        }

        seq_printf(m, "Hcalls totals\n");
        seq_printf(m, " H_XIRR  R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr);
        seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll);
        seq_printf(m, " H_CPPR  R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr);
        seq_printf(m, " H_EOI   R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi);
        seq_printf(m, " H_IPI   R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi);

        return 0;
}

DEFINE_SHOW_ATTRIBUTE(xive_debug);

static void xive_debugfs_init(struct kvmppc_xive *xive)
{
        char *name;

        name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
        if (!name) {
                pr_err("%s: no memory for name\n", __func__);
                return;
        }

        xive->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
                                           xive, &xive_debug_fops);

        pr_debug("%s: created %s\n", __func__, name);
        kfree(name);
}

static void kvmppc_xive_init(struct kvm_device *dev)
{
        struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private;

        /* Register some debug interfaces */
        xive_debugfs_init(xive);
}

struct kvm_device_ops kvm_xive_ops = {
        .name = "kvm-xive",
        .create = kvmppc_xive_create,
        .init = kvmppc_xive_init,
        .destroy = kvmppc_xive_free,
        .set_attr = xive_set_attr,
        .get_attr = xive_get_attr,
        .has_attr = xive_has_attr,
};

void kvmppc_xive_init_module(void)
{
        __xive_vm_h_xirr = xive_vm_h_xirr;
        __xive_vm_h_ipoll = xive_vm_h_ipoll;
        __xive_vm_h_ipi = xive_vm_h_ipi;
        __xive_vm_h_cppr = xive_vm_h_cppr;
        __xive_vm_h_eoi = xive_vm_h_eoi;
}

void kvmppc_xive_exit_module(void)
{
        __xive_vm_h_xirr = NULL;
        __xive_vm_h_ipoll = NULL;
        __xive_vm_h_ipi = NULL;
        __xive_vm_h_cppr = NULL;
        __xive_vm_h_eoi = NULL;
}