/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright 2016,2017 IBM Corporation.
 */
#ifndef __XIVE_INTERNAL_H
#define __XIVE_INTERNAL_H

/*
 * A "disabled" interrupt should never fire; to catch problems,
 * we set its logical number to this value.
 */
#define XIVE_BAD_IRQ		0x7fffffff
#define XIVE_MAX_IRQ		(XIVE_BAD_IRQ - 1)

/* Each CPU carries one of these, holding various per-CPU state */
struct xive_cpu {
#ifdef CONFIG_SMP
	/* HW irq number and data of IPI */
	u32 hw_ipi;
	struct xive_irq_data ipi_data;
#endif /* CONFIG_SMP */

	int chip_id;

	/* Queue data. Only one queue is populated */
#define XIVE_MAX_QUEUES	8
	struct xive_q queue[XIVE_MAX_QUEUES];

	/*
	 * Pending mask. Each bit corresponds to a priority that
	 * potentially has pending interrupts, e.g. a value of 0x41
	 * means priorities 0 and 6 may have work pending.
	 */
	u8 pending_prio;

	/* Cache of the HW CPPR (Current Processor Priority Register) */
	u8 cppr;
};

/* Backend ops */
struct xive_ops {
	/* Interrupt source configuration */
	int	(*populate_irq_data)(u32 hw_irq, struct xive_irq_data *data);
	int	(*configure_irq)(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);
	int	(*get_irq_config)(u32 hw_irq, u32 *target, u8 *prio,
				  u32 *sw_irq);
	/* Per-CPU event queue and CPU setup/teardown */
	int	(*setup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
	void	(*cleanup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
	void	(*setup_cpu)(unsigned int cpu, struct xive_cpu *xc);
	void	(*teardown_cpu)(unsigned int cpu, struct xive_cpu *xc);
	bool	(*match)(struct device_node *np);
	void	(*shutdown)(void);

	/* Runtime interrupt handling */
	void	(*update_pending)(struct xive_cpu *xc);
	void	(*eoi)(u32 hw_irq);
	void	(*sync_source)(u32 hw_irq);
	u64	(*esb_rw)(u32 hw_irq, u32 offset, u64 data, bool write);
#ifdef CONFIG_SMP
	/* Allocation/release of the per-CPU IPI */
	int	(*get_ipi)(unsigned int cpu, struct xive_cpu *xc);
	void	(*put_ipi)(unsigned int cpu, struct xive_cpu *xc);
#endif
	int	(*debug_show)(struct seq_file *m, void *private);
	const char *name;
};

bool xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
		    u8 max_prio);
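
/*
 * Usage sketch (illustrative only; the "my_*" identifiers and the
 * tima_mmio/tima_offset/prio variables are hypothetical, not part of
 * this header): a platform backend fills in a struct xive_ops and
 * hands it to the core, roughly like so:
 *
 *	static const struct xive_ops my_backend_ops = {
 *		.populate_irq_data	= my_populate_irq_data,
 *		.configure_irq		= my_configure_irq,
 *		.setup_queue		= my_setup_queue,
 *		.cleanup_queue		= my_cleanup_queue,
 *		.match			= my_match,
 *		.name			= "my-backend",
 *	};
 *
 *	if (!xive_core_init(&my_backend_ops, tima_mmio, tima_offset, prio))
 *		pr_err("XIVE: core initialization failed\n");
 */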
__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift);
int xive_core_debug_init(void);

/* Page allocation order needed for a queue of 2^queue_shift bytes */
static inline u32 xive_alloc_order(u32 queue_shift)
{
	return (queue_shift > PAGE_SHIFT) ? (queue_shift - PAGE_SHIFT) : 0;
}
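
/*
 * Worked example (illustrative): with 64K pages (PAGE_SHIFT == 16), a
 * 2^16-byte queue fits in a single page so the order is 0, while a
 * 2^20-byte queue needs 2^(20 - 16) = 16 pages, i.e. order 4.
 */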

/* Set when XIVE is disabled on the kernel command line ("xive=off") */
extern bool xive_cmdline_disabled;

#endif /* __XIVE_INTERNAL_H */