/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2016, 2017 Cavium Inc.
 */

#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <asm-generic/msi.h>


#define GPIO_RX_DAT			0x0
#define GPIO_TX_SET			0x8
#define GPIO_TX_CLR			0x10
#define GPIO_CONST			0x90
#define GPIO_CONST_GPIOS_MASK		0xff
#define GPIO_BIT_CFG			0x400
#define GPIO_BIT_CFG_TX_OE		BIT(0)
#define GPIO_BIT_CFG_PIN_XOR		BIT(1)
#define GPIO_BIT_CFG_INT_EN		BIT(2)
#define GPIO_BIT_CFG_INT_TYPE		BIT(3)
#define GPIO_BIT_CFG_FIL_MASK		GENMASK(11, 4)
#define GPIO_BIT_CFG_FIL_CNT_SHIFT	4
#define GPIO_BIT_CFG_FIL_SEL_SHIFT	8
#define GPIO_BIT_CFG_TX_OD		BIT(12)
#define GPIO_BIT_CFG_PIN_SEL_MASK	GENMASK(25, 16)
#define GPIO_INTR			0x800
#define GPIO_INTR_INTR			BIT(0)
#define GPIO_INTR_INTR_W1S		BIT(1)
#define GPIO_INTR_ENA_W1C		BIT(2)
#define GPIO_INTR_ENA_W1S		BIT(3)
#define GPIO_2ND_BANK			0x1400

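/*
 * Default input glitch filter.  As implied by the debounce scaling in
 * thunderx_gpio_set_config() below, the filter takes FIL_CNT consecutive
 * samples spaced 2^FIL_SEL clocks (2.5ns each) apart, so 9 * 2^4 * 2.5ns
 * comes to roughly 400ns.
 */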
#define GLITCH_FILTER_400NS ((4u << GPIO_BIT_CFG_FIL_SEL_SHIFT) | \
			     (9u << GPIO_BIT_CFG_FIL_CNT_SHIFT))

struct thunderx_gpio;

struct thunderx_line {
	struct thunderx_gpio	*txgpio;
	unsigned int		line;
	unsigned int		fil_bits;
};

struct thunderx_gpio {
	struct gpio_chip	chip;
	u8 __iomem		*register_base;
	struct msix_entry	*msix_entries;	/* per line MSI-X */
	struct thunderx_line	*line_entries;	/* per line irq info */
	raw_spinlock_t		lock;
	unsigned long		invert_mask[2];
	unsigned long		od_mask[2];
	int			base_msi;
};

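/* Per-line registers are spaced 8 bytes apart within each register block. */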
static unsigned int bit_cfg_reg(unsigned int line)
{
	return 8 * line + GPIO_BIT_CFG;
}

static unsigned int intr_reg(unsigned int line)
{
	return 8 * line + GPIO_INTR;
}

static bool thunderx_gpio_is_gpio_nowarn(struct thunderx_gpio *txgpio,
					 unsigned int line)
{
	u64 bit_cfg = readq(txgpio->register_base + bit_cfg_reg(line));

	return (bit_cfg & GPIO_BIT_CFG_PIN_SEL_MASK) == 0;
}

/*
 * Check (and WARN) that the pin is available for GPIO.  We will not
 * allow modification of the state of non-GPIO pins from this driver.
 */
static bool thunderx_gpio_is_gpio(struct thunderx_gpio *txgpio,
				  unsigned int line)
{
	bool rv = thunderx_gpio_is_gpio_nowarn(txgpio, line);

	WARN_RATELIMIT(!rv, "Pin %d not available for GPIO\n", line);

	return rv;
}

static int thunderx_gpio_request(struct gpio_chip *chip, unsigned int line)
{
	struct thunderx_gpio *txgpio = gpiochip_get_data(chip);

	return thunderx_gpio_is_gpio(txgpio, line) ? 0 : -EIO;
}

static int thunderx_gpio_dir_in(struct gpio_chip *chip, unsigned int line)
{
	struct thunderx_gpio *txgpio = gpiochip_get_data(chip);

	if (!thunderx_gpio_is_gpio(txgpio, line))
		return -EIO;

	raw_spin_lock(&txgpio->lock);
	clear_bit(line, txgpio->invert_mask);
	clear_bit(line, txgpio->od_mask);
	writeq(txgpio->line_entries[line].fil_bits,
	       txgpio->register_base + bit_cfg_reg(line));
	raw_spin_unlock(&txgpio->lock);
	return 0;
}

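/*
 * Output state is changed through separate per-bit set (GPIO_TX_SET) and
 * clear (GPIO_TX_CLR) registers, so no read-modify-write (and thus no
 * locking) is needed here.  Lines 64 and above live in a second register
 * bank at GPIO_2ND_BANK.
 */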
static void thunderx_gpio_set(struct gpio_chip *chip, unsigned int line,
			      int value)
{
	struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
	int bank = line / 64;
	int bank_bit = line % 64;

	void __iomem *reg = txgpio->register_base +
		(bank * GPIO_2ND_BANK) + (value ? GPIO_TX_SET : GPIO_TX_CLR);

	writeq(BIT_ULL(bank_bit), reg);
}

static int thunderx_gpio_dir_out(struct gpio_chip *chip, unsigned int line,
				 int value)
{
	struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
	u64 bit_cfg = txgpio->line_entries[line].fil_bits | GPIO_BIT_CFG_TX_OE;

	if (!thunderx_gpio_is_gpio(txgpio, line))
		return -EIO;

	raw_spin_lock(&txgpio->lock);

	thunderx_gpio_set(chip, line, value);

	if (test_bit(line, txgpio->invert_mask))
		bit_cfg |= GPIO_BIT_CFG_PIN_XOR;

	if (test_bit(line, txgpio->od_mask))
		bit_cfg |= GPIO_BIT_CFG_TX_OD;

	writeq(bit_cfg, txgpio->register_base + bit_cfg_reg(line));

	raw_spin_unlock(&txgpio->lock);
	return 0;
}

static int thunderx_gpio_get_direction(struct gpio_chip *chip, unsigned int line)
{
	struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
	u64 bit_cfg;

	if (!thunderx_gpio_is_gpio_nowarn(txgpio, line))
		/*
		 * Say it is input for now to avoid WARNing on
		 * gpiochip_add_data().  We will WARN if someone
		 * requests it or tries to use it.
		 */
		return 1;

	bit_cfg = readq(txgpio->register_base + bit_cfg_reg(line));

	if (bit_cfg & GPIO_BIT_CFG_TX_OE)
		return GPIO_LINE_DIRECTION_OUT;

	return GPIO_LINE_DIRECTION_IN;
}

static int thunderx_gpio_set_config(struct gpio_chip *chip,
				    unsigned int line,
				    unsigned long cfg)
{
	bool orig_invert, orig_od, orig_dat, new_invert, new_od;
	u32 arg, sel;
	u64 bit_cfg;
	int bank = line / 64;
	int bank_bit = line % 64;
	int ret = -ENOTSUPP;
	struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
	void __iomem *reg = txgpio->register_base + (bank * GPIO_2ND_BANK) + GPIO_TX_SET;

	if (!thunderx_gpio_is_gpio(txgpio, line))
		return -EIO;

	raw_spin_lock(&txgpio->lock);
	orig_invert = test_bit(line, txgpio->invert_mask);
	new_invert = orig_invert;
	orig_od = test_bit(line, txgpio->od_mask);
	new_od = orig_od;
	orig_dat = ((readq(reg) >> bank_bit) & 1) ^ orig_invert;
	bit_cfg = readq(txgpio->register_base + bit_cfg_reg(line));
	switch (pinconf_to_config_param(cfg)) {
	case PIN_CONFIG_DRIVE_OPEN_DRAIN:
		/*
		 * Weird, setting open-drain mode causes signal
		 * inversion.  Note this so we can compensate in the
		 * dir_out function.
		 */
		set_bit(line, txgpio->invert_mask);
		new_invert = true;
		set_bit(line, txgpio->od_mask);
		new_od = true;
		ret = 0;
		break;
	case PIN_CONFIG_DRIVE_PUSH_PULL:
		clear_bit(line, txgpio->invert_mask);
		new_invert = false;
		clear_bit(line, txgpio->od_mask);
		new_od = false;
		ret = 0;
		break;
	case PIN_CONFIG_INPUT_DEBOUNCE:
		arg = pinconf_to_config_argument(cfg);
		if (arg > 1228) { /* 15 * 2^15 * 2.5nS maximum */
			ret = -EINVAL;
			break;
		}
		arg *= 400; /* scale to 2.5nS clocks. */
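		/*
		 * Reduce the 2.5nS clock count to a FIL_CNT value of at
		 * most 15, doubling the sample interval (FIL_SEL) each
		 * pass and rounding up so the requested debounce time is
		 * never shortened.
		 */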
		sel = 0;
		while (arg > 15) {
			sel++;
			arg++; /* always round up */
			arg >>= 1;
		}
		txgpio->line_entries[line].fil_bits =
			(sel << GPIO_BIT_CFG_FIL_SEL_SHIFT) |
			(arg << GPIO_BIT_CFG_FIL_CNT_SHIFT);
		bit_cfg &= ~GPIO_BIT_CFG_FIL_MASK;
		bit_cfg |= txgpio->line_entries[line].fil_bits;
		writeq(bit_cfg, txgpio->register_base + bit_cfg_reg(line));
		ret = 0;
		break;
	default:
		break;
	}
	raw_spin_unlock(&txgpio->lock);

	/*
	 * If currently output and OPEN_DRAIN changed, install the new
	 * settings
	 */
	if ((new_invert != orig_invert || new_od != orig_od) &&
	    (bit_cfg & GPIO_BIT_CFG_TX_OE))
		ret = thunderx_gpio_dir_out(chip, line, orig_dat ^ new_invert);

	return ret;
}

static int thunderx_gpio_get(struct gpio_chip *chip, unsigned int line)
{
	struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
	int bank = line / 64;
	int bank_bit = line % 64;
	u64 read_bits = readq(txgpio->register_base + (bank * GPIO_2ND_BANK) + GPIO_RX_DAT);
	u64 masked_bits = read_bits & BIT_ULL(bank_bit);

	if (test_bit(line, txgpio->invert_mask))
		return masked_bits == 0;
	else
		return masked_bits != 0;
}

static void thunderx_gpio_set_multiple(struct gpio_chip *chip,
				       unsigned long *mask,
				       unsigned long *bits)
{
	int bank;
	u64 set_bits, clear_bits;
	struct thunderx_gpio *txgpio = gpiochip_get_data(chip);

	for (bank = 0; bank <= chip->ngpio / 64; bank++) {
		set_bits = bits[bank] & mask[bank];
		clear_bits = ~bits[bank] & mask[bank];
		writeq(set_bits, txgpio->register_base + (bank * GPIO_2ND_BANK) + GPIO_TX_SET);
		writeq(clear_bits, txgpio->register_base + (bank * GPIO_2ND_BANK) + GPIO_TX_CLR);
	}
}

static void thunderx_gpio_irq_ack(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct thunderx_gpio *txgpio = gpiochip_get_data(gc);

	writeq(GPIO_INTR_INTR,
	       txgpio->register_base + intr_reg(irqd_to_hwirq(d)));
}

static void thunderx_gpio_irq_mask(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct thunderx_gpio *txgpio = gpiochip_get_data(gc);

	writeq(GPIO_INTR_ENA_W1C,
	       txgpio->register_base + intr_reg(irqd_to_hwirq(d)));
}

static void thunderx_gpio_irq_mask_ack(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct thunderx_gpio *txgpio = gpiochip_get_data(gc);

	writeq(GPIO_INTR_ENA_W1C | GPIO_INTR_INTR,
	       txgpio->register_base + intr_reg(irqd_to_hwirq(d)));
}

static void thunderx_gpio_irq_unmask(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct thunderx_gpio *txgpio = gpiochip_get_data(gc);

	writeq(GPIO_INTR_ENA_W1S,
	       txgpio->register_base + intr_reg(irqd_to_hwirq(d)));
}

static int thunderx_gpio_irq_set_type(struct irq_data *d,
				      unsigned int flow_type)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
	struct thunderx_line *txline =
		&txgpio->line_entries[irqd_to_hwirq(d)];
	u64 bit_cfg;

	irqd_set_trigger_type(d, flow_type);

	bit_cfg = txline->fil_bits | GPIO_BIT_CFG_INT_EN;

	if (flow_type & IRQ_TYPE_EDGE_BOTH) {
		irq_set_handler_locked(d, handle_fasteoi_ack_irq);
		bit_cfg |= GPIO_BIT_CFG_INT_TYPE;
	} else {
		irq_set_handler_locked(d, handle_fasteoi_mask_irq);
	}

	raw_spin_lock(&txgpio->lock);
	if (flow_type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)) {
		bit_cfg |= GPIO_BIT_CFG_PIN_XOR;
		set_bit(txline->line, txgpio->invert_mask);
	} else {
		clear_bit(txline->line, txgpio->invert_mask);
	}
	clear_bit(txline->line, txgpio->od_mask);
	writeq(bit_cfg, txgpio->register_base + bit_cfg_reg(txline->line));
	raw_spin_unlock(&txgpio->lock);

	return IRQ_SET_MASK_OK;
}

static void thunderx_gpio_irq_enable(struct irq_data *data)
{
	irq_chip_enable_parent(data);
	thunderx_gpio_irq_unmask(data);
}

static void thunderx_gpio_irq_disable(struct irq_data *data)
{
	thunderx_gpio_irq_mask(data);
	irq_chip_disable_parent(data);
}

/*
 * Interrupts are chained from underlying MSI-X vectors.  We have
 * these irq_chip functions to be able to handle level triggering
 * semantics and other acknowledgment tasks associated with the GPIO
 * mechanism.
 */
static struct irq_chip thunderx_gpio_irq_chip = {
	.name			= "GPIO",
	.irq_enable		= thunderx_gpio_irq_enable,
	.irq_disable		= thunderx_gpio_irq_disable,
	.irq_ack		= thunderx_gpio_irq_ack,
	.irq_mask		= thunderx_gpio_irq_mask,
	.irq_mask_ack		= thunderx_gpio_irq_mask_ack,
	.irq_unmask		= thunderx_gpio_irq_unmask,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_set_type		= thunderx_gpio_irq_set_type,

	.flags			= IRQCHIP_SET_TYPE_MASKED
};

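/*
 * Translate a GPIO line (the child hwirq) to the hwirq of the MSI-X
 * vector that backs it in the parent irq domain.
 */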
static int thunderx_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
					       unsigned int child,
					       unsigned int child_type,
					       unsigned int *parent,
					       unsigned int *parent_type)
{
	struct thunderx_gpio *txgpio = gpiochip_get_data(gc);
	struct irq_data *irqd;
	unsigned int irq;

	irq = txgpio->msix_entries[child].vector;
	irqd = irq_domain_get_irq_data(gc->irq.parent_domain, irq);
	if (!irqd)
		return -EINVAL;
	*parent = irqd_to_hwirq(irqd);
	*parent_type = IRQ_TYPE_LEVEL_HIGH;
	return 0;
}

static void *thunderx_gpio_populate_parent_alloc_info(struct gpio_chip *chip,
						      unsigned int parent_hwirq,
						      unsigned int parent_type)
{
	msi_alloc_info_t *info;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return NULL;

	info->hwirq = parent_hwirq;
	return info;
}

static int thunderx_gpio_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	void __iomem * const *tbl;
	struct device *dev = &pdev->dev;
	struct thunderx_gpio *txgpio;
	struct gpio_chip *chip;
	struct gpio_irq_chip *girq;
	int ngpio, i;
	int err = 0;

	txgpio = devm_kzalloc(dev, sizeof(*txgpio), GFP_KERNEL);
	if (!txgpio)
		return -ENOMEM;

	raw_spin_lock_init(&txgpio->lock);
	chip = &txgpio->chip;

	pci_set_drvdata(pdev, txgpio);

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device: err %d\n", err);
		goto out;
	}

	err = pcim_iomap_regions(pdev, 1 << 0, KBUILD_MODNAME);
	if (err) {
		dev_err(dev, "Failed to iomap PCI device: err %d\n", err);
		goto out;
	}

	tbl = pcim_iomap_table(pdev);
	txgpio->register_base = tbl[0];
	if (!txgpio->register_base) {
		dev_err(dev, "Cannot map PCI resource\n");
		err = -ENOMEM;
		goto out;
	}

	if (pdev->subsystem_device == 0xa10a) {
		/* CN88XX has no GPIO_CONST register */
		ngpio = 50;
		txgpio->base_msi = 48;
	} else {
		u64 c = readq(txgpio->register_base + GPIO_CONST);

		ngpio = c & GPIO_CONST_GPIOS_MASK;
		txgpio->base_msi = (c >> 8) & 0xff;
	}

	txgpio->msix_entries = devm_kcalloc(dev,
					    ngpio, sizeof(struct msix_entry),
					    GFP_KERNEL);
	if (!txgpio->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	txgpio->line_entries = devm_kcalloc(dev,
					    ngpio,
					    sizeof(struct thunderx_line),
					    GFP_KERNEL);
	if (!txgpio->line_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < ngpio; i++) {
		u64 bit_cfg = readq(txgpio->register_base + bit_cfg_reg(i));

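		/* Each line uses every other MSI-X vector, starting at base_msi. */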
		txgpio->msix_entries[i].entry = txgpio->base_msi + (2 * i);
		txgpio->line_entries[i].line = i;
		txgpio->line_entries[i].txgpio = txgpio;
		/*
		 * If something has already programmed the pin, use
		 * the existing glitch filter settings, otherwise go
		 * to 400nS.
		 */
		txgpio->line_entries[i].fil_bits = bit_cfg ?
			(bit_cfg & GPIO_BIT_CFG_FIL_MASK) : GLITCH_FILTER_400NS;

		if ((bit_cfg & GPIO_BIT_CFG_TX_OE) && (bit_cfg & GPIO_BIT_CFG_TX_OD))
			set_bit(i, txgpio->od_mask);
		if (bit_cfg & GPIO_BIT_CFG_PIN_XOR)
			set_bit(i, txgpio->invert_mask);
	}


	/* Enable all MSI-X for interrupts on all possible lines. */
	err = pci_enable_msix_range(pdev, txgpio->msix_entries, ngpio, ngpio);
	if (err < 0)
		goto out;

	chip->label = KBUILD_MODNAME;
	chip->parent = dev;
	chip->owner = THIS_MODULE;
	chip->request = thunderx_gpio_request;
	chip->base = -1; /* System allocated */
	chip->can_sleep = false;
	chip->ngpio = ngpio;
	chip->get_direction = thunderx_gpio_get_direction;
	chip->direction_input = thunderx_gpio_dir_in;
	chip->get = thunderx_gpio_get;
	chip->direction_output = thunderx_gpio_dir_out;
	chip->set = thunderx_gpio_set;
	chip->set_multiple = thunderx_gpio_set_multiple;
	chip->set_config = thunderx_gpio_set_config;
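
	/*
	 * Set up the hierarchical irqchip: GPIO line interrupts are
	 * allocated on top of the MSI-X parent domain, with each line
	 * mapped to its vector by thunderx_gpio_child_to_parent_hwirq().
	 */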
	girq = &chip->irq;
	girq->chip = &thunderx_gpio_irq_chip;
	girq->fwnode = of_node_to_fwnode(dev->of_node);
	girq->parent_domain =
		irq_get_irq_data(txgpio->msix_entries[0].vector)->domain;
	girq->child_to_parent_hwirq = thunderx_gpio_child_to_parent_hwirq;
	girq->populate_parent_alloc_arg = thunderx_gpio_populate_parent_alloc_info;
	girq->handler = handle_bad_irq;
	girq->default_type = IRQ_TYPE_NONE;

	err = devm_gpiochip_add_data(dev, chip, txgpio);
	if (err)
		goto out;

	/* Push on irq_data and the domain for each line. */
	for (i = 0; i < ngpio; i++) {
		struct irq_fwspec fwspec;

		fwspec.fwnode = of_node_to_fwnode(dev->of_node);
		fwspec.param_count = 2;
		fwspec.param[0] = i;
		fwspec.param[1] = IRQ_TYPE_NONE;
		err = irq_domain_push_irq(girq->domain,
					  txgpio->msix_entries[i].vector,
					  &fwspec);
		if (err < 0)
			dev_err(dev, "irq_domain_push_irq: %d\n", err);
	}

	dev_info(dev, "ThunderX GPIO: %d lines with base %d.\n",
		 ngpio, chip->base);
	return 0;
out:
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void thunderx_gpio_remove(struct pci_dev *pdev)
{
	int i;
	struct thunderx_gpio *txgpio = pci_get_drvdata(pdev);

	for (i = 0; i < txgpio->chip.ngpio; i++)
		irq_domain_pop_irq(txgpio->chip.irq.domain,
				   txgpio->msix_entries[i].vector);

	irq_domain_remove(txgpio->chip.irq.domain);

	pci_set_drvdata(pdev, NULL);
}

static const struct pci_device_id thunderx_gpio_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xA00A) },
	{ 0, }	/* end of table */
};

MODULE_DEVICE_TABLE(pci, thunderx_gpio_id_table);

static struct pci_driver thunderx_gpio_driver = {
	.name = KBUILD_MODNAME,
	.id_table = thunderx_gpio_id_table,
	.probe = thunderx_gpio_probe,
	.remove = thunderx_gpio_remove,
};

module_pci_driver(thunderx_gpio_driver);

MODULE_DESCRIPTION("Cavium Inc. ThunderX/OCTEON-TX GPIO Driver");
MODULE_LICENSE("GPL");