// SPDX-License-Identifier: GPL-2.0
/*
 * Intel pinctrl/GPIO core driver.
 *
 * Copyright (C) 2015, Intel Corporation
 * Authors: Mathias Nyman <mathias.nyman@linux.intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/acpi.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/time.h>

#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>

#include "../core.h"
#include "pinctrl-intel.h"

/* Offset from regs */
#define REVID				0x000
#define REVID_SHIFT			16
#define REVID_MASK			GENMASK(31, 16)

#define PADBAR				0x00c

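/*
 * Pad ownership is reported with 4 bits per pad, eight pads per 32-bit
 * PAD_OWN register: PADOWN_GPP() selects the register within the group and
 * PADOWN_SHIFT()/PADOWN_MASK() select the nibble for the pad. A zero nibble
 * means the pad is owned by the host (see intel_pad_owned_by_host()).
 */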
#define PADOWN_BITS			4
#define PADOWN_SHIFT(p)			((p) % 8 * PADOWN_BITS)
#define PADOWN_MASK(p)			(GENMASK(3, 0) << PADOWN_SHIFT(p))
#define PADOWN_GPP(p)			((p) / 8)

/* Offset from pad_regs */
#define PADCFG0				0x000
#define PADCFG0_RXEVCFG_SHIFT		25
#define PADCFG0_RXEVCFG_MASK		GENMASK(26, 25)
#define PADCFG0_RXEVCFG_LEVEL		0
#define PADCFG0_RXEVCFG_EDGE		1
#define PADCFG0_RXEVCFG_DISABLED	2
#define PADCFG0_RXEVCFG_EDGE_BOTH	3
#define PADCFG0_PREGFRXSEL		BIT(24)
#define PADCFG0_RXINV			BIT(23)
#define PADCFG0_GPIROUTIOXAPIC		BIT(20)
#define PADCFG0_GPIROUTSCI		BIT(19)
#define PADCFG0_GPIROUTSMI		BIT(18)
#define PADCFG0_GPIROUTNMI		BIT(17)
#define PADCFG0_PMODE_SHIFT		10
#define PADCFG0_PMODE_MASK		GENMASK(13, 10)
#define PADCFG0_PMODE_GPIO		0
#define PADCFG0_GPIORXDIS		BIT(9)
#define PADCFG0_GPIOTXDIS		BIT(8)
#define PADCFG0_GPIORXSTATE		BIT(1)
#define PADCFG0_GPIOTXSTATE		BIT(0)

#define PADCFG1				0x004
#define PADCFG1_TERM_UP			BIT(13)
#define PADCFG1_TERM_SHIFT		10
#define PADCFG1_TERM_MASK		GENMASK(12, 10)
#define PADCFG1_TERM_20K		BIT(2)
#define PADCFG1_TERM_5K			BIT(1)
#define PADCFG1_TERM_1K			BIT(0)
#define PADCFG1_TERM_833		(BIT(1) | BIT(0))

#define PADCFG2				0x008
#define PADCFG2_DEBEN			BIT(0)
#define PADCFG2_DEBOUNCE_SHIFT		1
#define PADCFG2_DEBOUNCE_MASK		GENMASK(4, 1)

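/*
 * The debouncer works in units of 31.25 us: with a programmed field value v
 * the debounce period is BIT(v) * DEBOUNCE_PERIOD_NSEC, see
 * intel_config_set_debounce() and intel_config_get_debounce() below.
 */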
#define DEBOUNCE_PERIOD_NSEC		31250

struct intel_pad_context {
	u32 padcfg0;
	u32 padcfg1;
	u32 padcfg2;
};

struct intel_community_context {
	u32 *intmask;
	u32 *hostown;
};

#define pin_to_padno(c, p)	((p) - (c)->pin_base)
#define padgroup_offset(g, p)	((p) - (g)->base)

static struct intel_community *intel_get_community(struct intel_pinctrl *pctrl,
						   unsigned int pin)
{
	struct intel_community *community;
	int i;

	for (i = 0; i < pctrl->ncommunities; i++) {
		community = &pctrl->communities[i];
		if (pin >= community->pin_base &&
		    pin < community->pin_base + community->npins)
			return community;
	}

	dev_warn(pctrl->dev, "failed to find community for pin %u\n", pin);
	return NULL;
}

static const struct intel_padgroup *
intel_community_get_padgroup(const struct intel_community *community,
			     unsigned int pin)
{
	int i;

	for (i = 0; i < community->ngpps; i++) {
		const struct intel_padgroup *padgrp = &community->gpps[i];

		if (pin >= padgrp->base && pin < padgrp->base + padgrp->size)
			return padgrp;
	}

	return NULL;
}

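/*
 * Return the MMIO address of the given PADCFG register for a pin, or NULL
 * if the register is not implemented: communities without the debounce
 * feature only expose PADCFG0 and PADCFG1.
 */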
static void __iomem *intel_get_padcfg(struct intel_pinctrl *pctrl,
				      unsigned int pin, unsigned int reg)
{
	const struct intel_community *community;
	unsigned int padno;
	size_t nregs;

	community = intel_get_community(pctrl, pin);
	if (!community)
		return NULL;

	padno = pin_to_padno(community, pin);
	nregs = (community->features & PINCTRL_FEATURE_DEBOUNCE) ? 4 : 2;

	if (reg >= nregs * 4)
		return NULL;

	return community->pad_regs + reg + padno * nregs * 4;
}

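/*
 * A pad is usable by this driver only when its PAD_OWN nibble reads zero,
 * i.e. the pad is assigned to the host and not to another owner (typically
 * platform firmware).
 */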
static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned int pin)
{
	const struct intel_community *community;
	const struct intel_padgroup *padgrp;
	unsigned int gpp, offset, gpp_offset;
	void __iomem *padown;

	community = intel_get_community(pctrl, pin);
	if (!community)
		return false;
	if (!community->padown_offset)
		return true;

	padgrp = intel_community_get_padgroup(community, pin);
	if (!padgrp)
		return false;

	gpp_offset = padgroup_offset(padgrp, pin);
	gpp = PADOWN_GPP(gpp_offset);
	offset = community->padown_offset + padgrp->padown_num * 4 + gpp * 4;
	padown = community->regs + offset;

	return !(readl(padown) & PADOWN_MASK(gpp_offset));
}

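/*
 * When the HOSTSW_OWN bit for a pad is clear, the pad is in ACPI mode: it is
 * still usable as a GPIO, but the GPI_IS status bit is not updated by the
 * hardware, so the pad cannot be used as an IRQ (see intel_gpio_irq_type()).
 */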
static bool intel_pad_acpi_mode(struct intel_pinctrl *pctrl, unsigned int pin)
{
	const struct intel_community *community;
	const struct intel_padgroup *padgrp;
	unsigned int offset, gpp_offset;
	void __iomem *hostown;

	community = intel_get_community(pctrl, pin);
	if (!community)
		return true;
	if (!community->hostown_offset)
		return false;

	padgrp = intel_community_get_padgroup(community, pin);
	if (!padgrp)
		return true;

	gpp_offset = padgroup_offset(padgrp, pin);
	offset = community->hostown_offset + padgrp->reg_num * 4;
	hostown = community->regs + offset;

	return !(readl(hostown) & BIT(gpp_offset));
}

/**
 * enum - Locking variants of the pad configuration
 *
 * @PAD_UNLOCKED:	pad is fully controlled by the configuration registers
 * @PAD_LOCKED:	pad configuration registers, except TX state, are locked
 * @PAD_LOCKED_TX:	pad configuration TX state is locked
 * @PAD_LOCKED_FULL:	pad configuration registers are locked completely
 *
 * Locking means that the corresponding registers and their fields become
 * read-only. Note that the TX state bit is locked separately from the rest
 * of the pad configuration.
 */
enum {
	PAD_UNLOCKED	= 0,
	PAD_LOCKED	= 1,
	PAD_LOCKED_TX	= 2,
	PAD_LOCKED_FULL	= PAD_LOCKED | PAD_LOCKED_TX,
};

static int intel_pad_locked(struct intel_pinctrl *pctrl, unsigned int pin)
{
	struct intel_community *community;
	const struct intel_padgroup *padgrp;
	unsigned int offset, gpp_offset;
	u32 value;
	int ret = PAD_UNLOCKED;

	community = intel_get_community(pctrl, pin);
	if (!community)
		return PAD_LOCKED_FULL;
	if (!community->padcfglock_offset)
		return PAD_UNLOCKED;

	padgrp = intel_community_get_padgroup(community, pin);
	if (!padgrp)
		return PAD_LOCKED_FULL;

	gpp_offset = padgroup_offset(padgrp, pin);

	/*
	 * If PADCFGLOCK and PADCFGLOCKTX bits are both clear for this pad,
	 * the pad is considered unlocked. Any other case means that it is
	 * either fully or partially locked.
	 */
	offset = community->padcfglock_offset + 0 + padgrp->reg_num * 8;
	value = readl(community->regs + offset);
	if (value & BIT(gpp_offset))
		ret |= PAD_LOCKED;

	offset = community->padcfglock_offset + 4 + padgrp->reg_num * 8;
	value = readl(community->regs + offset);
	if (value & BIT(gpp_offset))
		ret |= PAD_LOCKED_TX;

	return ret;
}

static bool intel_pad_is_unlocked(struct intel_pinctrl *pctrl, unsigned int pin)
{
	return (intel_pad_locked(pctrl, pin) & PAD_LOCKED) == PAD_UNLOCKED;
}

static bool intel_pad_usable(struct intel_pinctrl *pctrl, unsigned int pin)
{
	return intel_pad_owned_by_host(pctrl, pin) && intel_pad_is_unlocked(pctrl, pin);
}

static int intel_get_groups_count(struct pinctrl_dev *pctldev)
{
	struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);

	return pctrl->soc->ngroups;
}

static const char *intel_get_group_name(struct pinctrl_dev *pctldev,
					unsigned int group)
{
	struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);

	return pctrl->soc->groups[group].name;
}

static int intel_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
				const unsigned int **pins, unsigned int *npins)
{
	struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);

	*pins = pctrl->soc->groups[group].pins;
	*npins = pctrl->soc->groups[group].npins;
	return 0;
}

static void intel_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
			       unsigned int pin)
{
	struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
	void __iomem *padcfg;
	u32 cfg0, cfg1, mode;
	int locked;
	bool acpi;

	if (!intel_pad_owned_by_host(pctrl, pin)) {
		seq_puts(s, "not available");
		return;
	}

	cfg0 = readl(intel_get_padcfg(pctrl, pin, PADCFG0));
	cfg1 = readl(intel_get_padcfg(pctrl, pin, PADCFG1));

	mode = (cfg0 & PADCFG0_PMODE_MASK) >> PADCFG0_PMODE_SHIFT;
	if (mode == PADCFG0_PMODE_GPIO)
		seq_puts(s, "GPIO ");
	else
		seq_printf(s, "mode %d ", mode);

	seq_printf(s, "0x%08x 0x%08x", cfg0, cfg1);

	/* Dump the additional PADCFG registers if available */
	padcfg = intel_get_padcfg(pctrl, pin, PADCFG2);
	if (padcfg)
		seq_printf(s, " 0x%08x", readl(padcfg));

	locked = intel_pad_locked(pctrl, pin);
	acpi = intel_pad_acpi_mode(pctrl, pin);

	if (locked || acpi) {
		seq_puts(s, " [");
		if (locked)
			seq_puts(s, "LOCKED");
		if ((locked & PAD_LOCKED_FULL) == PAD_LOCKED_TX)
			seq_puts(s, " tx");
		else if ((locked & PAD_LOCKED_FULL) == PAD_LOCKED_FULL)
			seq_puts(s, " full");

		if (locked && acpi)
			seq_puts(s, ", ");

		if (acpi)
			seq_puts(s, "ACPI");
		seq_puts(s, "]");
	}
}

static const struct pinctrl_ops intel_pinctrl_ops = {
	.get_groups_count = intel_get_groups_count,
	.get_group_name = intel_get_group_name,
	.get_group_pins = intel_get_group_pins,
	.pin_dbg_show = intel_pin_dbg_show,
};

static int intel_get_functions_count(struct pinctrl_dev *pctldev)
{
	struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);

	return pctrl->soc->nfunctions;
}

static const char *intel_get_function_name(struct pinctrl_dev *pctldev,
					   unsigned int function)
{
	struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);

	return pctrl->soc->functions[function].name;
}

static int intel_get_function_groups(struct pinctrl_dev *pctldev,
				     unsigned int function,
				     const char * const **groups,
				     unsigned int * const ngroups)
{
	struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);

	*groups = pctrl->soc->functions[function].groups;
	*ngroups = pctrl->soc->functions[function].ngroups;
	return 0;
}

static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev,
				unsigned int function, unsigned int group)
{
	struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
	const struct intel_pingroup *grp = &pctrl->soc->groups[group];
	unsigned long flags;
	int i;

	raw_spin_lock_irqsave(&pctrl->lock, flags);

	/*
	 * All pins in the group need to be accessible and writable
	 * before we can enable the mux for this group.
	 */
	for (i = 0; i < grp->npins; i++) {
		if (!intel_pad_usable(pctrl, grp->pins[i])) {
			raw_spin_unlock_irqrestore(&pctrl->lock, flags);
			return -EBUSY;
		}
	}

	/* Now enable the mux setting for each pin in the group */
	for (i = 0; i < grp->npins; i++) {
		void __iomem *padcfg0;
		u32 value;

		padcfg0 = intel_get_padcfg(pctrl, grp->pins[i], PADCFG0);
		value = readl(padcfg0);

		value &= ~PADCFG0_PMODE_MASK;

		if (grp->modes)
			value |= grp->modes[i] << PADCFG0_PMODE_SHIFT;
		else
			value |= grp->mode << PADCFG0_PMODE_SHIFT;

		writel(value, padcfg0);
	}

	raw_spin_unlock_irqrestore(&pctrl->lock, flags);

	return 0;
}

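/*
 * The helpers below are expected to be called with pctrl->lock held by the
 * caller; they read and update PADCFG0 (pad mode, RX/TX buffer enables and
 * GPI routing) with plain read-modify-write accesses.
 */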
static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input)
{
	u32 value;

	value = readl(padcfg0);
	if (input) {
		value &= ~PADCFG0_GPIORXDIS;
		value |= PADCFG0_GPIOTXDIS;
	} else {
		value &= ~PADCFG0_GPIOTXDIS;
		value |= PADCFG0_GPIORXDIS;
	}
	writel(value, padcfg0);
}

static int intel_gpio_get_gpio_mode(void __iomem *padcfg0)
{
	return (readl(padcfg0) & PADCFG0_PMODE_MASK) >> PADCFG0_PMODE_SHIFT;
}

static void intel_gpio_set_gpio_mode(void __iomem *padcfg0)
{
	u32 value;

	value = readl(padcfg0);

	/* Put the pad into GPIO mode */
	value &= ~PADCFG0_PMODE_MASK;
	value |= PADCFG0_PMODE_GPIO;

	/* Disable input and output buffers */
	value |= PADCFG0_GPIORXDIS;
	value |= PADCFG0_GPIOTXDIS;

	/* Disable SCI/SMI/NMI generation */
	value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
	value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);

	writel(value, padcfg0);
}

static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
				     struct pinctrl_gpio_range *range,
				     unsigned int pin)
{
	struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
	void __iomem *padcfg0;
	unsigned long flags;

	padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);

	raw_spin_lock_irqsave(&pctrl->lock, flags);

	if (!intel_pad_owned_by_host(pctrl, pin)) {
		raw_spin_unlock_irqrestore(&pctrl->lock, flags);
		return -EBUSY;
	}

	if (!intel_pad_is_unlocked(pctrl, pin)) {
		raw_spin_unlock_irqrestore(&pctrl->lock, flags);
		return 0;
	}

	/*
	 * If pin is already configured in GPIO mode, we assume that
	 * firmware provides correct settings. In such case we avoid
	 * potential glitches on the pin. Otherwise, for the pin in
	 * alternative mode, consumer has to supply respective flags.
	 */
	if (intel_gpio_get_gpio_mode(padcfg0) == PADCFG0_PMODE_GPIO) {
		raw_spin_unlock_irqrestore(&pctrl->lock, flags);
		return 0;
	}

	intel_gpio_set_gpio_mode(padcfg0);

	/* Disable TX buffer and enable RX (this will be input) */
	__intel_gpio_set_direction(padcfg0, true);

	raw_spin_unlock_irqrestore(&pctrl->lock, flags);

	return 0;
}

static int intel_gpio_set_direction(struct pinctrl_dev *pctldev,
				    struct pinctrl_gpio_range *range,
				    unsigned int pin, bool input)
{
	struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
	void __iomem *padcfg0;
	unsigned long flags;

	padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);

	raw_spin_lock_irqsave(&pctrl->lock, flags);
	__intel_gpio_set_direction(padcfg0, input);
	raw_spin_unlock_irqrestore(&pctrl->lock, flags);

	return 0;
}

static const struct pinmux_ops intel_pinmux_ops = {
	.get_functions_count = intel_get_functions_count,
	.get_function_name = intel_get_function_name,
	.get_function_groups = intel_get_function_groups,
	.set_mux = intel_pinmux_set_mux,
	.gpio_request_enable = intel_gpio_request_enable,
	.gpio_set_direction = intel_gpio_set_direction,
};

static int intel_config_get_pull(struct intel_pinctrl *pctrl, unsigned int pin,
				 enum pin_config_param param, u32 *arg)
{
	const struct intel_community *community;
	void __iomem *padcfg1;
	unsigned long flags;
	u32 value, term;

	community = intel_get_community(pctrl, pin);
	padcfg1 = intel_get_padcfg(pctrl, pin, PADCFG1);

	raw_spin_lock_irqsave(&pctrl->lock, flags);
	value = readl(padcfg1);
	raw_spin_unlock_irqrestore(&pctrl->lock, flags);

	term = (value & PADCFG1_TERM_MASK) >> PADCFG1_TERM_SHIFT;

	switch (param) {
	case PIN_CONFIG_BIAS_DISABLE:
		if (term)
			return -EINVAL;
		break;

	case PIN_CONFIG_BIAS_PULL_UP:
		if (!term || !(value & PADCFG1_TERM_UP))
			return -EINVAL;

		switch (term) {
		case PADCFG1_TERM_833:
			*arg = 833;
			break;
		case PADCFG1_TERM_1K:
			*arg = 1000;
			break;
		case PADCFG1_TERM_5K:
			*arg = 5000;
			break;
		case PADCFG1_TERM_20K:
			*arg = 20000;
			break;
		}

		break;

	case PIN_CONFIG_BIAS_PULL_DOWN:
		if (!term || value & PADCFG1_TERM_UP)
			return -EINVAL;

		switch (term) {
		case PADCFG1_TERM_833:
			if (!(community->features & PINCTRL_FEATURE_1K_PD))
				return -EINVAL;
			*arg = 833;
			break;
		case PADCFG1_TERM_1K:
			if (!(community->features & PINCTRL_FEATURE_1K_PD))
				return -EINVAL;
			*arg = 1000;
			break;
		case PADCFG1_TERM_5K:
			*arg = 5000;
			break;
		case PADCFG1_TERM_20K:
			*arg = 20000;
			break;
		}

		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int intel_config_get_debounce(struct intel_pinctrl *pctrl, unsigned int pin,
				     enum pin_config_param param, u32 *arg)
{
	void __iomem *padcfg2;
	unsigned long flags;
	unsigned long v;
	u32 value2;

	padcfg2 = intel_get_padcfg(pctrl, pin, PADCFG2);
	if (!padcfg2)
		return -ENOTSUPP;

	raw_spin_lock_irqsave(&pctrl->lock, flags);
	value2 = readl(padcfg2);
	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
	if (!(value2 & PADCFG2_DEBEN))
		return -EINVAL;

	v = (value2 & PADCFG2_DEBOUNCE_MASK) >> PADCFG2_DEBOUNCE_SHIFT;
	*arg = BIT(v) * DEBOUNCE_PERIOD_NSEC / NSEC_PER_USEC;

	return 0;
}

static int intel_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
			    unsigned long *config)
{
	struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
	enum pin_config_param param = pinconf_to_config_param(*config);
	u32 arg = 0;
	int ret;

	if (!intel_pad_owned_by_host(pctrl, pin))
		return -ENOTSUPP;

	switch (param) {
	case PIN_CONFIG_BIAS_DISABLE:
	case PIN_CONFIG_BIAS_PULL_UP:
	case PIN_CONFIG_BIAS_PULL_DOWN:
		ret = intel_config_get_pull(pctrl, pin, param, &arg);
		if (ret)
			return ret;
		break;

	case PIN_CONFIG_INPUT_DEBOUNCE:
		ret = intel_config_get_debounce(pctrl, pin, param, &arg);
		if (ret)
			return ret;
		break;

	default:
		return -ENOTSUPP;
	}

	*config = pinconf_to_config_packed(param, arg);
	return 0;
}

static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
				 unsigned long config)
{
	unsigned int param = pinconf_to_config_param(config);
	unsigned int arg = pinconf_to_config_argument(config);
	const struct intel_community *community;
	void __iomem *padcfg1;
	unsigned long flags;
	int ret = 0;
	u32 value;

	community = intel_get_community(pctrl, pin);
	padcfg1 = intel_get_padcfg(pctrl, pin, PADCFG1);

	raw_spin_lock_irqsave(&pctrl->lock, flags);

	value = readl(padcfg1);

	switch (param) {
	case PIN_CONFIG_BIAS_DISABLE:
		value &= ~(PADCFG1_TERM_MASK | PADCFG1_TERM_UP);
		break;

	case PIN_CONFIG_BIAS_PULL_UP:
		value &= ~PADCFG1_TERM_MASK;

		value |= PADCFG1_TERM_UP;

		/* Set default strength value in case none is given */
		if (arg == 1)
			arg = 5000;

		switch (arg) {
		case 20000:
			value |= PADCFG1_TERM_20K << PADCFG1_TERM_SHIFT;
			break;
		case 5000:
			value |= PADCFG1_TERM_5K << PADCFG1_TERM_SHIFT;
			break;
		case 1000:
			value |= PADCFG1_TERM_1K << PADCFG1_TERM_SHIFT;
			break;
		case 833:
			value |= PADCFG1_TERM_833 << PADCFG1_TERM_SHIFT;
			break;
		default:
			ret = -EINVAL;
		}

		break;

	case PIN_CONFIG_BIAS_PULL_DOWN:
		value &= ~(PADCFG1_TERM_UP | PADCFG1_TERM_MASK);

		/* Set default strength value in case none is given */
		if (arg == 1)
			arg = 5000;

		switch (arg) {
		case 20000:
			value |= PADCFG1_TERM_20K << PADCFG1_TERM_SHIFT;
			break;
		case 5000:
			value |= PADCFG1_TERM_5K << PADCFG1_TERM_SHIFT;
			break;
		case 1000:
			if (!(community->features & PINCTRL_FEATURE_1K_PD)) {
				ret = -EINVAL;
				break;
			}
			value |= PADCFG1_TERM_1K << PADCFG1_TERM_SHIFT;
			break;
		case 833:
			if (!(community->features & PINCTRL_FEATURE_1K_PD)) {
				ret = -EINVAL;
				break;
			}
			value |= PADCFG1_TERM_833 << PADCFG1_TERM_SHIFT;
			break;
		default:
			ret = -EINVAL;
		}

		break;
	}

	if (!ret)
		writel(value, padcfg1);

	raw_spin_unlock_irqrestore(&pctrl->lock, flags);

	return ret;
}

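/*
 * The debounce argument is given in microseconds and is rounded to the
 * nearest power-of-two multiple of the 31.25 us hardware period. For
 * example, a requested 1000 us gives v = order_base_2(1000000 / 31250) = 5,
 * i.e. 2^5 * 31.25 us = 1 ms. Values of v outside 3..15 are rejected.
 */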
static int intel_config_set_debounce(struct intel_pinctrl *pctrl,
				     unsigned int pin, unsigned int debounce)
{
	void __iomem *padcfg0, *padcfg2;
	unsigned long flags;
	u32 value0, value2;

	padcfg2 = intel_get_padcfg(pctrl, pin, PADCFG2);
	if (!padcfg2)
		return -ENOTSUPP;

	padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);

	raw_spin_lock_irqsave(&pctrl->lock, flags);

	value0 = readl(padcfg0);
	value2 = readl(padcfg2);

	/* Disable glitch filter and debouncer */
	value0 &= ~PADCFG0_PREGFRXSEL;
	value2 &= ~(PADCFG2_DEBEN | PADCFG2_DEBOUNCE_MASK);

	if (debounce) {
		unsigned long v;

		v = order_base_2(debounce * NSEC_PER_USEC / DEBOUNCE_PERIOD_NSEC);
		if (v < 3 || v > 15) {
			raw_spin_unlock_irqrestore(&pctrl->lock, flags);
			return -EINVAL;
		}

		/* Enable glitch filter and debouncer */
		value0 |= PADCFG0_PREGFRXSEL;
		value2 |= v << PADCFG2_DEBOUNCE_SHIFT;
		value2 |= PADCFG2_DEBEN;
	}

	writel(value0, padcfg0);
	writel(value2, padcfg2);

	raw_spin_unlock_irqrestore(&pctrl->lock, flags);

	return 0;
}

static int intel_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
			    unsigned long *configs, unsigned int nconfigs)
{
	struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
	int i, ret;

	if (!intel_pad_usable(pctrl, pin))
		return -ENOTSUPP;

	for (i = 0; i < nconfigs; i++) {
		switch (pinconf_to_config_param(configs[i])) {
		case PIN_CONFIG_BIAS_DISABLE:
		case PIN_CONFIG_BIAS_PULL_UP:
		case PIN_CONFIG_BIAS_PULL_DOWN:
			ret = intel_config_set_pull(pctrl, pin, configs[i]);
			if (ret)
				return ret;
			break;

		case PIN_CONFIG_INPUT_DEBOUNCE:
			ret = intel_config_set_debounce(pctrl, pin,
				pinconf_to_config_argument(configs[i]));
			if (ret)
				return ret;
			break;

		default:
			return -ENOTSUPP;
		}
	}

	return 0;
}

static const struct pinconf_ops intel_pinconf_ops = {
	.is_generic = true,
	.pin_config_get = intel_config_get,
	.pin_config_set = intel_config_set,
};

static const struct pinctrl_desc intel_pinctrl_desc = {
	.pctlops = &intel_pinctrl_ops,
	.pmxops = &intel_pinmux_ops,
	.confops = &intel_pinconf_ops,
	.owner = THIS_MODULE,
};

/**
 * intel_gpio_to_pin() - Translate from GPIO offset to pin number
 * @pctrl: Pinctrl structure
 * @offset: GPIO offset from gpiolib
 * @community: Community is filled here if not %NULL
 * @padgrp: Pad group is filled here if not %NULL
 *
 * When coming through gpiolib irqchip, the GPIO offset is not
 * automatically translated to pinctrl pin number. This function can be
 * used to find out the corresponding pinctrl pin.
 */
static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned int offset,
			     const struct intel_community **community,
			     const struct intel_padgroup **padgrp)
{
	int i;

	for (i = 0; i < pctrl->ncommunities; i++) {
		const struct intel_community *comm = &pctrl->communities[i];
		int j;

		for (j = 0; j < comm->ngpps; j++) {
			const struct intel_padgroup *pgrp = &comm->gpps[j];

			if (pgrp->gpio_base == INTEL_GPIO_BASE_NOMAP)
				continue;

			if (offset >= pgrp->gpio_base &&
			    offset < pgrp->gpio_base + pgrp->size) {
				int pin;

				pin = pgrp->base + offset - pgrp->gpio_base;
				if (community)
					*community = comm;
				if (padgrp)
					*padgrp = pgrp;

				return pin;
			}
		}
	}

	return -EINVAL;
}

/**
 * intel_pin_to_gpio() - Translate from pin number to GPIO offset
 * @pctrl: Pinctrl structure
 * @pin: pin number
 *
 * Translate the pinctrl pin number to the corresponding GPIO offset.
 */
static __maybe_unused int intel_pin_to_gpio(struct intel_pinctrl *pctrl, int pin)
{
	const struct intel_community *community;
	const struct intel_padgroup *padgrp;

	community = intel_get_community(pctrl, pin);
	if (!community)
		return -EINVAL;

	padgrp = intel_community_get_padgroup(community, pin);
	if (!padgrp)
		return -EINVAL;

	return pin - padgrp->base + padgrp->gpio_base;
}

static int intel_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
	struct intel_pinctrl *pctrl = gpiochip_get_data(chip);
	void __iomem *reg;
	u32 padcfg0;
	int pin;

	pin = intel_gpio_to_pin(pctrl, offset, NULL, NULL);
	if (pin < 0)
		return -EINVAL;

	reg = intel_get_padcfg(pctrl, pin, PADCFG0);
	if (!reg)
		return -EINVAL;

	padcfg0 = readl(reg);
	if (!(padcfg0 & PADCFG0_GPIOTXDIS))
		return !!(padcfg0 & PADCFG0_GPIOTXSTATE);

	return !!(padcfg0 & PADCFG0_GPIORXSTATE);
}

static void intel_gpio_set(struct gpio_chip *chip, unsigned int offset,
			   int value)
{
	struct intel_pinctrl *pctrl = gpiochip_get_data(chip);
	unsigned long flags;
	void __iomem *reg;
	u32 padcfg0;
	int pin;

	pin = intel_gpio_to_pin(pctrl, offset, NULL, NULL);
	if (pin < 0)
		return;

	reg = intel_get_padcfg(pctrl, pin, PADCFG0);
	if (!reg)
		return;

	raw_spin_lock_irqsave(&pctrl->lock, flags);
	padcfg0 = readl(reg);
	if (value)
		padcfg0 |= PADCFG0_GPIOTXSTATE;
	else
		padcfg0 &= ~PADCFG0_GPIOTXSTATE;
	writel(padcfg0, reg);
	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}

static int intel_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
	struct intel_pinctrl *pctrl = gpiochip_get_data(chip);
	unsigned long flags;
	void __iomem *reg;
	u32 padcfg0;
	int pin;

	pin = intel_gpio_to_pin(pctrl, offset, NULL, NULL);
	if (pin < 0)
		return -EINVAL;

	reg = intel_get_padcfg(pctrl, pin, PADCFG0);
	if (!reg)
		return -EINVAL;

	raw_spin_lock_irqsave(&pctrl->lock, flags);
	padcfg0 = readl(reg);
	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
	if (padcfg0 & PADCFG0_PMODE_MASK)
		return -EINVAL;

	if (padcfg0 & PADCFG0_GPIOTXDIS)
		return GPIO_LINE_DIRECTION_IN;

	return GPIO_LINE_DIRECTION_OUT;
}

static int intel_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
	return pinctrl_gpio_direction_input(chip->base + offset);
}

static int intel_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
				       int value)
{
	intel_gpio_set(chip, offset, value);
	return pinctrl_gpio_direction_output(chip->base + offset);
}

static const struct gpio_chip intel_gpio_chip = {
	.owner = THIS_MODULE,
	.request = gpiochip_generic_request,
	.free = gpiochip_generic_free,
	.get_direction = intel_gpio_get_direction,
	.direction_input = intel_gpio_direction_input,
	.direction_output = intel_gpio_direction_output,
	.get = intel_gpio_get,
	.set = intel_gpio_set,
	.set_config = gpiochip_generic_config,
};

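/*
 * Interrupt status (GPI_IS) and enable (GPI_IE) are kept per pad group: one
 * 32-bit register per group, one bit per pad. The GPIO offset coming from
 * gpiolib is therefore translated back to a (community, group, bit) triple
 * before the registers are accessed.
 */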
static void intel_gpio_irq_ack(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
	const struct intel_community *community;
	const struct intel_padgroup *padgrp;
	int pin;

	pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), &community, &padgrp);
	if (pin >= 0) {
		unsigned int gpp, gpp_offset, is_offset;

		gpp = padgrp->reg_num;
		gpp_offset = padgroup_offset(padgrp, pin);
		is_offset = community->is_offset + gpp * 4;

		raw_spin_lock(&pctrl->lock);
		writel(BIT(gpp_offset), community->regs + is_offset);
		raw_spin_unlock(&pctrl->lock);
	}
}

static void intel_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
	const struct intel_community *community;
	const struct intel_padgroup *padgrp;
	int pin;

	pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), &community, &padgrp);
	if (pin >= 0) {
		unsigned int gpp, gpp_offset;
		unsigned long flags;
		void __iomem *reg, *is;
		u32 value;

		gpp = padgrp->reg_num;
		gpp_offset = padgroup_offset(padgrp, pin);

		reg = community->regs + community->ie_offset + gpp * 4;
		is = community->regs + community->is_offset + gpp * 4;

		raw_spin_lock_irqsave(&pctrl->lock, flags);

		/* Clear interrupt status first to avoid unexpected interrupt */
		writel(BIT(gpp_offset), is);

		value = readl(reg);
		if (mask)
			value &= ~BIT(gpp_offset);
		else
			value |= BIT(gpp_offset);
		writel(value, reg);
		raw_spin_unlock_irqrestore(&pctrl->lock, flags);
	}
}

static void intel_gpio_irq_mask(struct irq_data *d)
{
	intel_gpio_irq_mask_unmask(d, true);
}

static void intel_gpio_irq_unmask(struct irq_data *d)
{
	intel_gpio_irq_mask_unmask(d, false);
}

static int intel_gpio_irq_type(struct irq_data *d, unsigned int type)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
	unsigned int pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL);
	unsigned long flags;
	void __iomem *reg;
	u32 value;

	reg = intel_get_padcfg(pctrl, pin, PADCFG0);
	if (!reg)
		return -EINVAL;

	/*
	 * If the pin is in ACPI mode it is still usable as a GPIO but it
	 * cannot be used as IRQ because GPI_IS status bit will not be
	 * updated by the host controller hardware.
	 */
	if (intel_pad_acpi_mode(pctrl, pin)) {
		dev_warn(pctrl->dev, "pin %u cannot be used as IRQ\n", pin);
		return -EPERM;
	}

	raw_spin_lock_irqsave(&pctrl->lock, flags);

	intel_gpio_set_gpio_mode(reg);

	/* Disable TX buffer and enable RX (this will be input) */
	__intel_gpio_set_direction(reg, true);

	value = readl(reg);

	value &= ~(PADCFG0_RXEVCFG_MASK | PADCFG0_RXINV);

	if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
		value |= PADCFG0_RXEVCFG_EDGE_BOTH << PADCFG0_RXEVCFG_SHIFT;
	} else if (type & IRQ_TYPE_EDGE_FALLING) {
		value |= PADCFG0_RXEVCFG_EDGE << PADCFG0_RXEVCFG_SHIFT;
		value |= PADCFG0_RXINV;
	} else if (type & IRQ_TYPE_EDGE_RISING) {
		value |= PADCFG0_RXEVCFG_EDGE << PADCFG0_RXEVCFG_SHIFT;
	} else if (type & IRQ_TYPE_LEVEL_MASK) {
		if (type & IRQ_TYPE_LEVEL_LOW)
			value |= PADCFG0_RXINV;
	} else {
		value |= PADCFG0_RXEVCFG_DISABLED << PADCFG0_RXEVCFG_SHIFT;
	}

	writel(value, reg);

	if (type & IRQ_TYPE_EDGE_BOTH)
		irq_set_handler_locked(d, handle_edge_irq);
	else if (type & IRQ_TYPE_LEVEL_MASK)
		irq_set_handler_locked(d, handle_level_irq);

	raw_spin_unlock_irqrestore(&pctrl->lock, flags);

	return 0;
}

static int intel_gpio_irq_wake(struct irq_data *d, unsigned int on)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
	unsigned int pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL);

	if (on)
		enable_irq_wake(pctrl->irq);
	else
		disable_irq_wake(pctrl->irq);

	dev_dbg(pctrl->dev, "%sable wake for pin %u\n", on ? "en" : "dis", pin);
	return 0;
}

static int intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl,
	const struct intel_community *community)
{
	struct gpio_chip *gc = &pctrl->chip;
	unsigned int gpp;
	int ret = 0;

	for (gpp = 0; gpp < community->ngpps; gpp++) {
		const struct intel_padgroup *padgrp = &community->gpps[gpp];
		unsigned long pending, enabled, gpp_offset;
		unsigned long flags;

		raw_spin_lock_irqsave(&pctrl->lock, flags);

		pending = readl(community->regs + community->is_offset +
				padgrp->reg_num * 4);
		enabled = readl(community->regs + community->ie_offset +
				padgrp->reg_num * 4);

		raw_spin_unlock_irqrestore(&pctrl->lock, flags);

		/* Only interrupts that are enabled */
		pending &= enabled;

		for_each_set_bit(gpp_offset, &pending, padgrp->size) {
			unsigned int irq;

			irq = irq_find_mapping(gc->irq.domain,
					       padgrp->gpio_base + gpp_offset);
			generic_handle_irq(irq);
		}

		ret += pending ? 1 : 0;
	}

	return ret;
}

static irqreturn_t intel_gpio_irq(int irq, void *data)
{
	const struct intel_community *community;
	struct intel_pinctrl *pctrl = data;
	unsigned int i;
	int ret = 0;

	/* Need to check all communities for pending interrupts */
	for (i = 0; i < pctrl->ncommunities; i++) {
		community = &pctrl->communities[i];
		ret += intel_gpio_community_irq_handler(pctrl, community);
	}

	return IRQ_RETVAL(ret);
}

static int intel_gpio_add_community_ranges(struct intel_pinctrl *pctrl,
				const struct intel_community *community)
{
	int ret = 0, i;

	for (i = 0; i < community->ngpps; i++) {
		const struct intel_padgroup *gpp = &community->gpps[i];

		if (gpp->gpio_base == INTEL_GPIO_BASE_NOMAP)
			continue;

		ret = gpiochip_add_pin_range(&pctrl->chip, dev_name(pctrl->dev),
					     gpp->gpio_base, gpp->base,
					     gpp->size);
		if (ret)
			return ret;
	}

	return ret;
}

static int intel_gpio_add_pin_ranges(struct gpio_chip *gc)
{
	struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
	int ret, i;

	for (i = 0; i < pctrl->ncommunities; i++) {
		struct intel_community *community = &pctrl->communities[i];

		ret = intel_gpio_add_community_ranges(pctrl, community);
		if (ret) {
			dev_err(pctrl->dev, "failed to add GPIO pin range\n");
			return ret;
		}
	}

	return 0;
}

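/*
 * The GPIO numbering space may be sparse: pad groups marked with
 * INTEL_GPIO_BASE_NOMAP are skipped, so ngpio is the highest
 * gpio_base + size over all mapped groups rather than the total pin count.
 */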
static unsigned int intel_gpio_ngpio(const struct intel_pinctrl *pctrl)
{
	const struct intel_community *community;
	unsigned int ngpio = 0;
	int i, j;

	for (i = 0; i < pctrl->ncommunities; i++) {
		community = &pctrl->communities[i];
		for (j = 0; j < community->ngpps; j++) {
			const struct intel_padgroup *gpp = &community->gpps[j];

			if (gpp->gpio_base == INTEL_GPIO_BASE_NOMAP)
				continue;

			if (gpp->gpio_base + gpp->size > ngpio)
				ngpio = gpp->gpio_base + gpp->size;
		}
	}

	return ngpio;
}

static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
{
	int ret;
	struct gpio_irq_chip *girq;

	pctrl->chip = intel_gpio_chip;

	/* Setup GPIO chip */
	pctrl->chip.ngpio = intel_gpio_ngpio(pctrl);
	pctrl->chip.label = dev_name(pctrl->dev);
	pctrl->chip.parent = pctrl->dev;
	pctrl->chip.base = -1;
	pctrl->chip.add_pin_ranges = intel_gpio_add_pin_ranges;
	pctrl->irq = irq;

	/* Setup IRQ chip */
	pctrl->irqchip.name = dev_name(pctrl->dev);
	pctrl->irqchip.irq_ack = intel_gpio_irq_ack;
	pctrl->irqchip.irq_mask = intel_gpio_irq_mask;
	pctrl->irqchip.irq_unmask = intel_gpio_irq_unmask;
	pctrl->irqchip.irq_set_type = intel_gpio_irq_type;
	pctrl->irqchip.irq_set_wake = intel_gpio_irq_wake;
	pctrl->irqchip.flags = IRQCHIP_MASK_ON_SUSPEND;

	/*
	 * On some platforms several GPIO controllers share the same interrupt
	 * line.
	 */
	ret = devm_request_irq(pctrl->dev, irq, intel_gpio_irq,
			       IRQF_SHARED | IRQF_NO_THREAD,
			       dev_name(pctrl->dev), pctrl);
	if (ret) {
		dev_err(pctrl->dev, "failed to request interrupt\n");
		return ret;
	}

	girq = &pctrl->chip.irq;
	girq->chip = &pctrl->irqchip;
	/* This will let us handle the IRQ in the driver */
	girq->parent_handler = NULL;
	girq->num_parents = 0;
	girq->default_type = IRQ_TYPE_NONE;
	girq->handler = handle_bad_irq;

	ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl);
	if (ret) {
		dev_err(pctrl->dev, "failed to register gpiochip\n");
		return ret;
	}

	return 0;
}

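/*
 * If the SoC data does not provide explicit pad groups, groups of
 * community->gpp_size pins are synthesized here. The number of PAD_OWN
 * registers per group is derived from the group size (4 bits per pad,
 * 32 bits per register) unless gpp_num_padown_regs overrides it.
 */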
Mika Westerberg919eb472017-06-06 16:18:17 +03001324static int intel_pinctrl_add_padgroups(struct intel_pinctrl *pctrl,
1325 struct intel_community *community)
1326{
1327 struct intel_padgroup *gpps;
Andy Shevchenko04035f72018-09-26 17:50:26 +03001328 unsigned int npins = community->npins;
1329 unsigned int padown_num = 0;
Mika Westerberg919eb472017-06-06 16:18:17 +03001330 size_t ngpps, i;
1331
1332 if (community->gpps)
1333 ngpps = community->ngpps;
1334 else
1335 ngpps = DIV_ROUND_UP(community->npins, community->gpp_size);
1336
1337 gpps = devm_kcalloc(pctrl->dev, ngpps, sizeof(*gpps), GFP_KERNEL);
1338 if (!gpps)
1339 return -ENOMEM;
1340
1341 for (i = 0; i < ngpps; i++) {
1342 if (community->gpps) {
1343 gpps[i] = community->gpps[i];
1344 } else {
Andy Shevchenko04035f72018-09-26 17:50:26 +03001345 unsigned int gpp_size = community->gpp_size;
Mika Westerberg919eb472017-06-06 16:18:17 +03001346
1347 gpps[i].reg_num = i;
1348 gpps[i].base = community->pin_base + i * gpp_size;
1349 gpps[i].size = min(gpp_size, npins);
1350 npins -= gpps[i].size;
1351 }
1352
1353 if (gpps[i].size > 32)
1354 return -EINVAL;
1355
Andy Shevchenkoe5a4ab62020-04-13 14:18:20 +03001356 /* Special treatment for GPIO base */
1357 switch (gpps[i].gpio_base) {
1358 case INTEL_GPIO_BASE_MATCH:
1359 gpps[i].gpio_base = gpps[i].base;
1360 break;
Andy Shevchenko9bd59152020-04-13 14:18:24 +03001361 case INTEL_GPIO_BASE_ZERO:
1362 gpps[i].gpio_base = 0;
1363 break;
Andy Shevchenkoe5a4ab62020-04-13 14:18:20 +03001364 case INTEL_GPIO_BASE_NOMAP:
1365 default:
1366 break;
1367 }
Mika Westerberga60eac32017-11-27 16:54:43 +03001368
Mika Westerberg919eb472017-06-06 16:18:17 +03001369 gpps[i].padown_num = padown_num;
1370
1371 /*
1372 * In older hardware the number of padown registers per
1373 * group is fixed regardless of the group size.
1374 */
1375 if (community->gpp_num_padown_regs)
1376 padown_num += community->gpp_num_padown_regs;
1377 else
1378 padown_num += DIV_ROUND_UP(gpps[i].size * 4, 32);
1379 }
1380
1381 community->ngpps = ngpps;
1382 community->gpps = gpps;
1383
1384 return 0;
1385}
1386
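/*
 * Preallocate the context buffers used by the suspend/resume hooks below:
 * one entry per pin for the PADCFG registers and, per community, one slot
 * per pad group for the interrupt enable and host ownership registers.
 * This is a no-op unless CONFIG_PM_SLEEP is enabled.
 */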
Mika Westerberg7981c0012015-03-30 17:31:49 +03001387static int intel_pinctrl_pm_init(struct intel_pinctrl *pctrl)
1388{
1389#ifdef CONFIG_PM_SLEEP
1390 const struct intel_pinctrl_soc_data *soc = pctrl->soc;
1391 struct intel_community_context *communities;
1392 struct intel_pad_context *pads;
1393 int i;
1394
1395 pads = devm_kcalloc(pctrl->dev, soc->npins, sizeof(*pads), GFP_KERNEL);
1396 if (!pads)
1397 return -ENOMEM;
1398
1399 communities = devm_kcalloc(pctrl->dev, pctrl->ncommunities,
1400 sizeof(*communities), GFP_KERNEL);
1401 if (!communities)
1402 return -ENOMEM;
1403
1405 for (i = 0; i < pctrl->ncommunities; i++) {
1406 struct intel_community *community = &pctrl->communities[i];
Chris Chiua0a5f762019-04-15 13:53:58 +08001407 u32 *intmask, *hostown;
Mika Westerberg7981c0012015-03-30 17:31:49 +03001408
1409 intmask = devm_kcalloc(pctrl->dev, community->ngpps,
1410 sizeof(*intmask), GFP_KERNEL);
1411 if (!intmask)
1412 return -ENOMEM;
1413
1414 communities[i].intmask = intmask;
Chris Chiua0a5f762019-04-15 13:53:58 +08001415
1416 hostown = devm_kcalloc(pctrl->dev, community->ngpps,
1417 sizeof(*hostown), GFP_KERNEL);
1418 if (!hostown)
1419 return -ENOMEM;
1420
1421 communities[i].hostown = hostown;
Mika Westerberg7981c0012015-03-30 17:31:49 +03001422 }
1423
1424 pctrl->context.pads = pads;
1425 pctrl->context.communities = communities;
1426#endif
1427
1428 return 0;
1429}
1430
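/*
 * Common probe path used by the *_by_hid() and *_by_uid() wrappers below:
 * copy the SoC communities, map their MMIO resources, read PADBAR to locate
 * the pad configuration registers, derive revision dependent features, set
 * up the pad groups and the PM context, and finally register the pinctrl
 * and GPIO devices.
 */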
Andy Shevchenko0dd519e2018-10-17 19:10:27 +03001431static int intel_pinctrl_probe(struct platform_device *pdev,
1432 const struct intel_pinctrl_soc_data *soc_data)
Mika Westerberg7981c0012015-03-30 17:31:49 +03001433{
1434 struct intel_pinctrl *pctrl;
1435 int i, ret, irq;
1436
Mika Westerberg7981c0012015-03-30 17:31:49 +03001437 pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
1438 if (!pctrl)
1439 return -ENOMEM;
1440
1441 pctrl->dev = &pdev->dev;
1442 pctrl->soc = soc_data;
Mika Westerberg27d90982016-06-16 11:25:36 +03001443 raw_spin_lock_init(&pctrl->lock);
Mika Westerberg7981c0012015-03-30 17:31:49 +03001444
1445 /*
1446 * Make a copy of the communities which we can use to hold pointers
1447 * to the registers.
1448 */
1449 pctrl->ncommunities = pctrl->soc->ncommunities;
1450 pctrl->communities = devm_kcalloc(&pdev->dev, pctrl->ncommunities,
1451 sizeof(*pctrl->communities), GFP_KERNEL);
1452 if (!pctrl->communities)
1453 return -ENOMEM;
1454
1455 for (i = 0; i < pctrl->ncommunities; i++) {
1456 struct intel_community *community = &pctrl->communities[i];
Mika Westerberg7981c0012015-03-30 17:31:49 +03001457 void __iomem *regs;
1458 u32 padbar;
1459
1460 *community = pctrl->soc->communities[i];
1461
Andy Shevchenko9d5b6a92019-07-03 17:44:20 +03001462 regs = devm_platform_ioremap_resource(pdev, community->barno);
Mika Westerberg7981c0012015-03-30 17:31:49 +03001463 if (IS_ERR(regs))
1464 return PTR_ERR(regs);
1465
Mika Westerberge57725e2017-01-27 13:07:14 +03001466 /*
1467 * Determine community features based on the revision if
1468 * not specified already.
1469 */
1470 if (!community->features) {
1471 u32 rev;
1472
1473 rev = (readl(regs + REVID) & REVID_MASK) >> REVID_SHIFT;
Mika Westerberg04cc0582017-01-27 13:07:15 +03001474 if (rev >= 0x94) {
Mika Westerberge57725e2017-01-27 13:07:14 +03001475 community->features |= PINCTRL_FEATURE_DEBOUNCE;
Mika Westerberg04cc0582017-01-27 13:07:15 +03001476 community->features |= PINCTRL_FEATURE_1K_PD;
1477 }
Mika Westerberge57725e2017-01-27 13:07:14 +03001478 }
1479
Mika Westerberg7981c0012015-03-30 17:31:49 +03001480 /* Read offset of the pad configuration registers */
1481 padbar = readl(regs + PADBAR);
1482
1483 community->regs = regs;
1484 community->pad_regs = regs + padbar;
Mika Westerberg919eb472017-06-06 16:18:17 +03001485
1486 ret = intel_pinctrl_add_padgroups(pctrl, community);
1487 if (ret)
1488 return ret;
Mika Westerberg7981c0012015-03-30 17:31:49 +03001489 }
1490
1491 irq = platform_get_irq(pdev, 0);
Stephen Boyd4e73d022019-07-30 11:15:34 -07001492 if (irq < 0)
Mika Westerberg7981c0012015-03-30 17:31:49 +03001493 return irq;
Mika Westerberg7981c0012015-03-30 17:31:49 +03001494
1495 ret = intel_pinctrl_pm_init(pctrl);
1496 if (ret)
1497 return ret;
1498
1499 pctrl->pctldesc = intel_pinctrl_desc;
1500 pctrl->pctldesc.name = dev_name(&pdev->dev);
1501 pctrl->pctldesc.pins = pctrl->soc->pins;
1502 pctrl->pctldesc.npins = pctrl->soc->npins;
1503
Laxman Dewangan54d46cd2016-02-28 14:42:47 +05301504 pctrl->pctldev = devm_pinctrl_register(&pdev->dev, &pctrl->pctldesc,
1505 pctrl);
Masahiro Yamada323de9e2015-06-09 13:01:16 +09001506 if (IS_ERR(pctrl->pctldev)) {
Mika Westerberg7981c0012015-03-30 17:31:49 +03001507 dev_err(&pdev->dev, "failed to register pinctrl driver\n");
Masahiro Yamada323de9e2015-06-09 13:01:16 +09001508 return PTR_ERR(pctrl->pctldev);
Mika Westerberg7981c0012015-03-30 17:31:49 +03001509 }
1510
1511 ret = intel_gpio_probe(pctrl, irq);
Laxman Dewangan54d46cd2016-02-28 14:42:47 +05301512 if (ret)
Mika Westerberg7981c0012015-03-30 17:31:49 +03001513 return ret;
Mika Westerberg7981c0012015-03-30 17:31:49 +03001514
1515 platform_set_drvdata(pdev, pctrl);
1516
1517 return 0;
1518}
Mika Westerberg7981c0012015-03-30 17:31:49 +03001519
Andy Shevchenko70c263c2018-08-30 19:27:40 +03001520int intel_pinctrl_probe_by_hid(struct platform_device *pdev)
1521{
1522 const struct intel_pinctrl_soc_data *data;
1523
1524 data = device_get_match_data(&pdev->dev);
Andy Shevchenkoff360d62020-07-29 14:57:06 +03001525 if (!data)
1526 return -ENODATA;
1527
Andy Shevchenko70c263c2018-08-30 19:27:40 +03001528 return intel_pinctrl_probe(pdev, data);
1529}
1530EXPORT_SYMBOL_GPL(intel_pinctrl_probe_by_hid);
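
/*
 * An illustrative sketch (not part of this file; the "INTC0000" HID and the
 * foo_* names are made up) of how a SoC driver is expected to use
 * intel_pinctrl_probe_by_hid(): the ACPI match entry carries a pointer to a
 * single struct intel_pinctrl_soc_data, which device_get_match_data()
 * returns above.
 *
 *	static const struct acpi_device_id foo_pinctrl_acpi_match[] = {
 *		{ "INTC0000", (kernel_ulong_t)&foo_soc_data },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(acpi, foo_pinctrl_acpi_match);
 *
 *	static struct platform_driver foo_pinctrl_driver = {
 *		.probe = intel_pinctrl_probe_by_hid,
 *		.driver = {
 *			.name = "foo-pinctrl",
 *			.acpi_match_table = foo_pinctrl_acpi_match,
 *		},
 *	};
 *	module_platform_driver(foo_pinctrl_driver);
 */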
1531
Andy Shevchenko924cf802018-08-30 19:27:36 +03001532int intel_pinctrl_probe_by_uid(struct platform_device *pdev)
1533{
Andy Shevchenkoff360d62020-07-29 14:57:06 +03001534 const struct intel_pinctrl_soc_data *data;
1535
1536 data = intel_pinctrl_get_soc_data(pdev);
1537 if (IS_ERR(data))
1538 return PTR_ERR(data);
1539
1540 return intel_pinctrl_probe(pdev, data);
1541}
1542EXPORT_SYMBOL_GPL(intel_pinctrl_probe_by_uid);
1543
1544const struct intel_pinctrl_soc_data *intel_pinctrl_get_soc_data(struct platform_device *pdev)
1545{
Andy Shevchenko924cf802018-08-30 19:27:36 +03001546 const struct intel_pinctrl_soc_data *data = NULL;
1547 const struct intel_pinctrl_soc_data **table;
1548 struct acpi_device *adev;
1549 unsigned int i;
1550
1551 adev = ACPI_COMPANION(&pdev->dev);
1552 if (adev) {
1553 const void *match = device_get_match_data(&pdev->dev);
1554
1555 table = (const struct intel_pinctrl_soc_data **)match;
1556 for (i = 0; table[i]; i++) {
1557 if (!strcmp(adev->pnp.unique_id, table[i]->uid)) {
1558 data = table[i];
1559 break;
1560 }
1561 }
1562 } else {
1563 const struct platform_device_id *id;
1564
1565 id = platform_get_device_id(pdev);
1566 if (!id)
Andy Shevchenkoff360d62020-07-29 14:57:06 +03001567 return ERR_PTR(-ENODEV);
Andy Shevchenko924cf802018-08-30 19:27:36 +03001568
1569 table = (const struct intel_pinctrl_soc_data **)id->driver_data;
1570 data = table[pdev->id];
1571 }
Andy Shevchenko924cf802018-08-30 19:27:36 +03001572
Andy Shevchenkoff360d62020-07-29 14:57:06 +03001573 return data ?: ERR_PTR(-ENODATA);
Andy Shevchenko924cf802018-08-30 19:27:36 +03001574}
Andy Shevchenkoff360d62020-07-29 14:57:06 +03001575EXPORT_SYMBOL_GPL(intel_pinctrl_get_soc_data);
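
/*
 * A sketch (with made-up bar_* names) of the table layout this helper
 * expects on SoCs with several communities exposed as separate devices:
 * the ACPI match data points to a NULL-terminated array of SoC data and
 * each entry's ->uid string is compared against the ACPI _UID; without an
 * ACPI companion, the platform device ID carries the same table and
 * pdev->id is used as the index.
 *
 *	static const struct intel_pinctrl_soc_data bar_north_soc_data = {
 *		.uid = "1",
 *		...
 *	};
 *
 *	static const struct intel_pinctrl_soc_data *bar_soc_data[] = {
 *		&bar_north_soc_data,
 *		&bar_south_soc_data,
 *		NULL
 *	};
 *
 *	static const struct acpi_device_id bar_pinctrl_acpi_match[] = {
 *		{ "INTC0001", (kernel_ulong_t)bar_soc_data },
 *		{ }
 *	};
 */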
Andy Shevchenko924cf802018-08-30 19:27:36 +03001576
Mika Westerberg7981c0012015-03-30 17:31:49 +03001577#ifdef CONFIG_PM_SLEEP
Andy Shevchenko04035f72018-09-26 17:50:26 +03001578static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned int pin)
Mika Westerbergc538b942016-10-10 16:39:31 +03001579{
1580 const struct pin_desc *pd = pin_desc_get(pctrl->pctldev, pin);
1581
1582 if (!pd || !intel_pad_usable(pctrl, pin))
1583 return false;
1584
1585 /*
1586 * Only restore the pin if it is actually in use by the kernel (or
1587 * by userspace). It is possible that some pins are used by the
1588 * BIOS during resume and those are not always locked down so leave
1589 * them alone.
1590 */
1591 if (pd->mux_owner || pd->gpio_owner ||
Chris Chiu6cb08802019-08-16 17:38:38 +08001592 gpiochip_line_is_irq(&pctrl->chip, intel_pin_to_gpio(pctrl, pin)))
Mika Westerbergc538b942016-10-10 16:39:31 +03001593 return true;
1594
1595 return false;
1596}
1597
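/*
 * Snapshot everything that needs to be restored after a sleep transition:
 * PADCFG0 (with the RX state bit masked out), PADCFG1 and, where present,
 * PADCFG2 for every pad that intel_pinctrl_should_save() accepts, plus the
 * per-group interrupt enable and host ownership registers of each community.
 */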
Binbin Wu2fef3272019-04-08 18:49:26 +08001598int intel_pinctrl_suspend_noirq(struct device *dev)
Mika Westerberg7981c0012015-03-30 17:31:49 +03001599{
Wolfram Sangcb035d72018-10-21 22:00:29 +02001600 struct intel_pinctrl *pctrl = dev_get_drvdata(dev);
Mika Westerberg7981c0012015-03-30 17:31:49 +03001601 struct intel_community_context *communities;
1602 struct intel_pad_context *pads;
1603 int i;
1604
1605 pads = pctrl->context.pads;
1606 for (i = 0; i < pctrl->soc->npins; i++) {
1607 const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i];
Mika Westerberge57725e2017-01-27 13:07:14 +03001608 void __iomem *padcfg;
Mika Westerberg7981c0012015-03-30 17:31:49 +03001609 u32 val;
1610
Mika Westerbergc538b942016-10-10 16:39:31 +03001611 if (!intel_pinctrl_should_save(pctrl, desc->number))
Mika Westerberg7981c0012015-03-30 17:31:49 +03001612 continue;
1613
1614 val = readl(intel_get_padcfg(pctrl, desc->number, PADCFG0));
1615 pads[i].padcfg0 = val & ~PADCFG0_GPIORXSTATE;
1616 val = readl(intel_get_padcfg(pctrl, desc->number, PADCFG1));
1617 pads[i].padcfg1 = val;
Mika Westerberge57725e2017-01-27 13:07:14 +03001618
1619 padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG2);
1620 if (padcfg)
1621 pads[i].padcfg2 = readl(padcfg);
Mika Westerberg7981c0012015-03-30 17:31:49 +03001622 }
1623
1624 communities = pctrl->context.communities;
1625 for (i = 0; i < pctrl->ncommunities; i++) {
1626 struct intel_community *community = &pctrl->communities[i];
1627 void __iomem *base;
Andy Shevchenko04035f72018-09-26 17:50:26 +03001628 unsigned int gpp;
Mika Westerberg7981c0012015-03-30 17:31:49 +03001629
1630 base = community->regs + community->ie_offset;
1631 for (gpp = 0; gpp < community->ngpps; gpp++)
1632 communities[i].intmask[gpp] = readl(base + gpp * 4);
Chris Chiua0a5f762019-04-15 13:53:58 +08001633
1634 base = community->regs + community->hostown_offset;
1635 for (gpp = 0; gpp < community->ngpps; gpp++)
1636 communities[i].hostown[gpp] = readl(base + gpp * 4);
Mika Westerberg7981c0012015-03-30 17:31:49 +03001637 }
1638
1639 return 0;
1640}
Binbin Wu2fef3272019-04-08 18:49:26 +08001641EXPORT_SYMBOL_GPL(intel_pinctrl_suspend_noirq);
Mika Westerberg7981c0012015-03-30 17:31:49 +03001642
Mika Westerbergf487bbf2015-10-13 17:51:25 +03001643static void intel_gpio_irq_init(struct intel_pinctrl *pctrl)
1644{
1645 size_t i;
1646
1647 for (i = 0; i < pctrl->ncommunities; i++) {
1648 const struct intel_community *community;
1649 void __iomem *base;
Andy Shevchenko04035f72018-09-26 17:50:26 +03001650 unsigned int gpp;
Mika Westerbergf487bbf2015-10-13 17:51:25 +03001651
1652 community = &pctrl->communities[i];
1653 base = community->regs;
1654
1655 for (gpp = 0; gpp < community->ngpps; gpp++) {
1656 /* Mask and clear all interrupts */
1657 writel(0, base + community->ie_offset + gpp * 4);
Mika Westerbergcf769bd2017-10-23 15:40:25 +03001658 writel(0xffff, base + community->is_offset + gpp * 4);
Mika Westerbergf487bbf2015-10-13 17:51:25 +03001659 }
1660 }
1661}
1662
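/*
 * Read-modify-write helper for the restore paths below: bits inside the mask
 * are replaced with the saved value, the register is written back only when
 * the result differs from its current contents, and the return value tells
 * the caller whether anything was written (and thus worth a debug message).
 */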
Andy Shevchenko942c5ea2019-10-22 13:00:04 +03001663static bool intel_gpio_update_reg(void __iomem *reg, u32 mask, u32 value)
Chris Chiua0a5f762019-04-15 13:53:58 +08001664{
Andy Shevchenko5f61d952019-04-28 20:19:06 +03001665 u32 curr, updated;
Chris Chiua0a5f762019-04-15 13:53:58 +08001666
Andy Shevchenko942c5ea2019-10-22 13:00:04 +03001667 curr = readl(reg);
Andy Shevchenko5f61d952019-04-28 20:19:06 +03001668
Andy Shevchenko942c5ea2019-10-22 13:00:04 +03001669 updated = (curr & ~mask) | (value & mask);
1670 if (curr == updated)
1671 return false;
1672
1673 writel(updated, reg);
1674 return true;
Chris Chiua0a5f762019-04-15 13:53:58 +08001675}
1676
Andy Shevchenko7101e022019-10-22 13:00:01 +03001677static void intel_restore_hostown(struct intel_pinctrl *pctrl, unsigned int c,
1678 void __iomem *base, unsigned int gpp, u32 saved)
1679{
1680 const struct intel_community *community = &pctrl->communities[c];
1681 const struct intel_padgroup *padgrp = &community->gpps[gpp];
1682 struct device *dev = pctrl->dev;
Andy Shevchenkod1bfd022020-06-10 21:14:49 +03001683 const char *dummy;
1684 u32 requested = 0;
1685 unsigned int i;
Andy Shevchenko7101e022019-10-22 13:00:01 +03001686
Andy Shevchenkoe5a4ab62020-04-13 14:18:20 +03001687 if (padgrp->gpio_base == INTEL_GPIO_BASE_NOMAP)
Andy Shevchenko7101e022019-10-22 13:00:01 +03001688 return;
1689
Andy Shevchenkod1bfd022020-06-10 21:14:49 +03001690 for_each_requested_gpio_in_range(&pctrl->chip, i, padgrp->gpio_base, padgrp->size, dummy)
1691 requested |= BIT(i);
1692
Andy Shevchenko942c5ea2019-10-22 13:00:04 +03001693 if (!intel_gpio_update_reg(base + gpp * 4, requested, saved))
Andy Shevchenko7101e022019-10-22 13:00:01 +03001694 return;
1695
Andy Shevchenko764cfe32019-10-22 13:00:03 +03001696 dev_dbg(dev, "restored hostown %u/%u %#08x\n", c, gpp, readl(base + gpp * 4));
Andy Shevchenko7101e022019-10-22 13:00:01 +03001697}
1698
Andy Shevchenko471dd9a2019-10-22 13:00:02 +03001699static void intel_restore_intmask(struct intel_pinctrl *pctrl, unsigned int c,
1700 void __iomem *base, unsigned int gpp, u32 saved)
1701{
1702 struct device *dev = pctrl->dev;
1703
Andy Shevchenko942c5ea2019-10-22 13:00:04 +03001704 if (!intel_gpio_update_reg(base + gpp * 4, ~0U, saved))
1705 return;
1706
Andy Shevchenko471dd9a2019-10-22 13:00:02 +03001707 dev_dbg(dev, "restored mask %u/%u %#08x\n", c, gpp, readl(base + gpp * 4));
1708}
1709
Andy Shevchenkof78f1522019-10-22 13:00:00 +03001710static void intel_restore_padcfg(struct intel_pinctrl *pctrl, unsigned int pin,
1711 unsigned int reg, u32 saved)
1712{
1713 u32 mask = (reg == PADCFG0) ? PADCFG0_GPIORXSTATE : 0;
1714 unsigned int n = reg / sizeof(u32);
1715 struct device *dev = pctrl->dev;
1716 void __iomem *padcfg;
Andy Shevchenkof78f1522019-10-22 13:00:00 +03001717
1718 padcfg = intel_get_padcfg(pctrl, pin, reg);
1719 if (!padcfg)
1720 return;
1721
Andy Shevchenko942c5ea2019-10-22 13:00:04 +03001722 if (!intel_gpio_update_reg(padcfg, ~mask, saved))
Andy Shevchenkof78f1522019-10-22 13:00:00 +03001723 return;
1724
Andy Shevchenkof78f1522019-10-22 13:00:00 +03001725 dev_dbg(dev, "restored pin %u padcfg%u %#08x\n", pin, n, readl(padcfg));
1726}
1727
Binbin Wu2fef3272019-04-08 18:49:26 +08001728int intel_pinctrl_resume_noirq(struct device *dev)
Mika Westerberg7981c0012015-03-30 17:31:49 +03001729{
Wolfram Sangcb035d72018-10-21 22:00:29 +02001730 struct intel_pinctrl *pctrl = dev_get_drvdata(dev);
Mika Westerberg7981c0012015-03-30 17:31:49 +03001731 const struct intel_community_context *communities;
1732 const struct intel_pad_context *pads;
1733 int i;
1734
1735 /* Mask all interrupts */
1736 intel_gpio_irq_init(pctrl);
1737
1738 pads = pctrl->context.pads;
1739 for (i = 0; i < pctrl->soc->npins; i++) {
1740 const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i];
Mika Westerberg7981c0012015-03-30 17:31:49 +03001741
Mika Westerbergc538b942016-10-10 16:39:31 +03001742 if (!intel_pinctrl_should_save(pctrl, desc->number))
Mika Westerberg7981c0012015-03-30 17:31:49 +03001743 continue;
1744
Andy Shevchenkof78f1522019-10-22 13:00:00 +03001745 intel_restore_padcfg(pctrl, desc->number, PADCFG0, pads[i].padcfg0);
1746 intel_restore_padcfg(pctrl, desc->number, PADCFG1, pads[i].padcfg1);
1747 intel_restore_padcfg(pctrl, desc->number, PADCFG2, pads[i].padcfg2);
Mika Westerberg7981c0012015-03-30 17:31:49 +03001748 }
1749
1750 communities = pctrl->context.communities;
1751 for (i = 0; i < pctrl->ncommunities; i++) {
1752 struct intel_community *community = &pctrl->communities[i];
1753 void __iomem *base;
Andy Shevchenko04035f72018-09-26 17:50:26 +03001754 unsigned int gpp;
Mika Westerberg7981c0012015-03-30 17:31:49 +03001755
1756 base = community->regs + community->ie_offset;
Andy Shevchenko471dd9a2019-10-22 13:00:02 +03001757 for (gpp = 0; gpp < community->ngpps; gpp++)
1758 intel_restore_intmask(pctrl, i, base, gpp, communities[i].intmask[gpp]);
Chris Chiua0a5f762019-04-15 13:53:58 +08001759
1760 base = community->regs + community->hostown_offset;
Andy Shevchenko7101e022019-10-22 13:00:01 +03001761 for (gpp = 0; gpp < community->ngpps; gpp++)
1762 intel_restore_hostown(pctrl, i, base, gpp, communities[i].hostown[gpp]);
Mika Westerberg7981c0012015-03-30 17:31:49 +03001763 }
1764
1765 return 0;
1766}
Binbin Wu2fef3272019-04-08 18:49:26 +08001767EXPORT_SYMBOL_GPL(intel_pinctrl_resume_noirq);
Mika Westerberg7981c0012015-03-30 17:31:49 +03001768#endif
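
/*
 * A sketch of how a SoC driver would typically wire the hooks above into its
 * dev_pm_ops (the foo_* names are made up; the INTEL_PINCTRL_PM_OPS() helper
 * is assumed to be provided by pinctrl-intel.h and to expand to roughly
 * this):
 *
 *	static const struct dev_pm_ops foo_pinctrl_pm_ops = {
 *		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(intel_pinctrl_suspend_noirq,
 *					      intel_pinctrl_resume_noirq)
 *	};
 *
 *	static struct platform_driver foo_pinctrl_driver = {
 *		.probe = intel_pinctrl_probe_by_hid,
 *		.driver = {
 *			.name = "foo-pinctrl",
 *			.pm = &foo_pinctrl_pm_ops,
 *		},
 *	};
 */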
1769
1770MODULE_AUTHOR("Mathias Nyman <mathias.nyman@linux.intel.com>");
1771MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
1772MODULE_DESCRIPTION("Intel pinctrl/GPIO core driver");
1773MODULE_LICENSE("GPL v2");