// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Quadrature Encoder Peripheral driver
 *
 * Copyright (C) 2019-2021 Intel Corporation
 *
 * Author: Felipe Balbi (Intel)
 * Author: Jarkko Nikula <jarkko.nikula@linux.intel.com>
 * Author: Raymond Tan <raymond.tan@intel.com>
 */
#include <linux/bitops.h>
#include <linux/counter.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

#define INTEL_QEPCON			0x00
#define INTEL_QEPFLT			0x04
#define INTEL_QEPCOUNT			0x08
#define INTEL_QEPMAX			0x0c
#define INTEL_QEPWDT			0x10
#define INTEL_QEPCAPDIV			0x14
#define INTEL_QEPCNTR			0x18
#define INTEL_QEPCAPBUF			0x1c
#define INTEL_QEPINT_STAT		0x20
#define INTEL_QEPINT_MASK		0x24

/* QEPCON */
#define INTEL_QEPCON_EN			BIT(0)
#define INTEL_QEPCON_FLT_EN		BIT(1)
#define INTEL_QEPCON_EDGE_A		BIT(2)
#define INTEL_QEPCON_EDGE_B		BIT(3)
#define INTEL_QEPCON_EDGE_INDX		BIT(4)
#define INTEL_QEPCON_SWPAB		BIT(5)
#define INTEL_QEPCON_OP_MODE		BIT(6)
#define INTEL_QEPCON_PH_ERR		BIT(7)
#define INTEL_QEPCON_COUNT_RST_MODE	BIT(8)
#define INTEL_QEPCON_INDX_GATING_MASK	GENMASK(10, 9)
#define INTEL_QEPCON_INDX_GATING(n)	(((n) & 3) << 9)
#define INTEL_QEPCON_INDX_PAL_PBL	INTEL_QEPCON_INDX_GATING(0)
#define INTEL_QEPCON_INDX_PAL_PBH	INTEL_QEPCON_INDX_GATING(1)
#define INTEL_QEPCON_INDX_PAH_PBL	INTEL_QEPCON_INDX_GATING(2)
#define INTEL_QEPCON_INDX_PAH_PBH	INTEL_QEPCON_INDX_GATING(3)
#define INTEL_QEPCON_CAP_MODE		BIT(11)
#define INTEL_QEPCON_FIFO_THRE_MASK	GENMASK(14, 12)
#define INTEL_QEPCON_FIFO_THRE(n)	((((n) - 1) & 7) << 12)
#define INTEL_QEPCON_FIFO_EMPTY		BIT(15)

/* QEPFLT */
#define INTEL_QEPFLT_MAX_COUNT(n)	((n) & 0x1fffff)

/* QEPINT */
#define INTEL_QEPINT_FIFOCRIT		BIT(5)
#define INTEL_QEPINT_FIFOENTRY		BIT(4)
#define INTEL_QEPINT_QEPDIR		BIT(3)
#define INTEL_QEPINT_QEPRST_UP		BIT(2)
#define INTEL_QEPINT_QEPRST_DOWN	BIT(1)
#define INTEL_QEPINT_WDT		BIT(0)

#define INTEL_QEPINT_MASK_ALL		GENMASK(5, 0)

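/*
 * Clock period used to convert the spike filter length between
 * nanoseconds and QEPFLT clock periods; 10 ns corresponds to a 100 MHz
 * reference clock (a fixed assumption of this driver, not read from
 * the hardware).
 */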
#define INTEL_QEP_CLK_PERIOD_NS		10

#define INTEL_QEP_COUNTER_EXT_RW(_name)	\
{					\
	.name = #_name,			\
	.read = _name##_read,		\
	.write = _name##_write,		\
}

struct intel_qep {
	struct counter_device counter;
	struct mutex lock;
	struct device *dev;
	void __iomem *regs;
	bool enabled;
	/* Context save registers */
	u32 qepcon;
	u32 qepflt;
	u32 qepmax;
};

static inline u32 intel_qep_readl(struct intel_qep *qep, u32 offset)
{
	return readl(qep->regs + offset);
}

static inline void intel_qep_writel(struct intel_qep *qep,
				    u32 offset, u32 value)
{
	writel(value, qep->regs + offset);
}

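/*
 * Bring the peripheral to a known state: disable it (flushing the write
 * with a dummy read), clear the OP_MODE and FLT_EN bits, set the EDGE_A,
 * EDGE_B, EDGE_INDX and COUNT_RST_MODE bits, and mask all interrupts.
 */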
static void intel_qep_init(struct intel_qep *qep)
{
	u32 reg;

	reg = intel_qep_readl(qep, INTEL_QEPCON);
	reg &= ~INTEL_QEPCON_EN;
	intel_qep_writel(qep, INTEL_QEPCON, reg);
	qep->enabled = false;
	/*
	 * Make sure peripheral is disabled by flushing the write with
	 * a dummy read
	 */
	reg = intel_qep_readl(qep, INTEL_QEPCON);

	reg &= ~(INTEL_QEPCON_OP_MODE | INTEL_QEPCON_FLT_EN);
	reg |= INTEL_QEPCON_EDGE_A | INTEL_QEPCON_EDGE_B |
	       INTEL_QEPCON_EDGE_INDX | INTEL_QEPCON_COUNT_RST_MODE;
	intel_qep_writel(qep, INTEL_QEPCON, reg);
	intel_qep_writel(qep, INTEL_QEPINT_MASK, INTEL_QEPINT_MASK_ALL);
}

static int intel_qep_count_read(struct counter_device *counter,
				struct counter_count *count,
				unsigned long *val)
{
	struct intel_qep *const qep = counter->priv;

	pm_runtime_get_sync(qep->dev);
	*val = intel_qep_readl(qep, INTEL_QEPCOUNT);
	pm_runtime_put(qep->dev);

	return 0;
}

static const enum counter_count_function intel_qep_count_functions[] = {
	COUNTER_COUNT_FUNCTION_QUADRATURE_X4,
};

static int intel_qep_function_get(struct counter_device *counter,
				  struct counter_count *count,
				  size_t *function)
{
	*function = 0;

	return 0;
}

static const enum counter_synapse_action intel_qep_synapse_actions[] = {
	COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
};

static int intel_qep_action_get(struct counter_device *counter,
				struct counter_count *count,
				struct counter_synapse *synapse,
				size_t *action)
{
	*action = 0;
	return 0;
}

static const struct counter_ops intel_qep_counter_ops = {
	.count_read = intel_qep_count_read,
	.function_get = intel_qep_function_get,
	.action_get = intel_qep_action_get,
};

#define INTEL_QEP_SIGNAL(_id, _name) {	\
	.id = (_id),			\
	.name = (_name),		\
}

static struct counter_signal intel_qep_signals[] = {
	INTEL_QEP_SIGNAL(0, "Phase A"),
	INTEL_QEP_SIGNAL(1, "Phase B"),
	INTEL_QEP_SIGNAL(2, "Index"),
};

#define INTEL_QEP_SYNAPSE(_signal_id) {				\
	.actions_list = intel_qep_synapse_actions,		\
	.num_actions = ARRAY_SIZE(intel_qep_synapse_actions),	\
	.signal = &intel_qep_signals[(_signal_id)],		\
}

static struct counter_synapse intel_qep_count_synapses[] = {
	INTEL_QEP_SYNAPSE(0),
	INTEL_QEP_SYNAPSE(1),
	INTEL_QEP_SYNAPSE(2),
};

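/*
 * The count extension callbacks below back the "ceiling", "enable",
 * "spike_filter_ns" and "preset_enable" attributes exposed through the
 * counter sysfs interface. Configuration writes return -EBUSY while the
 * counter is enabled. Illustrative usage (the counter index depends on
 * enumeration order):
 *
 *   echo 65535 > /sys/bus/counter/devices/counter0/count0/ceiling
 *   echo 1 > /sys/bus/counter/devices/counter0/count0/enable
 *   cat /sys/bus/counter/devices/counter0/count0/count
 */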
static ssize_t ceiling_read(struct counter_device *counter,
			    struct counter_count *count,
			    void *priv, char *buf)
{
	struct intel_qep *qep = counter->priv;
	u32 reg;

	pm_runtime_get_sync(qep->dev);
	reg = intel_qep_readl(qep, INTEL_QEPMAX);
	pm_runtime_put(qep->dev);

	return sysfs_emit(buf, "%u\n", reg);
}

static ssize_t ceiling_write(struct counter_device *counter,
			     struct counter_count *count,
			     void *priv, const char *buf, size_t len)
{
	struct intel_qep *qep = counter->priv;
	u32 max;
	int ret;

	ret = kstrtou32(buf, 0, &max);
	if (ret < 0)
		return ret;

	mutex_lock(&qep->lock);
	if (qep->enabled) {
		ret = -EBUSY;
		goto out;
	}

	pm_runtime_get_sync(qep->dev);
	intel_qep_writel(qep, INTEL_QEPMAX, max);
	pm_runtime_put(qep->dev);
	ret = len;

out:
	mutex_unlock(&qep->lock);
	return ret;
}

static ssize_t enable_read(struct counter_device *counter,
			   struct counter_count *count,
			   void *priv, char *buf)
{
	struct intel_qep *qep = counter->priv;

	return sysfs_emit(buf, "%u\n", qep->enabled);
}

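/*
 * Enabling the counter takes an extra runtime PM reference
 * (pm_runtime_get_noresume()) so the device stays powered while it is
 * counting; disabling drops that reference again with
 * pm_runtime_put_noidle().
 */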
static ssize_t enable_write(struct counter_device *counter,
			    struct counter_count *count,
			    void *priv, const char *buf, size_t len)
{
	struct intel_qep *qep = counter->priv;
	u32 reg;
	bool val, changed;
	int ret;

	ret = kstrtobool(buf, &val);
	if (ret)
		return ret;

	mutex_lock(&qep->lock);
	changed = val ^ qep->enabled;
	if (!changed)
		goto out;

	pm_runtime_get_sync(qep->dev);
	reg = intel_qep_readl(qep, INTEL_QEPCON);
	if (val) {
		/* Enable peripheral and keep runtime PM always on */
		reg |= INTEL_QEPCON_EN;
		pm_runtime_get_noresume(qep->dev);
	} else {
		/* Let runtime PM be idle and disable peripheral */
		pm_runtime_put_noidle(qep->dev);
		reg &= ~INTEL_QEPCON_EN;
	}
	intel_qep_writel(qep, INTEL_QEPCON, reg);
	pm_runtime_put(qep->dev);
	qep->enabled = val;

out:
	mutex_unlock(&qep->lock);
	return len;
}

static ssize_t spike_filter_ns_read(struct counter_device *counter,
				    struct counter_count *count,
				    void *priv, char *buf)
{
	struct intel_qep *qep = counter->priv;
	u32 reg;

	pm_runtime_get_sync(qep->dev);
	reg = intel_qep_readl(qep, INTEL_QEPCON);
	if (!(reg & INTEL_QEPCON_FLT_EN)) {
		pm_runtime_put(qep->dev);
		return sysfs_emit(buf, "0\n");
	}
	reg = INTEL_QEPFLT_MAX_COUNT(intel_qep_readl(qep, INTEL_QEPFLT));
	pm_runtime_put(qep->dev);

	return sysfs_emit(buf, "%u\n", (reg + 2) * INTEL_QEP_CLK_PERIOD_NS);
}

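/*
 * Worked example of the (MAX_COUNT + 2) clock period encoding with the
 * 10 ns clock: writing 70 ns gives length = 7, MAX_COUNT = 5, and
 * spike_filter_ns reads back as (5 + 2) * 10 = 70 ns; writing 10 ns is
 * rejected with -EINVAL because it cannot be represented.
 */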
static ssize_t spike_filter_ns_write(struct counter_device *counter,
				     struct counter_count *count,
				     void *priv, const char *buf, size_t len)
{
	struct intel_qep *qep = counter->priv;
	u32 reg, length;
	bool enable;
	int ret;

	ret = kstrtou32(buf, 0, &length);
	if (ret < 0)
		return ret;

	/*
	 * Spike filter length is (MAX_COUNT + 2) clock periods.
	 * Disable filter when userspace writes 0, enable for valid
	 * nanoseconds values and error out otherwise.
	 */
	length /= INTEL_QEP_CLK_PERIOD_NS;
	if (length == 0) {
		enable = false;
		length = 0;
	} else if (length >= 2) {
		enable = true;
		length -= 2;
	} else {
		return -EINVAL;
	}

	if (length > INTEL_QEPFLT_MAX_COUNT(length))
		return -EINVAL;

	mutex_lock(&qep->lock);
	if (qep->enabled) {
		ret = -EBUSY;
		goto out;
	}

	pm_runtime_get_sync(qep->dev);
	reg = intel_qep_readl(qep, INTEL_QEPCON);
	if (enable)
		reg |= INTEL_QEPCON_FLT_EN;
	else
		reg &= ~INTEL_QEPCON_FLT_EN;
	intel_qep_writel(qep, INTEL_QEPFLT, length);
	intel_qep_writel(qep, INTEL_QEPCON, reg);
	pm_runtime_put(qep->dev);
	ret = len;

out:
	mutex_unlock(&qep->lock);
	return ret;
}

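/*
 * preset_enable is reported as the inverse of the QEPCON COUNT_RST_MODE
 * bit and, like the other configuration attributes, can only be changed
 * while the counter is disabled.
 */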
static ssize_t preset_enable_read(struct counter_device *counter,
				  struct counter_count *count,
				  void *priv, char *buf)
{
	struct intel_qep *qep = counter->priv;
	u32 reg;

	pm_runtime_get_sync(qep->dev);
	reg = intel_qep_readl(qep, INTEL_QEPCON);
	pm_runtime_put(qep->dev);
	return sysfs_emit(buf, "%u\n", !(reg & INTEL_QEPCON_COUNT_RST_MODE));
}

static ssize_t preset_enable_write(struct counter_device *counter,
				   struct counter_count *count,
				   void *priv, const char *buf, size_t len)
{
	struct intel_qep *qep = counter->priv;
	u32 reg;
	bool val;
	int ret;

	ret = kstrtobool(buf, &val);
	if (ret)
		return ret;

	mutex_lock(&qep->lock);
	if (qep->enabled) {
		ret = -EBUSY;
		goto out;
	}

	pm_runtime_get_sync(qep->dev);
	reg = intel_qep_readl(qep, INTEL_QEPCON);
	if (val)
		reg &= ~INTEL_QEPCON_COUNT_RST_MODE;
	else
		reg |= INTEL_QEPCON_COUNT_RST_MODE;

	intel_qep_writel(qep, INTEL_QEPCON, reg);
	pm_runtime_put(qep->dev);
	ret = len;

out:
	mutex_unlock(&qep->lock);

	return ret;
}

static const struct counter_count_ext intel_qep_count_ext[] = {
	INTEL_QEP_COUNTER_EXT_RW(ceiling),
	INTEL_QEP_COUNTER_EXT_RW(enable),
	INTEL_QEP_COUNTER_EXT_RW(spike_filter_ns),
	INTEL_QEP_COUNTER_EXT_RW(preset_enable)
};

static struct counter_count intel_qep_counter_count[] = {
	{
		.id = 0,
		.name = "Channel 1 Count",
		.functions_list = intel_qep_count_functions,
		.num_functions = ARRAY_SIZE(intel_qep_count_functions),
		.synapses = intel_qep_count_synapses,
		.num_synapses = ARRAY_SIZE(intel_qep_count_synapses),
		.ext = intel_qep_count_ext,
		.num_ext = ARRAY_SIZE(intel_qep_count_ext),
	},
};

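/*
 * Probe enables the PCI device, maps BAR 0, resets the peripheral via
 * intel_qep_init() and registers a counter device with one count
 * ("Channel 1 Count") and three signals (Phase A, Phase B, Index),
 * then releases and allows runtime PM.
 */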
static int intel_qep_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
	struct intel_qep *qep;
	struct device *dev = &pci->dev;
	void __iomem *regs;
	int ret;

	qep = devm_kzalloc(dev, sizeof(*qep), GFP_KERNEL);
	if (!qep)
		return -ENOMEM;

	ret = pcim_enable_device(pci);
	if (ret)
		return ret;

	pci_set_master(pci);

	ret = pcim_iomap_regions(pci, BIT(0), pci_name(pci));
	if (ret)
		return ret;

	regs = pcim_iomap_table(pci)[0];
	if (!regs)
		return -ENOMEM;

	qep->dev = dev;
	qep->regs = regs;
	mutex_init(&qep->lock);

	intel_qep_init(qep);
	pci_set_drvdata(pci, qep);

	qep->counter.name = pci_name(pci);
	qep->counter.parent = dev;
	qep->counter.ops = &intel_qep_counter_ops;
	qep->counter.counts = intel_qep_counter_count;
	qep->counter.num_counts = ARRAY_SIZE(intel_qep_counter_count);
	qep->counter.signals = intel_qep_signals;
	qep->counter.num_signals = ARRAY_SIZE(intel_qep_signals);
	qep->counter.priv = qep;
	qep->enabled = false;

	pm_runtime_put(dev);
	pm_runtime_allow(dev);

	return devm_counter_register(&pci->dev, &qep->counter);
}

static void intel_qep_remove(struct pci_dev *pci)
{
	struct intel_qep *qep = pci_get_drvdata(pci);
	struct device *dev = &pci->dev;

	pm_runtime_forbid(dev);
	if (!qep->enabled)
		pm_runtime_get(dev);

	intel_qep_writel(qep, INTEL_QEPCON, 0);
}

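/*
 * System sleep support: suspend saves the QEPCON, QEPFLT and QEPMAX
 * context registers; resume rewrites them with the peripheral disabled,
 * since some control bits are only writable in that state, and restores
 * the enable bit last.
 */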
static int __maybe_unused intel_qep_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_qep *qep = pci_get_drvdata(pdev);

	qep->qepcon = intel_qep_readl(qep, INTEL_QEPCON);
	qep->qepflt = intel_qep_readl(qep, INTEL_QEPFLT);
	qep->qepmax = intel_qep_readl(qep, INTEL_QEPMAX);

	return 0;
}

static int __maybe_unused intel_qep_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_qep *qep = pci_get_drvdata(pdev);

	/*
	 * Make sure peripheral is disabled when restoring registers and
	 * control register bits that are writable only when the peripheral
	 * is disabled
	 */
	intel_qep_writel(qep, INTEL_QEPCON, 0);
	intel_qep_readl(qep, INTEL_QEPCON);

	intel_qep_writel(qep, INTEL_QEPFLT, qep->qepflt);
	intel_qep_writel(qep, INTEL_QEPMAX, qep->qepmax);
	intel_qep_writel(qep, INTEL_QEPINT_MASK, INTEL_QEPINT_MASK_ALL);

	/* Restore all other control register bits except enable status */
	intel_qep_writel(qep, INTEL_QEPCON, qep->qepcon & ~INTEL_QEPCON_EN);
	intel_qep_readl(qep, INTEL_QEPCON);

	/* Restore enable status */
	intel_qep_writel(qep, INTEL_QEPCON, qep->qepcon);

	return 0;
}

static UNIVERSAL_DEV_PM_OPS(intel_qep_pm_ops,
			    intel_qep_suspend, intel_qep_resume, NULL);

static const struct pci_device_id intel_qep_id_table[] = {
	/* EHL */
	{ PCI_VDEVICE(INTEL, 0x4bc3), },
	{ PCI_VDEVICE(INTEL, 0x4b81), },
	{ PCI_VDEVICE(INTEL, 0x4b82), },
	{ PCI_VDEVICE(INTEL, 0x4b83), },
	{ } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(pci, intel_qep_id_table);

static struct pci_driver intel_qep_driver = {
	.name = "intel-qep",
	.id_table = intel_qep_id_table,
	.probe = intel_qep_probe,
	.remove = intel_qep_remove,
	.driver = {
		.pm = &intel_qep_pm_ops,
	}
};

module_pci_driver(intel_qep_driver);

MODULE_AUTHOR("Felipe Balbi (Intel)");
MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@linux.intel.com>");
MODULE_AUTHOR("Raymond Tan <raymond.tan@intel.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Intel Quadrature Encoder Peripheral driver");