// SPDX-License-Identifier: GPL-2.0-only
/*
 * Mediated virtual PCI serial host device driver
 *
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *     Author: Neo Jia <cjia@nvidia.com>
 *             Kirti Wankhede <kwankhede@nvidia.com>
 *
 * Sample driver that creates an mdev device that simulates a serial port
 * over a PCI card.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/uuid.h>
#include <linux/vfio.h>
#include <linux/iommu.h>
#include <linux/sysfs.h>
#include <linux/ctype.h>
#include <linux/file.h>
#include <linux/mdev.h>
#include <linux/pci.h>
#include <linux/serial.h>
#include <uapi/linux/serial_reg.h>
#include <linux/eventfd.h>

/*
 * #defines
 */

#define VERSION_STRING		"0.1"
#define DRIVER_AUTHOR		"NVIDIA Corporation"

#define MTTY_CLASS_NAME		"mtty"

#define MTTY_NAME		"mtty"

#define MTTY_STRING_LEN		16

#define MTTY_CONFIG_SPACE_SIZE	0xff
#define MTTY_IO_BAR_SIZE	0x8
#define MTTY_MMIO_BAR_SIZE	0x100000

#define STORE_LE16(addr, val)	(*(u16 *)addr = val)
#define STORE_LE32(addr, val)	(*(u32 *)addr = val)

#define MAX_FIFO_SIZE	16

#define CIRCULAR_BUF_INC_IDX(idx)	(idx = (idx + 1) & (MAX_FIFO_SIZE - 1))

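/*
 * Each VFIO region is mapped into the device file offset space by packing
 * the region index into the upper bits of the offset: bits 40 and above
 * carry the region index, the lower 40 bits carry the offset within that
 * region.  mdev_access() uses these macros to demultiplex accesses.
 */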
#define MTTY_VFIO_PCI_OFFSET_SHIFT	40

#define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off)	(off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
#define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
				((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
#define MTTY_VFIO_PCI_OFFSET_MASK \
				(((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
#define MAX_MTTYS	24

/*
 * Global Structures
 */

static struct mtty_dev {
	dev_t		vd_devt;
	struct class	*vd_class;
	struct cdev	vd_cdev;
	struct idr	vd_idr;
	struct device	dev;
} mtty_dev;

struct mdev_region_info {
	u64 start;
	u64 phys_start;
	u32 size;
	u64 vfio_offset;
};

#if defined(DEBUG_REGS)
static const char *wr_reg[] = {
	"TX",
	"IER",
	"FCR",
	"LCR",
	"MCR",
	"LSR",
	"MSR",
	"SCR"
};

static const char *rd_reg[] = {
	"RX",
	"IER",
	"IIR",
	"LCR",
	"MCR",
	"LSR",
	"MSR",
	"SCR"
};
#endif

/* loop back buffer */
struct rxtx {
	u8 fifo[MAX_FIFO_SIZE];
	u8 head, tail;
	u8 count;
};

struct serial_port {
	u8 uart_reg[8];		/* 8 registers */
	struct rxtx rxtx;	/* loop back buffer */
	bool dlab;
	bool overrun;
	u16 divisor;
	u8 fcr;			/* FIFO control register */
	u8 max_fifo_size;
	u8 intr_trigger_level;	/* interrupt trigger level */
};

/* State of each mdev device */
struct mdev_state {
	struct vfio_device vdev;
	int irq_fd;
	struct eventfd_ctx *intx_evtfd;
	struct eventfd_ctx *msi_evtfd;
	int irq_index;
	u8 *vconfig;
	struct mutex ops_lock;
	struct mdev_device *mdev;
	struct mdev_region_info region_info[VFIO_PCI_NUM_REGIONS];
	u32 bar_mask[VFIO_PCI_NUM_REGIONS];
	struct list_head next;
	struct serial_port s[2];
	struct mutex rxtx_lock;
	struct vfio_device_info dev_info;
	int nr_ports;
};

static struct mutex mdev_list_lock;
static struct list_head mdev_devices_list;

static const struct file_operations vd_fops = {
	.owner	= THIS_MODULE,
};

static const struct vfio_device_ops mtty_dev_ops;

/* function prototypes */

static int mtty_trigger_interrupt(struct mdev_state *mdev_state);

/* Helper functions */

static void dump_buffer(u8 *buf, uint32_t count)
{
#if defined(DEBUG)
	int i;

	pr_info("Buffer:\n");
	for (i = 0; i < count; i++) {
		pr_info("%2x ", *(buf + i));
		if ((i + 1) % 16 == 0)
			pr_info("\n");
	}
#endif
}

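/*
 * Build the virtual PCI configuration space presented to user space: a
 * made-up vendor/device ID, a 16550-compatible serial class code, one I/O
 * port BAR per emulated port and vendor-specific bytes at 0x60 that spell
 * out "PCI Serial/UART".
 */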
static void mtty_create_config_space(struct mdev_state *mdev_state)
{
	/* PCI dev ID */
	STORE_LE32((u32 *) &mdev_state->vconfig[0x0], 0x32534348);

	/* Control: I/O+, Mem-, BusMaster- */
	STORE_LE16((u16 *) &mdev_state->vconfig[0x4], 0x0001);

	/* Status: capabilities list absent */
	STORE_LE16((u16 *) &mdev_state->vconfig[0x6], 0x0200);

	/* Rev ID */
	mdev_state->vconfig[0x8] = 0x10;

	/* programming interface class : 16550-compatible serial controller */
	mdev_state->vconfig[0x9] = 0x02;

	/* Sub class : 00 */
	mdev_state->vconfig[0xa] = 0x00;

	/* Base class : Simple Communication controllers */
	mdev_state->vconfig[0xb] = 0x07;

	/* base address registers */
	/* BAR0: IO space */
	STORE_LE32((u32 *) &mdev_state->vconfig[0x10], 0x000001);
	mdev_state->bar_mask[0] = ~(MTTY_IO_BAR_SIZE) + 1;

	if (mdev_state->nr_ports == 2) {
		/* BAR1: IO space */
		STORE_LE32((u32 *) &mdev_state->vconfig[0x14], 0x000001);
		mdev_state->bar_mask[1] = ~(MTTY_IO_BAR_SIZE) + 1;
	}

	/* Subsystem ID */
	STORE_LE32((u32 *) &mdev_state->vconfig[0x2c], 0x32534348);

	mdev_state->vconfig[0x34] = 0x00;	/* Cap Ptr */
	mdev_state->vconfig[0x3d] = 0x01;	/* interrupt pin (INTA#) */

	/* Vendor specific data */
	mdev_state->vconfig[0x40] = 0x23;
	mdev_state->vconfig[0x43] = 0x80;
	mdev_state->vconfig[0x44] = 0x23;
	mdev_state->vconfig[0x48] = 0x23;
	mdev_state->vconfig[0x4c] = 0x23;

	mdev_state->vconfig[0x60] = 0x50;
	mdev_state->vconfig[0x61] = 0x43;
	mdev_state->vconfig[0x62] = 0x49;
	mdev_state->vconfig[0x63] = 0x20;
	mdev_state->vconfig[0x64] = 0x53;
	mdev_state->vconfig[0x65] = 0x65;
	mdev_state->vconfig[0x66] = 0x72;
	mdev_state->vconfig[0x67] = 0x69;
	mdev_state->vconfig[0x68] = 0x61;
	mdev_state->vconfig[0x69] = 0x6c;
	mdev_state->vconfig[0x6a] = 0x2f;
	mdev_state->vconfig[0x6b] = 0x55;
	mdev_state->vconfig[0x6c] = 0x41;
	mdev_state->vconfig[0x6d] = 0x52;
	mdev_state->vconfig[0x6e] = 0x54;
}

static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
				 u8 *buf, u32 count)
{
	u32 cfg_addr, bar_mask, bar_index = 0;

	switch (offset) {
	case 0x04: /* device control */
	case 0x06: /* device status */
		/* do nothing */
		break;
	case 0x3c: /* interrupt line */
		mdev_state->vconfig[0x3c] = buf[0];
		break;
	case 0x3d:
		/*
		 * Interrupt Pin is hardwired to INTA.
		 * This field is write protected by hardware
		 */
		break;
	case 0x10: /* BAR0 */
	case 0x14: /* BAR1 */
		if (offset == 0x10)
			bar_index = 0;
		else if (offset == 0x14)
			bar_index = 1;

		if ((mdev_state->nr_ports == 1) && (bar_index == 1)) {
			STORE_LE32(&mdev_state->vconfig[offset], 0);
			break;
		}

		cfg_addr = *(u32 *)buf;
		pr_info("BAR%d addr 0x%x\n", bar_index, cfg_addr);

		if (cfg_addr == 0xffffffff) {
			bar_mask = mdev_state->bar_mask[bar_index];
			cfg_addr = (cfg_addr & bar_mask);
		}

		cfg_addr |= (mdev_state->vconfig[offset] & 0x3ul);
		STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
		break;
	case 0x18: /* BAR2 */
	case 0x1c: /* BAR3 */
	case 0x20: /* BAR4 */
		STORE_LE32(&mdev_state->vconfig[offset], 0);
		break;
	default:
		pr_info("PCI config write @0x%x of %d bytes not handled\n",
			offset, count);
		break;
	}
}

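/*
 * Emulate a guest write to one of the UART registers.  Data written to the
 * transmit holding register is looped back into the RX FIFO of the same
 * port, and an interrupt is raised through mtty_trigger_interrupt() when
 * the corresponding IER bit is enabled.
 */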
static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
			     u16 offset, u8 *buf, u32 count)
{
	u8 data = *buf;

	/* Handle data written by guest */
	switch (offset) {
	case UART_TX:
		/* if DLAB set, data is LSB of divisor */
		if (mdev_state->s[index].dlab) {
			mdev_state->s[index].divisor |= data;
			break;
		}

		mutex_lock(&mdev_state->rxtx_lock);

		/* save in TX buffer */
		if (mdev_state->s[index].rxtx.count <
				mdev_state->s[index].max_fifo_size) {
			mdev_state->s[index].rxtx.fifo[
					mdev_state->s[index].rxtx.head] = data;
			mdev_state->s[index].rxtx.count++;
			CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.head);
			mdev_state->s[index].overrun = false;

			/*
			 * Trigger interrupt if receive data interrupt is
			 * enabled and fifo reached trigger level
			 */
			if ((mdev_state->s[index].uart_reg[UART_IER] &
						UART_IER_RDI) &&
			    (mdev_state->s[index].rxtx.count ==
				    mdev_state->s[index].intr_trigger_level)) {
				/* trigger interrupt */
#if defined(DEBUG_INTR)
				pr_err("Serial port %d: Fifo level trigger\n",
					index);
#endif
				mtty_trigger_interrupt(mdev_state);
			}
		} else {
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: Buffer Overflow\n", index);
#endif
			mdev_state->s[index].overrun = true;

			/*
			 * Trigger interrupt if receiver line status interrupt
			 * is enabled
			 */
			if (mdev_state->s[index].uart_reg[UART_IER] &
							UART_IER_RLSI)
				mtty_trigger_interrupt(mdev_state);
		}
		mutex_unlock(&mdev_state->rxtx_lock);
		break;

	case UART_IER:
		/* if DLAB set, data is MSB of divisor */
		if (mdev_state->s[index].dlab)
			mdev_state->s[index].divisor |= (u16)data << 8;
		else {
			mdev_state->s[index].uart_reg[offset] = data;
			mutex_lock(&mdev_state->rxtx_lock);
			if ((data & UART_IER_THRI) &&
			    (mdev_state->s[index].rxtx.head ==
					mdev_state->s[index].rxtx.tail)) {
#if defined(DEBUG_INTR)
				pr_err("Serial port %d: IER_THRI write\n",
					index);
#endif
				mtty_trigger_interrupt(mdev_state);
			}

			mutex_unlock(&mdev_state->rxtx_lock);
		}

		break;

	case UART_FCR:
		mdev_state->s[index].fcr = data;

		mutex_lock(&mdev_state->rxtx_lock);
		if (data & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT)) {
			/* clear loop back FIFO */
			mdev_state->s[index].rxtx.count = 0;
			mdev_state->s[index].rxtx.head = 0;
			mdev_state->s[index].rxtx.tail = 0;
		}
		mutex_unlock(&mdev_state->rxtx_lock);

		switch (data & UART_FCR_TRIGGER_MASK) {
		case UART_FCR_TRIGGER_1:
			mdev_state->s[index].intr_trigger_level = 1;
			break;

		case UART_FCR_TRIGGER_4:
			mdev_state->s[index].intr_trigger_level = 4;
			break;

		case UART_FCR_TRIGGER_8:
			mdev_state->s[index].intr_trigger_level = 8;
			break;

		case UART_FCR_TRIGGER_14:
			mdev_state->s[index].intr_trigger_level = 14;
			break;
		}

		/*
		 * Otherwise set the trigger level to 1, or implement a timer
		 * with a timeout of 4 characters that sets the Receive data
		 * timeout bit in the IIR register when it expires.
		 */
		mdev_state->s[index].intr_trigger_level = 1;
		if (data & UART_FCR_ENABLE_FIFO)
			mdev_state->s[index].max_fifo_size = MAX_FIFO_SIZE;
		else {
			mdev_state->s[index].max_fifo_size = 1;
			mdev_state->s[index].intr_trigger_level = 1;
		}

		break;

	case UART_LCR:
		if (data & UART_LCR_DLAB) {
			mdev_state->s[index].dlab = true;
			mdev_state->s[index].divisor = 0;
		} else
			mdev_state->s[index].dlab = false;

		mdev_state->s[index].uart_reg[offset] = data;
		break;

	case UART_MCR:
		mdev_state->s[index].uart_reg[offset] = data;

		if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
				(data & UART_MCR_OUT2)) {
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: MCR_OUT2 write\n", index);
#endif
			mtty_trigger_interrupt(mdev_state);
		}

		if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
				(data & (UART_MCR_RTS | UART_MCR_DTR))) {
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: MCR RTS/DTR write\n", index);
#endif
			mtty_trigger_interrupt(mdev_state);
		}
		break;

	case UART_LSR:
	case UART_MSR:
		/* do nothing */
		break;

	case UART_SCR:
		mdev_state->s[index].uart_reg[offset] = data;
		break;

	default:
		break;
	}
}

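/*
 * Emulate a guest read from one of the UART registers.  RX data comes from
 * the loop back FIFO filled by handle_bar_write(); IIR, LSR and MSR are
 * synthesized from the current FIFO and register state.
 */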
static void handle_bar_read(unsigned int index, struct mdev_state *mdev_state,
			    u16 offset, u8 *buf, u32 count)
{
	/* Handle read requests by guest */
	switch (offset) {
	case UART_RX:
		/* if DLAB set, data is LSB of divisor */
		if (mdev_state->s[index].dlab) {
			*buf = (u8)mdev_state->s[index].divisor;
			break;
		}

		mutex_lock(&mdev_state->rxtx_lock);
		/* return data in tx buffer */
		if (mdev_state->s[index].rxtx.head !=
				mdev_state->s[index].rxtx.tail) {
			*buf = mdev_state->s[index].rxtx.fifo[
						mdev_state->s[index].rxtx.tail];
			mdev_state->s[index].rxtx.count--;
			CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.tail);
		}

		if (mdev_state->s[index].rxtx.head ==
				mdev_state->s[index].rxtx.tail) {
			/*
			 * Trigger interrupt if tx buffer empty interrupt is
			 * enabled and fifo is empty
			 */
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: Buffer Empty\n", index);
#endif
			if (mdev_state->s[index].uart_reg[UART_IER] &
							UART_IER_THRI)
				mtty_trigger_interrupt(mdev_state);
		}
		mutex_unlock(&mdev_state->rxtx_lock);

		break;

	case UART_IER:
		if (mdev_state->s[index].dlab) {
			*buf = (u8)(mdev_state->s[index].divisor >> 8);
			break;
		}
		*buf = mdev_state->s[index].uart_reg[offset] & 0x0f;
		break;

	case UART_IIR:
	{
		u8 ier = mdev_state->s[index].uart_reg[UART_IER];
		*buf = 0;

		mutex_lock(&mdev_state->rxtx_lock);
		/* Interrupt priority 1: Parity, overrun, framing or break */
		if ((ier & UART_IER_RLSI) && mdev_state->s[index].overrun)
			*buf |= UART_IIR_RLSI;

		/* Interrupt priority 2: Fifo trigger level reached */
		if ((ier & UART_IER_RDI) &&
		    (mdev_state->s[index].rxtx.count >=
			  mdev_state->s[index].intr_trigger_level))
			*buf |= UART_IIR_RDI;

		/* Interrupt priority 3: transmitter holding register empty */
		if ((ier & UART_IER_THRI) &&
		    (mdev_state->s[index].rxtx.head ==
				mdev_state->s[index].rxtx.tail))
			*buf |= UART_IIR_THRI;

		/* Interrupt priority 4: Modem status: CTS, DSR, RI or DCD */
		if ((ier & UART_IER_MSI) &&
		    (mdev_state->s[index].uart_reg[UART_MCR] &
				(UART_MCR_RTS | UART_MCR_DTR)))
			*buf |= UART_IIR_MSI;

		/* bit0: 0=> interrupt pending, 1=> no interrupt is pending */
		if (*buf == 0)
			*buf = UART_IIR_NO_INT;

		/* set bit 6 & 7 to be 16550 compatible */
		*buf |= 0xC0;
		mutex_unlock(&mdev_state->rxtx_lock);
	}
	break;

	case UART_LCR:
	case UART_MCR:
		*buf = mdev_state->s[index].uart_reg[offset];
		break;

	case UART_LSR:
	{
		u8 lsr = 0;

		mutex_lock(&mdev_state->rxtx_lock);
		/* at least one char in FIFO */
		if (mdev_state->s[index].rxtx.head !=
				mdev_state->s[index].rxtx.tail)
			lsr |= UART_LSR_DR;

		/* if FIFO overrun */
		if (mdev_state->s[index].overrun)
			lsr |= UART_LSR_OE;

		/* transmit FIFO empty and transmitter empty */
		if (mdev_state->s[index].rxtx.head ==
				mdev_state->s[index].rxtx.tail)
			lsr |= UART_LSR_TEMT | UART_LSR_THRE;

		mutex_unlock(&mdev_state->rxtx_lock);
		*buf = lsr;
		break;
	}
	case UART_MSR:
		*buf = UART_MSR_DSR | UART_MSR_DDSR | UART_MSR_DCD;

		mutex_lock(&mdev_state->rxtx_lock);
		/* if AFE is 1 and the FIFO has space, set CTS bit */
		if (mdev_state->s[index].uart_reg[UART_MCR] &
						UART_MCR_AFE) {
			if (mdev_state->s[index].rxtx.count <
					mdev_state->s[index].max_fifo_size)
				*buf |= UART_MSR_CTS | UART_MSR_DCTS;
		} else
			*buf |= UART_MSR_CTS | UART_MSR_DCTS;
		mutex_unlock(&mdev_state->rxtx_lock);

		break;

	case UART_SCR:
		*buf = mdev_state->s[index].uart_reg[offset];
		break;

	default:
		break;
	}
}

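/*
 * Capture the BAR base addresses that the guest programmed into the
 * virtual config space into region_info[].start.
 */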
static void mdev_read_base(struct mdev_state *mdev_state)
{
	int index, pos;
	u32 start_lo, start_hi;
	u32 mem_type;

	pos = PCI_BASE_ADDRESS_0;

	for (index = 0; index <= VFIO_PCI_BAR5_REGION_INDEX; index++) {

		if (!mdev_state->region_info[index].size)
			continue;

		start_lo = (*(u32 *)(mdev_state->vconfig + pos)) &
			PCI_BASE_ADDRESS_MEM_MASK;
		mem_type = (*(u32 *)(mdev_state->vconfig + pos)) &
			PCI_BASE_ADDRESS_MEM_TYPE_MASK;

		switch (mem_type) {
		case PCI_BASE_ADDRESS_MEM_TYPE_64:
			start_hi = (*(u32 *)(mdev_state->vconfig + pos + 4));
			pos += 4;
			break;
		case PCI_BASE_ADDRESS_MEM_TYPE_32:
		case PCI_BASE_ADDRESS_MEM_TYPE_1M:
			/* 1M mem BAR treated as 32-bit BAR */
		default:
			/* mem unknown type treated as 32-bit BAR */
			start_hi = 0;
			break;
		}
		pos += 4;
		mdev_state->region_info[index].start = ((u64)start_hi << 32) |
							start_lo;
	}
}

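/*
 * Common access path for both read() and write(): the region index encoded
 * in the upper bits of @pos selects config space or a BAR, the remaining
 * bits give the offset within that region.
 */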
static ssize_t mdev_access(struct mdev_state *mdev_state, u8 *buf, size_t count,
			   loff_t pos, bool is_write)
{
	unsigned int index;
	loff_t offset;
	int ret = 0;

	if (!buf)
		return -EINVAL;

	mutex_lock(&mdev_state->ops_lock);

	index = MTTY_VFIO_PCI_OFFSET_TO_INDEX(pos);
	offset = pos & MTTY_VFIO_PCI_OFFSET_MASK;
	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:

#if defined(DEBUG)
		pr_info("%s: PCI config space %s at offset 0x%llx\n",
			__func__, is_write ? "write" : "read", offset);
#endif
		if (is_write) {
			dump_buffer(buf, count);
			handle_pci_cfg_write(mdev_state, offset, buf, count);
		} else {
			memcpy(buf, (mdev_state->vconfig + offset), count);
			dump_buffer(buf, count);
		}

		break;

	case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
		if (!mdev_state->region_info[index].start)
			mdev_read_base(mdev_state);

		if (is_write) {
			dump_buffer(buf, count);

#if defined(DEBUG_REGS)
			pr_info("%s: BAR%d WR @0x%llx %s val:0x%02x dlab:%d\n",
				__func__, index, offset, wr_reg[offset],
				*buf, mdev_state->s[index].dlab);
#endif
			handle_bar_write(index, mdev_state, offset, buf, count);
		} else {
			handle_bar_read(index, mdev_state, offset, buf, count);
			dump_buffer(buf, count);

#if defined(DEBUG_REGS)
			pr_info("%s: BAR%d RD @0x%llx %s val:0x%02x dlab:%d\n",
				__func__, index, offset, rd_reg[offset],
				*buf, mdev_state->s[index].dlab);
#endif
		}
		break;

	default:
		ret = -1;
		goto accessfailed;
	}

	ret = count;


accessfailed:
	mutex_unlock(&mdev_state->ops_lock);

	return ret;
}

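/*
 * Allocate and initialize the per-mdev state when a new mediated device is
 * created; the type group id selects a single or dual port device.
 */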
static int mtty_probe(struct mdev_device *mdev)
{
	struct mdev_state *mdev_state;
	int nr_ports = mdev_get_type_group_id(mdev) + 1;
	int ret;

	mdev_state = kzalloc(sizeof(struct mdev_state), GFP_KERNEL);
	if (mdev_state == NULL)
		return -ENOMEM;

	vfio_init_group_dev(&mdev_state->vdev, &mdev->dev, &mtty_dev_ops);

	mdev_state->nr_ports = nr_ports;
	mdev_state->irq_index = -1;
	mdev_state->s[0].max_fifo_size = MAX_FIFO_SIZE;
	mdev_state->s[1].max_fifo_size = MAX_FIFO_SIZE;
	mutex_init(&mdev_state->rxtx_lock);
	mdev_state->vconfig = kzalloc(MTTY_CONFIG_SPACE_SIZE, GFP_KERNEL);

	if (mdev_state->vconfig == NULL) {
		kfree(mdev_state);
		return -ENOMEM;
	}

	mutex_init(&mdev_state->ops_lock);
	mdev_state->mdev = mdev;

	mtty_create_config_space(mdev_state);

	mutex_lock(&mdev_list_lock);
	list_add(&mdev_state->next, &mdev_devices_list);
	mutex_unlock(&mdev_list_lock);

	ret = vfio_register_group_dev(&mdev_state->vdev);
	if (ret) {
		/* undo the list insertion and free what was allocated above */
		mutex_lock(&mdev_list_lock);
		list_del(&mdev_state->next);
		mutex_unlock(&mdev_list_lock);
		kfree(mdev_state->vconfig);
		kfree(mdev_state);
		return ret;
	}
	dev_set_drvdata(&mdev->dev, mdev_state);
	return 0;
}

static void mtty_remove(struct mdev_device *mdev)
{
	struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev);

	vfio_unregister_group_dev(&mdev_state->vdev);
	mutex_lock(&mdev_list_lock);
	list_del(&mdev_state->next);
	mutex_unlock(&mdev_list_lock);

	kfree(mdev_state->vconfig);
	kfree(mdev_state);
}

static int mtty_reset(struct mdev_state *mdev_state)
{
	pr_info("%s: called\n", __func__);

	return 0;
}

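/* Split a user read into naturally aligned 4/2/1 byte device accesses. */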
static ssize_t mtty_read(struct vfio_device *vdev, char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 2;
		} else {
			u8 val;

			ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;

read_err:
	return -EFAULT;
}

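/* Split a user write into naturally aligned 4/2/1 byte device accesses. */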
static ssize_t mtty_write(struct vfio_device *vdev, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 1;
		}
		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;
write_err:
	return -EFAULT;
}

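/*
 * Handle VFIO_DEVICE_SET_IRQS: remember the INTx or MSI eventfd that user
 * space provided so the device model can inject interrupts later.
 */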
static int mtty_set_irqs(struct mdev_state *mdev_state, uint32_t flags,
			 unsigned int index, unsigned int start,
			 unsigned int count, void *data)
{
	int ret = 0;

	mutex_lock(&mdev_state->ops_lock);
	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
		{
			if (flags & VFIO_IRQ_SET_DATA_NONE) {
				pr_info("%s: disable INTx\n", __func__);
				if (mdev_state->intx_evtfd)
					eventfd_ctx_put(mdev_state->intx_evtfd);
				break;
			}

			if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
				int fd = *(int *)data;

				if (fd > 0) {
					struct eventfd_ctx *evt;

					evt = eventfd_ctx_fdget(fd);
					if (IS_ERR(evt)) {
						ret = PTR_ERR(evt);
						break;
					}
					mdev_state->intx_evtfd = evt;
					mdev_state->irq_fd = fd;
					mdev_state->irq_index = index;
					break;
				}
			}
			break;
		}
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			if (flags & VFIO_IRQ_SET_DATA_NONE) {
				if (mdev_state->msi_evtfd)
					eventfd_ctx_put(mdev_state->msi_evtfd);
				pr_info("%s: disable MSI\n", __func__);
				mdev_state->irq_index = VFIO_PCI_INTX_IRQ_INDEX;
				break;
			}
			if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
				int fd = *(int *)data;
				struct eventfd_ctx *evt;

				if (fd <= 0)
					break;

				if (mdev_state->msi_evtfd)
					break;

				evt = eventfd_ctx_fdget(fd);
				if (IS_ERR(evt)) {
					ret = PTR_ERR(evt);
					break;
				}
				mdev_state->msi_evtfd = evt;
				mdev_state->irq_fd = fd;
				mdev_state->irq_index = index;
			}
			break;
		}
		break;
	case VFIO_PCI_MSIX_IRQ_INDEX:
		pr_info("%s: MSIX_IRQ\n", __func__);
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		pr_info("%s: ERR_IRQ\n", __func__);
		break;
	case VFIO_PCI_REQ_IRQ_INDEX:
		pr_info("%s: REQ_IRQ\n", __func__);
		break;
	}

	mutex_unlock(&mdev_state->ops_lock);
	return ret;
}

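/*
 * Signal the interrupt eventfd registered through VFIO_DEVICE_SET_IRQS,
 * using MSI if it was configured and INTx otherwise.
 */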
static int mtty_trigger_interrupt(struct mdev_state *mdev_state)
{
	int ret = -1;

	if ((mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX) &&
	    (!mdev_state->msi_evtfd))
		return -EINVAL;
	else if ((mdev_state->irq_index == VFIO_PCI_INTX_IRQ_INDEX) &&
		 (!mdev_state->intx_evtfd)) {
		pr_info("%s: Intr eventfd not found\n", __func__);
		return -EINVAL;
	}

	if (mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX)
		ret = eventfd_signal(mdev_state->msi_evtfd, 1);
	else
		ret = eventfd_signal(mdev_state->intx_evtfd, 1);

#if defined(DEBUG_INTR)
	pr_info("Intx triggered\n");
#endif
	if (ret != 1)
		pr_err("%s: eventfd signal failed (%d)\n", __func__, ret);

	return ret;
}

static int mtty_get_region_info(struct mdev_state *mdev_state,
			 struct vfio_region_info *region_info,
			 u16 *cap_type_id, void **cap_type)
{
	unsigned int size = 0;
	u32 bar_index;

	bar_index = region_info->index;
	if (bar_index >= VFIO_PCI_NUM_REGIONS)
		return -EINVAL;

	mutex_lock(&mdev_state->ops_lock);

	switch (bar_index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		size = MTTY_CONFIG_SPACE_SIZE;
		break;
	case VFIO_PCI_BAR0_REGION_INDEX:
		size = MTTY_IO_BAR_SIZE;
		break;
	case VFIO_PCI_BAR1_REGION_INDEX:
		if (mdev_state->nr_ports == 2)
			size = MTTY_IO_BAR_SIZE;
		break;
	default:
		size = 0;
		break;
	}

	mdev_state->region_info[bar_index].size = size;
	mdev_state->region_info[bar_index].vfio_offset =
		MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);

	region_info->size = size;
	region_info->offset = MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);
	region_info->flags = VFIO_REGION_INFO_FLAG_READ |
		VFIO_REGION_INFO_FLAG_WRITE;
	mutex_unlock(&mdev_state->ops_lock);
	return 0;
}

static int mtty_get_irq_info(struct vfio_irq_info *irq_info)
{
	switch (irq_info->index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
	case VFIO_PCI_MSI_IRQ_INDEX:
	case VFIO_PCI_REQ_IRQ_INDEX:
		break;

	default:
		return -EINVAL;
	}

	irq_info->flags = VFIO_IRQ_INFO_EVENTFD;
	irq_info->count = 1;

	if (irq_info->index == VFIO_PCI_INTX_IRQ_INDEX)
		irq_info->flags |= (VFIO_IRQ_INFO_MASKABLE |
				VFIO_IRQ_INFO_AUTOMASKED);
	else
		irq_info->flags |= VFIO_IRQ_INFO_NORESIZE;

	return 0;
}

static int mtty_get_device_info(struct vfio_device_info *dev_info)
{
	dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
	dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
	dev_info->num_irqs = VFIO_PCI_NUM_IRQS;

	return 0;
}

static long mtty_ioctl(struct vfio_device *vdev, unsigned int cmd,
			unsigned long arg)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	int ret = 0;
	unsigned long minsz;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mtty_get_device_info(&info);
		if (ret)
			return ret;

		memcpy(&mdev_state->dev_info, &info, sizeof(info));

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;
		u16 cap_type_id = 0;
		void *cap_type = NULL;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mtty_get_region_info(mdev_state, &info, &cap_type_id,
					   &cap_type);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if ((info.argsz < minsz) ||
		    (info.index >= mdev_state->dev_info.num_irqs))
			return -EINVAL;

		ret = mtty_get_irq_info(&info);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_DEVICE_SET_IRQS:
	{
		struct vfio_irq_set hdr;
		u8 *data = NULL, *ptr = NULL;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		ret = vfio_set_irqs_validate_and_prepare(&hdr,
						mdev_state->dev_info.num_irqs,
						VFIO_PCI_NUM_IRQS,
						&data_size);
		if (ret)
			return ret;

		if (data_size) {
			ptr = data = memdup_user((void __user *)(arg + minsz),
						 data_size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		ret = mtty_set_irqs(mdev_state, hdr.flags, hdr.index, hdr.start,
				    hdr.count, data);

		kfree(ptr);
		return ret;
	}
	case VFIO_DEVICE_RESET:
		return mtty_reset(mdev_state);
	}
	return -ENOTTY;
}

static int mtty_open(struct vfio_device *vdev)
{
	pr_info("%s\n", __func__);
	return 0;
}

static void mtty_close(struct vfio_device *mdev)
{
	pr_info("%s\n", __func__);
}

static ssize_t
sample_mtty_dev_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	return sprintf(buf, "This is phy device\n");
}

static DEVICE_ATTR_RO(sample_mtty_dev);

static struct attribute *mtty_dev_attrs[] = {
	&dev_attr_sample_mtty_dev.attr,
	NULL,
};

static const struct attribute_group mtty_dev_group = {
	.name  = "mtty_dev",
	.attrs = mtty_dev_attrs,
};

static const struct attribute_group *mtty_dev_groups[] = {
	&mtty_dev_group,
	NULL,
};

static ssize_t
sample_mdev_dev_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	if (mdev_from_dev(dev))
		return sprintf(buf, "This is MDEV %s\n", dev_name(dev));

	return sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(sample_mdev_dev);

static struct attribute *mdev_dev_attrs[] = {
	&dev_attr_sample_mdev_dev.attr,
	NULL,
};

static const struct attribute_group mdev_dev_group = {
	.name  = "vendor",
	.attrs = mdev_dev_attrs,
};

static const struct attribute_group *mdev_dev_groups[] = {
	&mdev_dev_group,
	NULL,
};

static ssize_t name_show(struct mdev_type *mtype,
			 struct mdev_type_attribute *attr, char *buf)
{
	static const char *name_str[2] = { "Single port serial",
					   "Dual port serial" };

	return sysfs_emit(buf, "%s\n",
			  name_str[mtype_get_type_group_id(mtype)]);
}

static MDEV_TYPE_ATTR_RO(name);

static ssize_t available_instances_show(struct mdev_type *mtype,
					struct mdev_type_attribute *attr,
					char *buf)
{
	struct mdev_state *mds;
	unsigned int ports = mtype_get_type_group_id(mtype) + 1;
	int used = 0;

	list_for_each_entry(mds, &mdev_devices_list, next)
		used += mds->nr_ports;

	return sprintf(buf, "%d\n", (MAX_MTTYS - used)/ports);
}

static MDEV_TYPE_ATTR_RO(available_instances);

static ssize_t device_api_show(struct mdev_type *mtype,
			       struct mdev_type_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
}

static MDEV_TYPE_ATTR_RO(device_api);

static struct attribute *mdev_types_attrs[] = {
	&mdev_type_attr_name.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_available_instances.attr,
	NULL,
};

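/*
 * Two mdev types are exposed: type directory "1" creates a single port
 * device and "2" creates a dual port device (see name_show() above).
 */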
static struct attribute_group mdev_type_group1 = {
	.name  = "1",
	.attrs = mdev_types_attrs,
};

static struct attribute_group mdev_type_group2 = {
	.name  = "2",
	.attrs = mdev_types_attrs,
};

static struct attribute_group *mdev_type_groups[] = {
	&mdev_type_group1,
	&mdev_type_group2,
	NULL,
};

static const struct vfio_device_ops mtty_dev_ops = {
	.name = "vfio-mtty",
	.open = mtty_open,
	.release = mtty_close,
	.read = mtty_read,
	.write = mtty_write,
	.ioctl = mtty_ioctl,
};

static struct mdev_driver mtty_driver = {
	.driver = {
		.name = "mtty",
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
		.dev_groups = mdev_dev_groups,
	},
	.probe = mtty_probe,
	.remove	= mtty_remove,
};

static const struct mdev_parent_ops mdev_fops = {
	.owner			= THIS_MODULE,
	.device_driver		= &mtty_driver,
	.dev_attr_groups	= mtty_dev_groups,
	.supported_type_groups	= mdev_type_groups,
};

static void mtty_device_release(struct device *dev)
{
	dev_dbg(dev, "mtty: released\n");
}

static int __init mtty_dev_init(void)
{
	int ret = 0;

	pr_info("mtty_dev: %s\n", __func__);

	memset(&mtty_dev, 0, sizeof(mtty_dev));

	idr_init(&mtty_dev.vd_idr);

	ret = alloc_chrdev_region(&mtty_dev.vd_devt, 0, MINORMASK + 1,
				  MTTY_NAME);

	if (ret < 0) {
		pr_err("Error: failed to register mtty_dev, err:%d\n", ret);
		return ret;
	}

	cdev_init(&mtty_dev.vd_cdev, &vd_fops);
	cdev_add(&mtty_dev.vd_cdev, mtty_dev.vd_devt, MINORMASK + 1);

	pr_info("major_number:%d\n", MAJOR(mtty_dev.vd_devt));

	ret = mdev_register_driver(&mtty_driver);
	if (ret)
		goto err_cdev;

	mtty_dev.vd_class = class_create(THIS_MODULE, MTTY_CLASS_NAME);

	if (IS_ERR(mtty_dev.vd_class)) {
		pr_err("Error: failed to register mtty_dev class\n");
		ret = PTR_ERR(mtty_dev.vd_class);
		goto err_driver;
	}

	mtty_dev.dev.class = mtty_dev.vd_class;
	mtty_dev.dev.release = mtty_device_release;
	dev_set_name(&mtty_dev.dev, "%s", MTTY_NAME);

	ret = device_register(&mtty_dev.dev);
	if (ret)
		goto err_class;

	ret = mdev_register_device(&mtty_dev.dev, &mdev_fops);
	if (ret)
		goto err_device;

	mutex_init(&mdev_list_lock);
	INIT_LIST_HEAD(&mdev_devices_list);
	return 0;

err_device:
	device_unregister(&mtty_dev.dev);
err_class:
	class_destroy(mtty_dev.vd_class);
err_driver:
	mdev_unregister_driver(&mtty_driver);
err_cdev:
	cdev_del(&mtty_dev.vd_cdev);
	unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK + 1);
	return ret;
}

static void __exit mtty_dev_exit(void)
{
	mtty_dev.dev.bus = NULL;
	mdev_unregister_device(&mtty_dev.dev);

	device_unregister(&mtty_dev.dev);
	idr_destroy(&mtty_dev.vd_idr);
	mdev_unregister_driver(&mtty_driver);
	cdev_del(&mtty_dev.vd_cdev);
	unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK + 1);
	class_destroy(mtty_dev.vd_class);
	mtty_dev.vd_class = NULL;
	pr_info("mtty_dev: Unloaded!\n");
}

module_init(mtty_dev_init)
module_exit(mtty_dev_exit)

MODULE_LICENSE("GPL v2");
MODULE_INFO(supported, "Test driver that simulates a serial port over PCI");
MODULE_VERSION(VERSION_STRING);
MODULE_AUTHOR(DRIVER_AUTHOR);