/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */

#include "linux/cpumask.h"
#include "linux/hardirq.h"
#include "linux/interrupt.h"
#include "linux/kernel_stat.h"
#include "linux/module.h"
#include "linux/sched.h"
#include "linux/seq_file.h"
#include "linux/slab.h"
#include "as-layout.h"
#include "kern_util.h"
#include "os.h"

/*
 * Generic, controller-independent functions:
 */

int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *) v, j;
        struct irqaction * action;
        unsigned long flags;

        if (i == 0) {
                seq_printf(p, " ");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%d ", j);
                seq_putc(p, '\n');
        }

        if (i < NR_IRQS) {
                raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
                action = irq_desc[i].action;
                if (!action)
                        goto skip;
                seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
                seq_printf(p, "%10u ", kstat_irqs(i));
#else
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
                seq_printf(p, " %14s", irq_desc[i].chip->name);
                seq_printf(p, " %s", action->name);

                for (action = action->next; action; action = action->next)
                        seq_printf(p, ", %s", action->name);

                seq_putc(p, '\n');
skip:
                raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
        } else if (i == NR_IRQS)
                seq_putc(p, '\n');

        return 0;
}

/*
 * This list is accessed under irq_lock, except in sigio_handler,
 * where it is safe from being modified.  IRQ handlers won't change it -
 * if an IRQ source has vanished, it will be freed by free_irqs just
 * before returning from sigio_handler.  That will process a separate
 * list of irqs to free, with its own locking, coming back here to
 * remove list elements, taking the irq_lock to do so.
 */
static struct irq_fd *active_fds = NULL;
static struct irq_fd **last_irq_ptr = &active_fds;

extern void free_irqs(void);

void sigio_handler(int sig, struct uml_pt_regs *regs)
{
        struct irq_fd *irq_fd;
        int n;

        if (smp_sigio_handler())
                return;

        while (1) {
                n = os_waiting_for_events(active_fds);
                if (n <= 0) {
                        if (n == -EINTR)
                                continue;
                        else break;
                }

                for (irq_fd = active_fds; irq_fd != NULL;
                     irq_fd = irq_fd->next) {
                        if (irq_fd->current_events != 0) {
                                irq_fd->current_events = 0;
                                do_IRQ(irq_fd->irq, regs);
                        }
                }
        }

        free_irqs();
}
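
/*
 * For reference, a rough sketch of the contract sigio_handler() relies on.
 * os_waiting_for_events() lives in the os-Linux layer; the version below is
 * only an illustration of the assumed behaviour (poll the registered
 * descriptors without blocking and record the ready ones in
 * ->current_events), not the actual implementation.  The pollfds[] array and
 * pollfds_num are assumed names for the host-side state that this file
 * manipulates through os_create_pollfd() and os_set_pollfd().
 *
 *      int os_waiting_for_events(struct irq_fd *active_fds)
 *      {
 *              struct irq_fd *irq_fd;
 *              int i, n;
 *
 *              n = poll(pollfds, pollfds_num, 0);
 *              if (n < 0)
 *                      return (errno == EINTR) ? -EINTR : -errno;
 *              if (n == 0)
 *                      return 0;
 *
 *              for (i = 0, irq_fd = active_fds; irq_fd != NULL;
 *                   irq_fd = irq_fd->next, i++) {
 *                      if (pollfds[i].revents != 0) {
 *                              irq_fd->current_events = pollfds[i].revents;
 *                              pollfds[i].fd = -1;
 *                      }
 *              }
 *              return n;
 *      }
 */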

static DEFINE_SPINLOCK(irq_lock);

static int activate_fd(int irq, int fd, int type, void *dev_id)
{
        struct pollfd *tmp_pfd;
        struct irq_fd *new_fd, *irq_fd;
        unsigned long flags;
        int events, err, n;

        err = os_set_fd_async(fd);
        if (err < 0)
                goto out;

        err = -ENOMEM;
        new_fd = kmalloc(sizeof(struct irq_fd), GFP_KERNEL);
        if (new_fd == NULL)
                goto out;

        if (type == IRQ_READ)
                events = UM_POLLIN | UM_POLLPRI;
        else events = UM_POLLOUT;
        *new_fd = ((struct irq_fd) { .next              = NULL,
                                     .id                = dev_id,
                                     .fd                = fd,
                                     .type              = type,
                                     .irq               = irq,
                                     .events            = events,
                                     .current_events    = 0 } );

        err = -EBUSY;
        spin_lock_irqsave(&irq_lock, flags);
        for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) {
                if ((irq_fd->fd == fd) && (irq_fd->type == type)) {
                        printk(KERN_ERR "Registering fd %d twice\n", fd);
                        printk(KERN_ERR "Irqs : %d, %d\n", irq_fd->irq, irq);
                        printk(KERN_ERR "Ids : 0x%p, 0x%p\n", irq_fd->id,
                               dev_id);
                        goto out_unlock;
                }
        }

        if (type == IRQ_WRITE)
                fd = -1;

        tmp_pfd = NULL;
        n = 0;

        while (1) {
                n = os_create_pollfd(fd, events, tmp_pfd, n);
                if (n == 0)
                        break;

                /*
                 * n > 0 means the new pollfd did not fit into the current
                 * pollfds array, and tmp_pfd is either NULL or too small to
                 * hold the enlarged array; at least n bytes are needed.
                 *
                 * We have to drop the lock in order to call kmalloc, which
                 * might sleep.  If something else changed the pollfds array
                 * in the meantime so that the new entry still doesn't fit,
                 * we free tmp_pfd and try again.
                 */
                spin_unlock_irqrestore(&irq_lock, flags);
                kfree(tmp_pfd);

                tmp_pfd = kmalloc(n, GFP_KERNEL);
                if (tmp_pfd == NULL)
                        goto out_kfree;

                spin_lock_irqsave(&irq_lock, flags);
        }

        *last_irq_ptr = new_fd;
        last_irq_ptr = &new_fd->next;

        spin_unlock_irqrestore(&irq_lock, flags);

        /*
         * This calls activate_fd, so it has to be outside the critical
         * section.
         */
        maybe_sigio_broken(fd, (type == IRQ_READ));

        return 0;

out_unlock:
        spin_unlock_irqrestore(&irq_lock, flags);
out_kfree:
        kfree(new_fd);
out:
        return err;
}
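
/*
 * The retry loop above depends on the os_create_pollfd() contract: it
 * returns 0 once the descriptor has been added to the host-side pollfd
 * array, or, when that array is full and the supplied buffer is missing or
 * too small, the number of bytes a replacement buffer needs.  A simplified
 * sketch of that behaviour follows; it is illustrative only - the real
 * implementation, and the pollfds/pollfds_size/pollfds_num names, belong to
 * the os-Linux layer and may differ in detail.
 *
 *      int os_create_pollfd(int fd, int events, void *tmp_pfd, int size_tmpfds)
 *      {
 *              if (pollfds_num == pollfds_size) {
 *                      if (size_tmpfds <= pollfds_size * sizeof(pollfds[0]))
 *                              return (pollfds_size + 1) * sizeof(pollfds[0]);
 *
 *                      memcpy(tmp_pfd, pollfds,
 *                             sizeof(pollfds[0]) * pollfds_size);
 *                      kfree(pollfds);
 *                      pollfds = tmp_pfd;
 *                      pollfds_size++;
 *              }
 *
 *              pollfds[pollfds_num++] = ((struct pollfd) { .fd      = fd,
 *                                                          .events  = events,
 *                                                          .revents = 0 });
 *              return 0;
 *      }
 */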

static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
{
        unsigned long flags;

        spin_lock_irqsave(&irq_lock, flags);
        os_free_irq_by_cb(test, arg, active_fds, &last_irq_ptr);
        spin_unlock_irqrestore(&irq_lock, flags);
}

struct irq_and_dev {
        int irq;
        void *dev;
};

static int same_irq_and_dev(struct irq_fd *irq, void *d)
{
        struct irq_and_dev *data = d;

        return ((irq->irq == data->irq) && (irq->id == data->dev));
}

static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
        struct irq_and_dev data = ((struct irq_and_dev) { .irq  = irq,
                                                          .dev  = dev });

        free_irq_by_cb(same_irq_and_dev, &data);
}

static int same_fd(struct irq_fd *irq, void *fd)
{
        return (irq->fd == *((int *)fd));
}

void free_irq_by_fd(int fd)
{
        free_irq_by_cb(same_fd, &fd);
}

/* Must be called with irq_lock held */
static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
{
        struct irq_fd *irq;
        int i = 0;
        int fdi;

        for (irq = active_fds; irq != NULL; irq = irq->next) {
                if ((irq->fd == fd) && (irq->irq == irqnum))
                        break;
                i++;
        }
        if (irq == NULL) {
                printk(KERN_ERR "find_irq_by_fd doesn't have descriptor %d\n",
                       fd);
                goto out;
        }
        fdi = os_get_pollfd(i);
        if ((fdi != -1) && (fdi != fd)) {
                printk(KERN_ERR "find_irq_by_fd - mismatch between active_fds "
                       "and pollfds, fd %d vs %d, need %d\n", irq->fd,
                       fdi, fd);
                irq = NULL;
                goto out;
        }
        *index_out = i;
out:
        return irq;
}

void reactivate_fd(int fd, int irqnum)
{
        struct irq_fd *irq;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&irq_lock, flags);
        irq = find_irq_by_fd(fd, irqnum, &i);
        if (irq == NULL) {
                spin_unlock_irqrestore(&irq_lock, flags);
                return;
        }
        os_set_pollfd(i, irq->fd);
        spin_unlock_irqrestore(&irq_lock, flags);

        add_sigio_fd(fd);
}

void deactivate_fd(int fd, int irqnum)
{
        struct irq_fd *irq;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&irq_lock, flags);
        irq = find_irq_by_fd(fd, irqnum, &i);
        if (irq == NULL) {
                spin_unlock_irqrestore(&irq_lock, flags);
                return;
        }

        os_set_pollfd(i, -1);
        spin_unlock_irqrestore(&irq_lock, flags);

        ignore_sigio_fd(fd);
}

/*
 * Called just before shutdown in order to provide a clean exec
 * environment in case the system is rebooting.  No locking because
 * that would cause a pointless shutdown hang if something hadn't
 * released the lock.
 */
int deactivate_all_fds(void)
{
        struct irq_fd *irq;
        int err;

        for (irq = active_fds; irq != NULL; irq = irq->next) {
                err = os_clear_fd_async(irq->fd);
                if (err)
                        return err;
        }
        /* If there is a signal already queued, after unblocking ignore it */
        os_set_ioignore();

        return 0;
}

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
        irq_enter();
        __do_IRQ(irq);
        irq_exit();
        set_irq_regs(old_regs);
        return 1;
}

int um_request_irq(unsigned int irq, int fd, int type,
                   irq_handler_t handler,
                   unsigned long irqflags, const char * devname,
                   void *dev_id)
{
        int err;

        if (fd != -1) {
                err = activate_fd(irq, fd, type, dev_id);
                if (err)
                        return err;
        }

        return request_irq(irq, handler, irqflags, devname, dev_id);
}

EXPORT_SYMBOL(um_request_irq);
EXPORT_SYMBOL(reactivate_fd);
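
/*
 * A typical caller owns a host file descriptor and wants a Linux interrupt
 * delivered whenever that descriptor becomes readable.  The snippet below
 * only illustrates the calling convention - the irq number, handler and
 * device structure are made up; the real users are drivers such as the
 * console/line, network and ubd drivers.
 *
 *      static irqreturn_t my_intr(int irq, void *dev_id)
 *      {
 *              struct my_device *dev = dev_id;
 *
 *              ... drain dev->fd ...
 *              reactivate_fd(dev->fd, MY_DEV_IRQ);
 *              return IRQ_HANDLED;
 *      }
 *
 *      err = um_request_irq(MY_DEV_IRQ, dev->fd, IRQ_READ, my_intr,
 *                           IRQF_SHARED, "my_dev", dev);
 *      if (err < 0)
 *              printk(KERN_ERR "my_dev: um_request_irq failed, err = %d\n",
 *                     err);
 *
 * A descriptor typically stops being polled once it has triggered, so
 * handlers re-arm it with reactivate_fd() after draining it.
 */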

/*
 * irq_chip must define (startup || enable) &&
 * (shutdown || disable) && end
 */
static void dummy(unsigned int irq)
{
}

/* This is used for everything other than the timer. */
static struct irq_chip normal_irq_type = {
        .name = "SIGIO",
        .release = free_irq_by_irq_and_dev,
        .disable = dummy,
        .enable = dummy,
        .ack = dummy,
        .end = dummy
};

static struct irq_chip SIGVTALRM_irq_type = {
        .name = "SIGVTALRM",
        .release = free_irq_by_irq_and_dev,
        .shutdown = dummy, /* never called */
        .disable = dummy,
        .enable = dummy,
        .ack = dummy,
        .end = dummy
};

void __init init_IRQ(void)
{
        int i;

        irq_desc[TIMER_IRQ].status = IRQ_DISABLED;
        irq_desc[TIMER_IRQ].action = NULL;
        irq_desc[TIMER_IRQ].depth = 1;
        irq_desc[TIMER_IRQ].chip = &SIGVTALRM_irq_type;
        enable_irq(TIMER_IRQ);
        for (i = 1; i < NR_IRQS; i++) {
                irq_desc[i].status = IRQ_DISABLED;
                irq_desc[i].action = NULL;
                irq_desc[i].depth = 1;
                irq_desc[i].chip = &normal_irq_type;
                enable_irq(i);
        }
}

/*
 * IRQ stack entry and exit:
 *
 * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
 * and switch over to the IRQ stack after some preparation.  We use
 * sigaltstack to receive signals on a separate stack from the start.
 * These two functions make sure the rest of the kernel won't be too
 * upset by being on a different stack.  The IRQ stack has a
 * thread_info structure at the bottom so that current et al continue
 * to work.
 *
 * to_irq_stack copies the current task's thread_info to the IRQ stack
 * thread_info and sets the task's stack to point to the IRQ stack.
 *
 * from_irq_stack copies the thread_info struct back (flags may have
 * been modified) and resets the task's stack pointer.
 *
 * Tricky bits -
 *
 * What happens when two signals race each other?  UML doesn't block
 * signals with sigprocmask, SA_DEFER, or sa_mask, so a second signal
 * could arrive while a previous one is still setting up the
 * thread_info.
 *
 * There are three cases -
 *     The first interrupt on the stack - sets up the thread_info and
 * handles the interrupt
 *     A nested interrupt interrupting the copying of the thread_info -
 * can't handle the interrupt, as the stack is in an unknown state
 *     A nested interrupt not interrupting the copying of the
 * thread_info - doesn't do any setup, just handles the interrupt
 *
 * The first job is to figure out whether we interrupted stack setup.
 * This is done by xchging the signal mask with pending_mask (below).
 * If the value that comes back is zero, then there is no setup in
 * progress, and the interrupt can be handled.  If the value is
 * non-zero, then there is stack setup in progress.  In order to have
 * the interrupt handled, we leave our signal in the mask, and it will
 * be handled by the upper handler after it has set up the stack.
 *
 * Next is to figure out whether we are the outer handler or a nested
 * one.  As part of setting up the stack, thread_info->real_thread is
 * set to non-NULL (and is reset to NULL on exit).  This is the
 * nesting indicator.  If it is non-NULL, then the stack is already
 * set up and the handler can run.
 */

static unsigned long pending_mask;

unsigned long to_irq_stack(unsigned long *mask_out)
{
        struct thread_info *ti;
        unsigned long mask, old;
        int nested;

        mask = xchg(&pending_mask, *mask_out);
        if (mask != 0) {
                /*
                 * If any interrupts come in at this point, we want to
                 * make sure that their bits aren't lost by our
                 * putting our bit in.  So, this loop accumulates bits
                 * until xchg returns the same value that we put in.
                 * When that happens, there were no new interrupts,
                 * and pending_mask contains a bit for each interrupt
                 * that came in.
                 */
                old = *mask_out;
                do {
                        old |= mask;
                        mask = xchg(&pending_mask, old);
                } while (mask != old);
                return 1;
        }

        ti = current_thread_info();
        nested = (ti->real_thread != NULL);
        if (!nested) {
                struct task_struct *task;
                struct thread_info *tti;

                task = cpu_tasks[ti->cpu].task;
                tti = task_thread_info(task);

                *ti = *tti;
                ti->real_thread = tti;
                task->stack = ti;
        }

        mask = xchg(&pending_mask, 0);
        *mask_out |= mask | nested;
        return 0;
}

unsigned long from_irq_stack(int nested)
{
        struct thread_info *ti, *to;
        unsigned long mask;

        ti = current_thread_info();

        pending_mask = 1;

        to = ti->real_thread;
        current->stack = to;
        ti->real_thread = NULL;
        *to = *ti;

        mask = xchg(&pending_mask, 0);
        return mask & ~1;
}

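/*
 * For completeness, a simplified sketch of the signal-entry wrapper these
 * two helpers are written for.  The real wrapper lives in the os-Linux
 * signal code and differs in detail; the handlers[] dispatch table and the
 * register handling are assumed here.  A non-zero return from to_irq_stack()
 * means another handler is still setting up the stack and has absorbed our
 * bit into pending_mask, so we simply return and let it run our interrupt.
 * Bit 0 of the returned mask is the nesting flag, not a signal.
 *
 *      static void hard_handler(int sig, struct uml_pt_regs *regs)
 *      {
 *              unsigned long pending = 1UL << sig;
 *
 *              do {
 *                      int nested;
 *
 *                      if (to_irq_stack(&pending))
 *                              return;
 *
 *                      nested = pending & 1;
 *                      pending &= ~1;
 *
 *                      while (pending != 0) {
 *                              sig = ffs(pending) - 1;
 *                              pending &= ~(1UL << sig);
 *                              (*handlers[sig])(sig, regs);
 *                      }
 *
 *                      if (!nested)
 *                              pending = from_irq_stack(nested);
 *              } while (pending != 0);
 *      }
 */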