// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IXP4xx Queue Manager driver for Linux
 *
 * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
 */

8#include <linux/ioport.h>
9#include <linux/interrupt.h>
10#include <linux/kernel.h>
11#include <linux/module.h>
Linus Walleij95407242019-01-27 14:08:36 +010012#include <linux/of.h>
Linus Walleij81bca322019-02-10 17:14:10 +010013#include <linux/platform_device.h>
Linus Walleij4af20dc2019-02-10 14:55:58 +010014#include <linux/soc/ixp4xx/qmgr.h>
Arnd Bergmann09aa9aa2019-08-25 21:57:25 +020015#include <linux/soc/ixp4xx/cpu.h>
Krzysztof Halasa82a96f52008-01-01 21:55:23 +010016
/* Memory-mapped queue manager registers, set up in ixp4xx_qmgr_probe() */
static struct qmgr_regs __iomem *qmgr_regs;
/* IRQ lines for the two queue halves (queues 0-31 and 32-63) */
static int qmgr_irq_1;
static int qmgr_irq_2;
/* Protects irq_handlers[], irq_pdevs[] and used_sram_bitmap[] */
static spinlock_t qmgr_lock;
static u32 used_sram_bitmap[4]; /* 128 16-dword pages */
/* Per-queue interrupt callbacks and their opaque context pointers */
static void (*irq_handlers[QUEUES])(void *pdev);
static void *irq_pdevs[QUEUES];

#if DEBUG_QMGR
/* Human-readable queue descriptions, filled in by qmgr_request_queue() */
char qmgr_queue_descs[QUEUES][32];
#endif
28
/**
 * qmgr_put_entry() - push one 32-bit entry onto a hardware queue
 * @queue: queue number
 * @val: value to enqueue
 *
 * Writes straight to the queue's access register; the caller is
 * responsible for making sure the queue is not full.
 */
void qmgr_put_entry(unsigned int queue, u32 val)
{
#if DEBUG_QMGR
	/* NOTE(review): this tests the array's address, which is never
	 * NULL — presumably qmgr_queue_descs[queue][0] was intended */
	BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */

	printk(KERN_DEBUG "Queue %s(%i) put %X\n",
	       qmgr_queue_descs[queue], queue, val);
#endif
	__raw_writel(val, &qmgr_regs->acc[queue][0]);
}
39
/**
 * qmgr_get_entry() - pop one 32-bit entry from a hardware queue
 * @queue: queue number
 *
 * Returns the dequeued value.  An empty queue apparently reads back
 * as 0 — the drain loop in qmgr_release_queue() relies on this.
 */
u32 qmgr_get_entry(unsigned int queue)
{
	u32 val;
	val = __raw_readl(&qmgr_regs->acc[queue][0]);
#if DEBUG_QMGR
	/* NOTE(review): tests the array's address (always non-NULL);
	 * presumably qmgr_queue_descs[queue][0] was intended */
	BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */

	printk(KERN_DEBUG "Queue %s(%i) get %X\n",
	       qmgr_queue_descs[queue], queue, val);
#endif
	return val;
}
52
53static int __qmgr_get_stat1(unsigned int queue)
54{
55 return (__raw_readl(&qmgr_regs->stat1[queue >> 3])
56 >> ((queue & 7) << 2)) & 0xF;
57}
58
59static int __qmgr_get_stat2(unsigned int queue)
60{
61 BUG_ON(queue >= HALF_QUEUES);
62 return (__raw_readl(&qmgr_regs->stat2[queue >> 4])
63 >> ((queue & 0xF) << 1)) & 0x3;
64}
65
/**
 * qmgr_stat_empty() - checks if a hardware queue is empty
 * @queue: queue number; must be below HALF_QUEUES (lower 32 queues only)
 *
 * Returns non-zero value if the queue is empty.
 */
int qmgr_stat_empty(unsigned int queue)
{
	BUG_ON(queue >= HALF_QUEUES);
	return __qmgr_get_stat1(queue) & QUEUE_STAT1_EMPTY;
}
77
78/**
79 * qmgr_stat_below_low_watermark() - checks if a queue is below low watermark
80 * @queue: queue number
81 *
82 * Returns non-zero value if the queue is below low watermark.
83 */
84int qmgr_stat_below_low_watermark(unsigned int queue)
85{
86 if (queue >= HALF_QUEUES)
87 return (__raw_readl(&qmgr_regs->statne_h) >>
88 (queue - HALF_QUEUES)) & 0x01;
89 return __qmgr_get_stat1(queue) & QUEUE_STAT1_NEARLY_EMPTY;
90}
91
92/**
Linus Walleijd08502f2019-02-10 20:15:11 +010093 * qmgr_stat_full() - checks if a hardware queue is full
94 * @queue: queue number
95 *
96 * Returns non-zero value if the queue is full.
97 */
98int qmgr_stat_full(unsigned int queue)
99{
100 if (queue >= HALF_QUEUES)
101 return (__raw_readl(&qmgr_regs->statf_h) >>
102 (queue - HALF_QUEUES)) & 0x01;
103 return __qmgr_get_stat1(queue) & QUEUE_STAT1_FULL;
104}
105
/**
 * qmgr_stat_overflow() - checks if a hardware queue experienced overflow
 * @queue: queue number; must be below HALF_QUEUES (enforced by
 *	   __qmgr_get_stat2(), which only covers the lower 32 queues)
 *
 * Returns non-zero value if the queue experienced overflow.
 */
int qmgr_stat_overflow(unsigned int queue)
{
	return __qmgr_get_stat2(queue) & QUEUE_STAT2_OVERFLOW;
}
116
/**
 * qmgr_set_irq() - register a per-queue interrupt callback
 * @queue: queue number
 * @src: IRQ source condition (QUEUE_IRQ_SRC_*); programmable only for
 *	 queues 0-31, queues 32-63 are fixed to "not nearly empty"
 * @handler: callback invoked from the queue manager IRQ handlers
 * @pdev: opaque context passed back to @handler
 */
void qmgr_set_irq(unsigned int queue, int src,
		  void (*handler)(void *pdev), void *pdev)
{
	unsigned long flags;

	spin_lock_irqsave(&qmgr_lock, flags);
	if (queue < HALF_QUEUES) {
		u32 __iomem *reg;
		int bit;
		BUG_ON(src > QUEUE_IRQ_SRC_NOT_FULL);
		reg = &qmgr_regs->irqsrc[queue >> 3]; /* 8 queues per u32 */
		bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */
		/* read-modify-write only this queue's 3-bit source field */
		__raw_writel((__raw_readl(reg) & ~(7 << bit)) | (src << bit),
			     reg);
	} else
		/* IRQ source for queues 32-63 is fixed */
		BUG_ON(src != QUEUE_IRQ_SRC_NOT_NEARLY_EMPTY);

	irq_handlers[queue] = handler;
	irq_pdevs[queue] = pdev;
	spin_unlock_irqrestore(&qmgr_lock, flags);
}
139
140
/* IRQ handler for queues 0-31 on IXP42x revision A0.  On this stepping
 * the irqstat register cannot be relied upon (see the ACK comment), so
 * each enabled queue's programmed IRQ source condition is re-evaluated
 * against the live status registers instead.
 *
 * NOTE(review): src/stat hold whole 32-bit registers (4 bits per queue
 * in both irqsrc and stat1), yet no per-queue nibble shift by
 * (i & 7) * 4 is visible before the "src & 4" test — confirm against
 * the IXP42x manual / upstream history that this is intended.
 */
static irqreturn_t qmgr_irq1_a0(int irq, void *pdev)
{
	int i, ret = 0;
	u32 en_bitmap, src, stat;

	/* ACK - it may clear any bits so don't rely on it */
	__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[0]);

	en_bitmap = __raw_readl(&qmgr_regs->irqen[0]);
	while (en_bitmap) {
		i = __fls(en_bitmap); /* number of the last "low" queue */
		en_bitmap &= ~BIT(i);
		src = __raw_readl(&qmgr_regs->irqsrc[i >> 3]);
		stat = __raw_readl(&qmgr_regs->stat1[i >> 3]);
		if (src & 4) /* the IRQ condition is inverted */
			stat = ~stat;
		if (stat & BIT(src & 3)) {
			irq_handlers[i](irq_pdevs[i]);
			ret = IRQ_HANDLED;
		}
	}
	return ret;
}
164
165
/* IRQ handler for queues 32-63 on IXP42x revision A0: instead of the
 * unreliable irqstat, service every enabled queue whose "nearly empty"
 * bit is set in statne_h — the only IRQ source for the upper queues
 * (see qmgr_set_irq()).
 */
static irqreturn_t qmgr_irq2_a0(int irq, void *pdev)
{
	int i, ret = 0;
	u32 req_bitmap;

	/* ACK - it may clear any bits so don't rely on it */
	__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[1]);

	req_bitmap = __raw_readl(&qmgr_regs->irqen[1]) &
		     __raw_readl(&qmgr_regs->statne_h);
	while (req_bitmap) {
		i = __fls(req_bitmap); /* number of the last "high" queue */
		req_bitmap &= ~BIT(i);
		irq_handlers[HALF_QUEUES + i](irq_pdevs[HALF_QUEUES + i]);
		ret = IRQ_HANDLED;
	}
	return ret;
}
184
185
/* Common IRQ handler used when the CPU is not an IXP42x rev. A0 (see
 * ixp4xx_qmgr_probe()): irqstat reports which queues asserted their
 * condition, so it is ACKed and dispatched directly.  Which half of
 * the queues is serviced is derived from the IRQ line number.
 */
static irqreturn_t qmgr_irq(int irq, void *pdev)
{
	int i, half = (irq == qmgr_irq_1 ? 0 : 1);
	u32 req_bitmap = __raw_readl(&qmgr_regs->irqstat[half]);

	if (!req_bitmap)
		return 0;
	__raw_writel(req_bitmap, &qmgr_regs->irqstat[half]); /* ACK */

	while (req_bitmap) {
		i = __fls(req_bitmap); /* number of the last queue */
		req_bitmap &= ~BIT(i);
		i += half * HALF_QUEUES; /* absolute queue number */
		irq_handlers[i](irq_pdevs[i]);
	}
	return IRQ_HANDLED;
}
203
204
205void qmgr_enable_irq(unsigned int queue)
206{
207 unsigned long flags;
Krzysztof Hałasaa6a9fb82009-02-20 01:01:33 +0100208 int half = queue / 32;
209 u32 mask = 1 << (queue & (HALF_QUEUES - 1));
Krzysztof Halasa82a96f52008-01-01 21:55:23 +0100210
211 spin_lock_irqsave(&qmgr_lock, flags);
Krzysztof Hałasaa6a9fb82009-02-20 01:01:33 +0100212 __raw_writel(__raw_readl(&qmgr_regs->irqen[half]) | mask,
213 &qmgr_regs->irqen[half]);
Krzysztof Halasa82a96f52008-01-01 21:55:23 +0100214 spin_unlock_irqrestore(&qmgr_lock, flags);
215}
216
217void qmgr_disable_irq(unsigned int queue)
218{
219 unsigned long flags;
Krzysztof Hałasaa6a9fb82009-02-20 01:01:33 +0100220 int half = queue / 32;
221 u32 mask = 1 << (queue & (HALF_QUEUES - 1));
Krzysztof Halasa82a96f52008-01-01 21:55:23 +0100222
223 spin_lock_irqsave(&qmgr_lock, flags);
Krzysztof Hałasaa6a9fb82009-02-20 01:01:33 +0100224 __raw_writel(__raw_readl(&qmgr_regs->irqen[half]) & ~mask,
225 &qmgr_regs->irqen[half]);
226 __raw_writel(mask, &qmgr_regs->irqstat[half]); /* clear */
Krzysztof Halasa82a96f52008-01-01 21:55:23 +0100227 spin_unlock_irqrestore(&qmgr_lock, flags);
228}
229
230static inline void shift_mask(u32 *mask)
231{
232 mask[3] = mask[3] << 1 | mask[2] >> 31;
233 mask[2] = mask[2] << 1 | mask[1] >> 31;
234 mask[1] = mask[1] << 1 | mask[0] >> 31;
235 mask[0] <<= 1;
236}
237
/**
 * qmgr_request_queue() - allocate queue manager SRAM and configure a queue
 * @queue: queue number
 * @len: queue capacity in dwords; must be 16, 32, 64 or 128
 * @nearly_empty_watermark: 3-bit hardware-encoded "nearly empty" level
 * @nearly_full_watermark: 3-bit hardware-encoded "nearly full" level
 * @desc_format, @name: (DEBUG_QMGR builds only) printf-style description
 *
 * First-fit allocates a contiguous region of the queue manager's
 * internal SRAM, marks the pages used and writes the queue's
 * configuration word.  Takes a module reference that is dropped by
 * qmgr_release_queue().
 *
 * Returns 0 on success, -EINVAL for bad parameters, -EBUSY if the
 * queue is already configured, -ENOMEM if SRAM is exhausted.
 */
#if DEBUG_QMGR
int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
		       unsigned int nearly_empty_watermark,
		       unsigned int nearly_full_watermark,
		       const char *desc_format, const char* name)
#else
int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
			 unsigned int nearly_empty_watermark,
			 unsigned int nearly_full_watermark)
#endif
{
	u32 cfg, addr = 0, mask[4]; /* in 16-dwords */
	int err;

	BUG_ON(queue >= QUEUES);

	/* watermarks are 3-bit encoded values */
	if ((nearly_empty_watermark | nearly_full_watermark) & ~7)
		return -EINVAL;

	/* encode the length (cfg bits 24-25) and build the initial SRAM
	 * page mask: one bit per 16-dword page */
	switch (len) {
	case 16:
		cfg = 0 << 24;
		mask[0] = 0x1;
		break;
	case 32:
		cfg = 1 << 24;
		mask[0] = 0x3;
		break;
	case 64:
		cfg = 2 << 24;
		mask[0] = 0xF;
		break;
	case 128:
		cfg = 3 << 24;
		mask[0] = 0xFF;
		break;
	default:
		return -EINVAL;
	}

	cfg |= nearly_empty_watermark << 26;
	cfg |= nearly_full_watermark << 29;
	len /= 16; /* in 16-dwords: 1, 2, 4 or 8 */
	mask[1] = mask[2] = mask[3] = 0;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	spin_lock_irq(&qmgr_lock);
	/* a non-zero SRAM config word means the queue is already in use */
	if (__raw_readl(&qmgr_regs->sram[queue])) {
		err = -EBUSY;
		goto err;
	}

	/* first-fit search: slide the page mask across the 128-page
	 * allocation bitmap until a free region is found */
	while (1) {
		if (!(used_sram_bitmap[0] & mask[0]) &&
		    !(used_sram_bitmap[1] & mask[1]) &&
		    !(used_sram_bitmap[2] & mask[2]) &&
		    !(used_sram_bitmap[3] & mask[3]))
			break; /* found free space */

		addr++;
		shift_mask(mask);
		if (addr + len > ARRAY_SIZE(qmgr_regs->sram)) {
			printk(KERN_ERR "qmgr: no free SRAM space for"
			       " queue %i\n", queue);
			err = -ENOMEM;
			goto err;
		}
	}

	used_sram_bitmap[0] |= mask[0];
	used_sram_bitmap[1] |= mask[1];
	used_sram_bitmap[2] |= mask[2];
	used_sram_bitmap[3] |= mask[3];
	/* start address lives in cfg bits 14-21 */
	__raw_writel(cfg | (addr << 14), &qmgr_regs->sram[queue]);
#if DEBUG_QMGR
	snprintf(qmgr_queue_descs[queue], sizeof(qmgr_queue_descs[0]),
		 desc_format, name);
	printk(KERN_DEBUG "qmgr: requested queue %s(%i) addr = 0x%02X\n",
	       qmgr_queue_descs[queue], queue, addr);
#endif
	spin_unlock_irq(&qmgr_lock);
	return 0;

err:
	spin_unlock_irq(&qmgr_lock);
	module_put(THIS_MODULE);
	return err;
}
328
/**
 * qmgr_release_queue() - free a queue's SRAM and unregister it
 * @queue: queue number (must have been requested before)
 *
 * Drains any remaining entries (warning about each), clears the
 * queue's SRAM configuration word, returns its pages to the allocator
 * and drops the module reference taken by qmgr_request_queue().
 */
void qmgr_release_queue(unsigned int queue)
{
	u32 cfg, addr, mask[4];

	BUG_ON(queue >= QUEUES); /* not in valid range */

	spin_lock_irq(&qmgr_lock);
	cfg = __raw_readl(&qmgr_regs->sram[queue]);
	addr = (cfg >> 14) & 0xFF; /* first 16-dword page of the queue */

	BUG_ON(!addr); /* not requested */

	/* rebuild the page mask from the configured queue length */
	switch ((cfg >> 24) & 3) {
	case 0: mask[0] = 0x1; break;
	case 1: mask[0] = 0x3; break;
	case 2: mask[0] = 0xF; break;
	case 3: mask[0] = 0xFF; break;
	}

	mask[1] = mask[2] = mask[3] = 0;

	/* shift the mask to the queue's position; this consumes addr */
	while (addr--)
		shift_mask(mask);

#if DEBUG_QMGR
	printk(KERN_DEBUG "qmgr: releasing queue %s(%i)\n",
	       qmgr_queue_descs[queue], queue);
	qmgr_queue_descs[queue][0] = '\x0';
#endif

	/* drain leftover entries (addr is reused here as scratch) */
	while ((addr = qmgr_get_entry(queue)))
		printk(KERN_ERR "qmgr: released queue %i not empty: 0x%08X\n",
		       queue, addr);

	__raw_writel(0, &qmgr_regs->sram[queue]);

	used_sram_bitmap[0] &= ~mask[0];
	used_sram_bitmap[1] &= ~mask[1];
	used_sram_bitmap[2] &= ~mask[2];
	used_sram_bitmap[3] &= ~mask[3];
	irq_handlers[queue] = NULL; /* catch IRQ bugs */
	spin_unlock_irq(&qmgr_lock);

	module_put(THIS_MODULE);
}
374
/* Map the queue manager registers, reset the hardware to a known state
 * and install the pair of IRQ handlers appropriate for the CPU stepping.
 * All resources are devm-managed, so no explicit unwind is needed. */
static int ixp4xx_qmgr_probe(struct platform_device *pdev)
{
	int i, err;
	irq_handler_t handler1, handler2;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int irq1, irq2;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;
	qmgr_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(qmgr_regs))
		return PTR_ERR(qmgr_regs);

	/* the two IRQ lines serve queues 0-31 and 32-63 respectively */
	irq1 = platform_get_irq(pdev, 0);
	if (irq1 <= 0)
		return irq1 ? irq1 : -EINVAL;
	qmgr_irq_1 = irq1;
	irq2 = platform_get_irq(pdev, 1);
	if (irq2 <= 0)
		return irq2 ? irq2 : -EINVAL;
	qmgr_irq_2 = irq2;

	/* reset qmgr registers */
	for (i = 0; i < 4; i++) {
		__raw_writel(0x33333333, &qmgr_regs->stat1[i]);
		__raw_writel(0, &qmgr_regs->irqsrc[i]);
	}
	for (i = 0; i < 2; i++) {
		__raw_writel(0, &qmgr_regs->stat2[i]);
		__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[i]); /* clear */
		__raw_writel(0, &qmgr_regs->irqen[i]);
	}

	__raw_writel(0xFFFFFFFF, &qmgr_regs->statne_h);
	__raw_writel(0, &qmgr_regs->statf_h);

	/* mark every queue unconfigured */
	for (i = 0; i < QUEUES; i++)
		__raw_writel(0, &qmgr_regs->sram[i]);

	/* IXP42x rev. A0 needs the workaround handlers; all other
	 * steppings can use the common irqstat-based handler */
	if (cpu_is_ixp42x_rev_a0()) {
		handler1 = qmgr_irq1_a0;
		handler2 = qmgr_irq2_a0;
	} else
		handler1 = handler2 = qmgr_irq;

	err = devm_request_irq(dev, irq1, handler1, 0, "IXP4xx Queue Manager",
			       NULL);
	if (err) {
		dev_err(dev, "failed to request IRQ%i (%i)\n",
			irq1, err);
		return err;
	}

	err = devm_request_irq(dev, irq2, handler2, 0, "IXP4xx Queue Manager",
			       NULL);
	if (err) {
		dev_err(dev, "failed to request IRQ%i (%i)\n",
			irq2, err);
		return err;
	}

	used_sram_bitmap[0] = 0xF; /* 4 first pages reserved for config */
	spin_lock_init(&qmgr_lock);

	dev_info(dev, "IXP4xx Queue Manager initialized.\n");
	return 0;
}
444
/* Driver teardown: wait for any in-flight queue IRQ handlers to finish.
 * The register mapping and IRQs are devm-managed and freed by the core. */
static int ixp4xx_qmgr_remove(struct platform_device *pdev)
{
	synchronize_irq(qmgr_irq_1);
	synchronize_irq(qmgr_irq_2);
	return 0;
}
451
/* Device-tree match table for the AHB queue manager node */
static const struct of_device_id ixp4xx_qmgr_of_match[] = {
	{
		.compatible = "intel,ixp4xx-ahb-queue-manager",
	},
	{},
};

static struct platform_driver ixp4xx_qmgr_driver = {
	.driver = {
		.name     = "ixp4xx-qmgr",
		.of_match_table = of_match_ptr(ixp4xx_qmgr_of_match),
	},
	.probe = ixp4xx_qmgr_probe,
	.remove = ixp4xx_qmgr_remove,
};
module_platform_driver(ixp4xx_qmgr_driver);
Krzysztof Halasa82a96f52008-01-01 21:55:23 +0100468
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Krzysztof Halasa");

/* Exported queue manager API for other IXP4xx drivers */
EXPORT_SYMBOL(qmgr_put_entry);
EXPORT_SYMBOL(qmgr_get_entry);
EXPORT_SYMBOL(qmgr_stat_empty);
EXPORT_SYMBOL(qmgr_stat_below_low_watermark);
EXPORT_SYMBOL(qmgr_stat_full);
EXPORT_SYMBOL(qmgr_stat_overflow);
EXPORT_SYMBOL(qmgr_set_irq);
EXPORT_SYMBOL(qmgr_enable_irq);
EXPORT_SYMBOL(qmgr_disable_irq);
#if DEBUG_QMGR
EXPORT_SYMBOL(qmgr_queue_descs);
EXPORT_SYMBOL(qmgr_request_queue);
#else
EXPORT_SYMBOL(__qmgr_request_queue);
#endif
EXPORT_SYMBOL(qmgr_release_queue);
Krzysztof Halasa82a96f52008-01-01 21:55:23 +0100487EXPORT_SYMBOL(qmgr_release_queue);