// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>

#include "gsi.h"
#include "gsi_reg.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
#include "ipa_version.h"

/**
 * DOC: The IPA Generic Software Interface
 *
 * The generic software interface (GSI) is an integral component of the IPA,
 * providing a well-defined communication layer between the AP subsystem
 * and the IPA core. The modem uses the GSI layer as well.
 *
 *    --------             ---------
 *    |      |             |       |
 *    |  AP  +<---.   .----+ Modem |
 *    |      +--. |   | .->+       |
 *    |      |  | |   | |  |       |
 *    --------  | |   | |  ---------
 *              v |   v |
 *            --+-+---+-+--
 *            |    GSI    |
 *            |-----------|
 *            |           |
 *            |    IPA    |
 *            |           |
 *            -------------
 *
 * In the above diagram, the AP and Modem represent "execution environments"
 * (EEs), which are independent operating environments that use the IPA for
 * data transfer.
 *
 * Each EE uses a set of unidirectional GSI "channels," which allow transfer
 * of data to or from the IPA. A channel is implemented as a ring buffer,
 * with a DRAM-resident array of "transfer elements" (TREs) available to
 * describe transfers to or from other EEs through the IPA. A transfer
 * element can also contain an immediate command, requesting that the IPA
 * perform actions other than data transfer.
 *
 * Each TRE refers to a block of data, which is also located in DRAM. After
 * writing one or more TREs to a channel, the writer (either the IPA or an EE)
 * writes a doorbell register to inform the receiving side how many elements
 * have been written.
 *
 * Each channel has a GSI "event ring" associated with it. An event ring
 * is implemented very much like a channel ring, but is always directed from
 * the IPA to an EE. The IPA notifies an EE (such as the AP) about channel
 * events by adding an entry to the event ring associated with the channel.
 * The GSI then writes its doorbell for the event ring, causing the target
 * EE to be interrupted. Each entry in an event ring contains a pointer
 * to the channel TRE whose completion the event represents.
 *
 * Each TRE in a channel ring has a set of flags. One flag indicates whether
 * the completion of the transfer operation generates an entry (and possibly
 * an interrupt) in the channel's event ring. Other flags allow transfer
 * elements to be chained together, forming a single logical transaction.
 * TRE flags are used to control whether and when interrupts are generated
 * to signal completion of channel transfers.
 *
 * Elements in channel and event rings are completed (or consumed) strictly
 * in order. Completion of one entry implies the completion of all preceding
 * entries. A single completion interrupt can therefore communicate the
 * completion of many transfers.
 *
 * Note that all GSI registers are little-endian, which is the assumed
 * endianness of I/O space accesses. The accessor functions perform byte
 * swapping if needed (i.e., for a big-endian CPU).
 */

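/* Illustrative sketch only: one way the 16-byte GPI transfer element (TRE)
 * described above could be laid out. This structure is an assumption added
 * for clarity; the definition actually used when building transactions lives
 * elsewhere in this driver, not in this file.
 */
struct gsi_tre_sketch {
	__le64 addr;		/* DMA address of the data block (or command) */
	__le16 len_opcode;	/* transfer length, or immediate command opcode */
	__le16 reserved;
	__le32 flags;		/* chaining and interrupt-generation flags */
};
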
/* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
#define GSI_EVT_RING_INT_MODT		(32 * 1) /* 1ms under 32KHz clock */

#define GSI_CMD_TIMEOUT			50	/* milliseconds */

#define GSI_CHANNEL_STOP_RETRIES	10
#define GSI_CHANNEL_MODEM_HALT_RETRIES	10

#define GSI_MHI_EVENT_ID_START		10	/* 1st reserved event id */
#define GSI_MHI_EVENT_ID_END		16	/* Last reserved event id */

#define GSI_ISR_MAX_ITER		50	/* Detect interrupt storms */

/* An entry in an event ring */
struct gsi_event {
	__le64 xfer_ptr;
	__le16 len;
	u8 reserved1;
	u8 code;
	__le16 reserved2;
	u8 type;
	u8 chid;
};

/** struct gsi_channel_scratch_gpi - GPI protocol scratch register
 * @max_outstanding_tre:
 *	Defines the maximum number of TREs allowed in a single transaction
 *	on a channel, expressed in bytes (TRE count times the TRE size).
 *	This determines the amount of prefetch performed by the hardware.
 *	We configure this to equal the size of the TLV FIFO for the channel.
 * @outstanding_threshold:
 *	Defines the threshold (in bytes) determining when the sequencer
 *	should update the channel doorbell. We configure this to equal
 *	the size of two TREs.
 */
struct gsi_channel_scratch_gpi {
	u64 reserved1;
	u16 reserved2;
	u16 max_outstanding_tre;
	u16 reserved3;
	u16 outstanding_threshold;
};

/** union gsi_channel_scratch - channel scratch configuration area
 *
 * The exact interpretation of this register is protocol-specific.
 * We only use GPI channels; see struct gsi_channel_scratch_gpi, above.
 */
union gsi_channel_scratch {
	struct gsi_channel_scratch_gpi gpi;
	struct {
		u32 word1;
		u32 word2;
		u32 word3;
		u32 word4;
	} data;
};

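/* Illustrative sketch only (never called): how the two GPI scratch fields
 * described above are derived for a hypothetical channel whose TLV FIFO
 * holds tlv_count TREs. gsi_channel_program() below does the real
 * programming using the per-channel TLV count.
 */
static inline void
gsi_scratch_gpi_sketch(struct gsi_channel_scratch_gpi *gpi, u16 tlv_count)
{
	/* Maximum outstanding data: the whole TLV FIFO, expressed in bytes */
	gpi->max_outstanding_tre = tlv_count * GSI_RING_ELEMENT_SIZE;
	/* Have the sequencer update the doorbell every two TREs' worth */
	gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;
}
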
/* Check things that can be validated at build time. */
static void gsi_validate_build(void)
{
	/* This is used as a divisor */
	BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE);

	/* Code assumes the channel and event ring elements are the same
	 * size (and that the size is fixed). Make sure the size of an
	 * event ring element is what's expected.
	 */
	BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE);

	/* Hardware requires a 2^n ring size. We ensure the number of
	 * elements in an event ring is a power of 2 elsewhere; this
	 * ensures the elements themselves meet the requirement.
	 */
	BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE));

	/* The channel element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(ELEMENT_SIZE_FMASK));

	/* The event ring element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(EV_ELEMENT_SIZE_FMASK));
}

/* Return the channel id associated with a given channel */
static u32 gsi_channel_id(struct gsi_channel *channel)
{
	return channel - &channel->gsi->channel[0];
}

/* Update the GSI IRQ type register with the cached value */
static void gsi_irq_type_update(struct gsi *gsi, u32 val)
{
	gsi->type_enabled_bitmap = val;
	iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
}

static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(type_id));
}

static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id));
}

/* Turn off all GSI interrupts initially */
static void gsi_irq_setup(struct gsi *gsi)
{
	u32 adjust;

	/* Disable all interrupt types */
	gsi_irq_type_update(gsi, 0);

	/* Clear all type-specific interrupt masks */
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

	/* Reverse the offset adjustment for inter-EE register offsets */
	adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
	iowrite32(0, gsi->virt + adjust + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);
	iowrite32(0, gsi->virt + adjust + GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET);

	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
}

/* Turn off all GSI interrupts when we're all done */
static void gsi_irq_teardown(struct gsi *gsi)
{
	/* Nothing to do */
}

/* Event ring commands are performed one at a time. Their completion
 * is signaled by the event ring control GSI interrupt type, which is
 * only enabled when we issue an event ring command. Only the event
 * ring being operated on has this interrupt enabled.
 */
static void gsi_irq_ev_ctrl_enable(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val = BIT(evt_ring_id);

	/* There's a small chance that a previous command completed
	 * after the interrupt was disabled, so make sure we have no
	 * pending interrupts before we enable them.
	 */
	iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);

	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
	gsi_irq_type_enable(gsi, GSI_EV_CTRL);
}

/* Disable event ring control interrupts */
static void gsi_irq_ev_ctrl_disable(struct gsi *gsi)
{
	gsi_irq_type_disable(gsi, GSI_EV_CTRL);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
}

/* Channel commands are performed one at a time. Their completion is
 * signaled by the channel control GSI interrupt type, which is only
 * enabled when we issue a channel command. Only the channel being
 * operated on has this interrupt enabled.
 */
static void gsi_irq_ch_ctrl_enable(struct gsi *gsi, u32 channel_id)
{
	u32 val = BIT(channel_id);

	/* There's a small chance that a previous command completed
	 * after the interrupt was disabled, so make sure we have no
	 * pending interrupts before we enable them.
	 */
	iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);

	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
	gsi_irq_type_enable(gsi, GSI_CH_CTRL);
}

/* Disable channel control interrupts */
static void gsi_irq_ch_ctrl_disable(struct gsi *gsi)
{
	gsi_irq_type_disable(gsi, GSI_CH_CTRL);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
}

static void gsi_irq_ieob_enable_one(struct gsi *gsi, u32 evt_ring_id)
{
	bool enable_ieob = !gsi->ieob_enabled_bitmap;
	u32 val;

	gsi->ieob_enabled_bitmap |= BIT(evt_ring_id);
	val = gsi->ieob_enabled_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

	/* Enable the interrupt type if this is the first channel enabled */
	if (enable_ieob)
		gsi_irq_type_enable(gsi, GSI_IEOB);
}

static void gsi_irq_ieob_disable(struct gsi *gsi, u32 event_mask)
{
	u32 val;

	gsi->ieob_enabled_bitmap &= ~event_mask;

	/* Disable the interrupt type if this was the last enabled channel */
	if (!gsi->ieob_enabled_bitmap)
		gsi_irq_type_disable(gsi, GSI_IEOB);

	val = gsi->ieob_enabled_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
}

static void gsi_irq_ieob_disable_one(struct gsi *gsi, u32 evt_ring_id)
{
	gsi_irq_ieob_disable(gsi, BIT(evt_ring_id));
}

/* Enable all GSI interrupt types */
static void gsi_irq_enable(struct gsi *gsi)
{
	u32 val;

	/* Global interrupts include hardware error reports. Enable
	 * that so we can at least report the error should it occur.
	 */
	iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GLOB_EE));

	/* General GSI interrupts are reported to all EEs; if they occur
	 * they are unrecoverable (without reset). A breakpoint interrupt
	 * also exists, but we don't support that. We want to be notified
	 * of errors so we can report them, even if they can't be handled.
	 */
	val = BIT(BUS_ERROR);
	val |= BIT(CMD_FIFO_OVRFLOW);
	val |= BIT(MCS_STACK_OVRFLOW);
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GENERAL));
}

/* Disable all GSI interrupt types */
static void gsi_irq_disable(struct gsi *gsi)
{
	gsi_irq_type_update(gsi, 0);

	/* Clear the type-specific interrupt masks set by gsi_irq_enable() */
	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
}

/* Return the virtual address associated with a ring index */
void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
{
	/* Note: index *must* be used modulo the ring count here */
	return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE;
}

/* Return the 32-bit DMA address associated with a ring index */
static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
{
	return (ring->addr & GENMASK(31, 0)) + index * GSI_RING_ELEMENT_SIZE;
}

/* Return the ring index of a 32-bit ring offset */
static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
{
	return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE;
}

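/* Illustrative example with assumed values: for a ring whose base DMA address
 * is 0x1000 and whose elements are GSI_RING_ELEMENT_SIZE (16) bytes, index 3
 * corresponds to ring offset 0x1030, and gsi_ring_index() maps offset 0x1030
 * back to index 3.
 */
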
/* Issue a GSI command by writing a value to a register, then wait for
 * completion to be signaled. Returns true if the command completes
 * or false if it times out.
 */
static bool
gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion)
{
	unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);

	reinit_completion(completion);

	iowrite32(val, gsi->virt + reg);

	return !!wait_for_completion_timeout(completion, timeout);
}

/* Return the hardware's notion of the current state of an event ring */
static enum gsi_evt_ring_state
gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	return u32_get_bits(val, EV_CHSTATE_FMASK);
}

/* Issue an event ring command and wait for it to complete */
static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
				 enum gsi_evt_cmd_opcode opcode)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	struct completion *completion = &evt_ring->completion;
	struct device *dev = gsi->dev;
	bool timeout;
	u32 val;

	/* Enable the completion interrupt for the command */
	gsi_irq_ev_ctrl_enable(gsi, evt_ring_id);

	val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
	val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);

	timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion);

	gsi_irq_ev_ctrl_disable(gsi);

	if (!timeout)
		return;

	dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
		opcode, evt_ring_id, evt_ring->state);
}

/* Allocate an event ring in NOT_ALLOCATED state */
static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];

	/* Get initial event ring state */
	evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
		dev_err(gsi->dev, "event ring %u bad state %u before alloc\n",
			evt_ring_id, evt_ring->state);
		return -EINVAL;
	}

	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);

	/* If successful the event ring state will have changed */
	if (evt_ring->state == GSI_EVT_RING_STATE_ALLOCATED)
		return 0;

	dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
		evt_ring_id, evt_ring->state);

	return -EIO;
}

/* Reset a GSI event ring in ALLOCATED or ERROR state. */
static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	enum gsi_evt_ring_state state = evt_ring->state;

	if (state != GSI_EVT_RING_STATE_ALLOCATED &&
	    state != GSI_EVT_RING_STATE_ERROR) {
		dev_err(gsi->dev, "event ring %u bad state %u before reset\n",
			evt_ring_id, evt_ring->state);
		return;
	}

	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);

	/* If successful the event ring state will have changed */
	if (evt_ring->state == GSI_EVT_RING_STATE_ALLOCATED)
		return;

	dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
		evt_ring_id, evt_ring->state);
}

/* Issue a hardware de-allocation request for an allocated event ring */
static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];

	if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
		dev_err(gsi->dev, "event ring %u state %u before dealloc\n",
			evt_ring_id, evt_ring->state);
		return;
	}

	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);

	/* If successful the event ring state will have changed */
	if (evt_ring->state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
		return;

	dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
		evt_ring_id, evt_ring->state);
}

/* Fetch the current state of a channel from hardware */
static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
{
	u32 channel_id = gsi_channel_id(channel);
	void __iomem *virt = channel->gsi->virt;
	u32 val;

	val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	return u32_get_bits(val, CHSTATE_FMASK);
}

/* Issue a channel command and wait for it to complete */
static void
gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
	struct completion *completion = &channel->completion;
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	struct device *dev = gsi->dev;
	bool timeout;
	u32 val;

	/* Enable the completion interrupt for the command */
	gsi_irq_ch_ctrl_enable(gsi, channel_id);

	val = u32_encode_bits(channel_id, CH_CHID_FMASK);
	val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
	timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion);

	gsi_irq_ch_ctrl_disable(gsi);

	if (!timeout)
		return;

	dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
		opcode, channel_id, gsi_channel_state(channel));
}

/* Allocate GSI channel in NOT_ALLOCATED state */
static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;

	/* Get initial channel state */
	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) {
		dev_err(dev, "channel %u bad state %u before alloc\n",
			channel_id, state);
		return -EINVAL;
	}

	gsi_channel_command(channel, GSI_CH_ALLOCATE);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state == GSI_CHANNEL_STATE_ALLOCATED)
		return 0;

	dev_err(dev, "channel %u bad state %u after alloc\n",
		channel_id, state);

	return -EIO;
}

/* Start an ALLOCATED channel */
static int gsi_channel_start_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED &&
	    state != GSI_CHANNEL_STATE_STOPPED) {
		dev_err(dev, "channel %u bad state %u before start\n",
			gsi_channel_id(channel), state);
		return -EINVAL;
	}

	gsi_channel_command(channel, GSI_CH_START);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state == GSI_CHANNEL_STATE_STARTED)
		return 0;

	dev_err(dev, "channel %u bad state %u after start\n",
		gsi_channel_id(channel), state);

	return -EIO;
}

/* Stop a GSI channel in STARTED state */
static int gsi_channel_stop_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;

	state = gsi_channel_state(channel);

	/* Channel could have entered STOPPED state since last call
	 * if it timed out. If so, we're done.
	 */
	if (state == GSI_CHANNEL_STATE_STOPPED)
		return 0;

	if (state != GSI_CHANNEL_STATE_STARTED &&
	    state != GSI_CHANNEL_STATE_STOP_IN_PROC) {
		dev_err(dev, "channel %u bad state %u before stop\n",
			gsi_channel_id(channel), state);
		return -EINVAL;
	}

	gsi_channel_command(channel, GSI_CH_STOP);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state == GSI_CHANNEL_STATE_STOPPED)
		return 0;

	/* We may have to try again if stop is in progress */
	if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
		return -EAGAIN;

	dev_err(dev, "channel %u bad state %u after stop\n",
		gsi_channel_id(channel), state);

	return -EIO;
}

/* Reset a GSI channel in ALLOCATED or ERROR state. */
static void gsi_channel_reset_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;

	/* A short delay is required before a RESET command */
	usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_STOPPED &&
	    state != GSI_CHANNEL_STATE_ERROR) {
		/* No need to reset a channel already in ALLOCATED state */
		if (state != GSI_CHANNEL_STATE_ALLOCATED)
			dev_err(dev, "channel %u bad state %u before reset\n",
				gsi_channel_id(channel), state);
		return;
	}

	gsi_channel_command(channel, GSI_CH_RESET);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED)
		dev_err(dev, "channel %u bad state %u after reset\n",
			gsi_channel_id(channel), state);
}

/* Deallocate an ALLOCATED GSI channel */
static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED) {
		dev_err(dev, "channel %u bad state %u before dealloc\n",
			channel_id, state);
		return;
	}

	gsi_channel_command(channel, GSI_CH_DE_ALLOC);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);

	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
		dev_err(dev, "channel %u bad state %u after dealloc\n",
			channel_id, state);
}

/* Ring an event ring doorbell, reporting the last entry processed by the AP.
 * The index argument (modulo the ring count) is the first unfilled entry, so
 * we supply one less than that with the doorbell. Update the event ring
 * index field with the value provided.
 */
static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
	struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
	u32 val;

	ring->index = index;	/* Next unused entry */

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(ring, (index - 1) % ring->count);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id));
}

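/* Illustrative note with assumed values: for a 16-element event ring, once
 * every entry has been processed the next unfilled index wraps back to 0;
 * because index is unsigned, (0 - 1) % 16 yields 15, so the doorbell above
 * is written with the address of the final ring element, which is the last
 * one the AP completed.
 */
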
/* Program an event ring for use */
static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE;
	u32 val;

	/* We program all event rings as GPI type/protocol */
	val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, EV_CHTYPE_FMASK);
	val |= EV_INTYPE_FMASK;
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	val = u32_encode_bits(size, EV_R_LENGTH_FMASK);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the event ring,
	 * respectively.
	 */
	val = evt_ring->ring.addr & GENMASK(31, 0);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));

	val = evt_ring->ring.addr >> 32;
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));

	/* Enable interrupt moderation by setting the moderation delay */
	val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK);
	val |= u32_encode_bits(1, MODC_FMASK);	/* comes from channel */
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id));

	/* No MSI write data, and MSI address high and low address is 0 */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id));

	/* We don't need to get event read pointer updates */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));

	/* Finally, tell the hardware we've completed event 0 (arbitrary) */
	gsi_evt_ring_doorbell(gsi, evt_ring_id, 0);
}

/* Find the transaction whose completion indicates a channel is quiesced */
static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	const struct list_head *list;
	struct gsi_trans *trans;

	spin_lock_bh(&trans_info->spinlock);

	/* There is a small chance a TX transaction got allocated just
	 * before we disabled transmits, so check for that.
	 */
	if (channel->toward_ipa) {
		list = &trans_info->alloc;
		if (!list_empty(list))
			goto done;
		list = &trans_info->pending;
		if (!list_empty(list))
			goto done;
	}

	/* Otherwise (TX or RX) we want to wait for anything that
	 * has completed, or has been polled but not released yet.
	 */
	list = &trans_info->complete;
	if (!list_empty(list))
		goto done;
	list = &trans_info->polled;
	if (list_empty(list))
		list = NULL;
done:
	trans = list ? list_last_entry(list, struct gsi_trans, links) : NULL;

	/* Caller will wait for this, so take a reference */
	if (trans)
		refcount_inc(&trans->refcount);

	spin_unlock_bh(&trans_info->spinlock);

	return trans;
}

/* Wait for transaction activity on a channel to complete */
static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the last transaction, and wait for it to complete */
	trans = gsi_channel_trans_last(channel);
	if (trans) {
		wait_for_completion(&trans->completion);
		gsi_trans_free(trans);
	}
}

/* Program a channel for use */
static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
{
	size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
	u32 channel_id = gsi_channel_id(channel);
	union gsi_channel_scratch scr = { };
	struct gsi_channel_scratch_gpi *gpi;
	struct gsi *gsi = channel->gsi;
	u32 wrr_weight = 0;
	u32 val;

	/* Arbitrarily pick TRE 0 as the first channel element to use */
	channel->tre_ring.index = 0;

	/* We program all channels as GPI type/protocol */
	val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, CHTYPE_PROTOCOL_FMASK);
	if (channel->toward_ipa)
		val |= CHTYPE_DIR_FMASK;
	val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	val = u32_encode_bits(size, R_LENGTH_FMASK);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the channel ring,
	 * respectively.
	 */
	val = channel->tre_ring.addr & GENMASK(31, 0);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));

	val = channel->tre_ring.addr >> 32;
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));

	/* Command channel gets low weighted round-robin priority */
	if (channel->command)
		wrr_weight = field_max(WRR_WEIGHT_FMASK);
	val = u32_encode_bits(wrr_weight, WRR_WEIGHT_FMASK);

	/* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */

	/* We enable the doorbell engine for IPA v3.5.1 */
	if (gsi->version == IPA_VERSION_3_5_1 && doorbell)
		val |= USE_DB_ENG_FMASK;

	/* v4.0 introduces an escape buffer for prefetch. We use it
	 * on all but the AP command channel.
	 */
	if (gsi->version != IPA_VERSION_3_5_1 && !channel->command) {
		/* If not otherwise set, prefetch buffers are used */
		if (gsi->version < IPA_VERSION_4_5)
			val |= USE_ESCAPE_BUF_ONLY_FMASK;
		else
			val |= u32_encode_bits(GSI_ESCAPE_BUF_ONLY,
					       PREFETCH_MODE_FMASK);
	}

	iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));

	/* Now update the scratch registers for GPI protocol */
	gpi = &scr.gpi;
	gpi->max_outstanding_tre = gsi_channel_trans_tre_max(gsi, channel_id) *
					GSI_RING_ELEMENT_SIZE;
	gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;

	val = scr.data.word1;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id));

	val = scr.data.word2;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id));

	val = scr.data.word3;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id));

	/* We must preserve the upper 16 bits of the last scratch register.
	 * The next sequence assumes those bits remain unchanged between the
	 * read and the write.
	 */
	val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
	val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));

	/* All done! */
}

static void gsi_channel_deprogram(struct gsi_channel *channel)
{
	/* Nothing to do */
}

static int __gsi_channel_start(struct gsi_channel *channel, bool start)
{
	struct gsi *gsi = channel->gsi;
	int ret;

	if (!start)
		return 0;

	mutex_lock(&gsi->mutex);

	ret = gsi_channel_start_command(channel);

	mutex_unlock(&gsi->mutex);

	return ret;
}

/* Start an allocated GSI channel */
int gsi_channel_start(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	/* Enable NAPI and the completion interrupt */
	napi_enable(&channel->napi);
	gsi_irq_ieob_enable_one(gsi, channel->evt_ring_id);

	ret = __gsi_channel_start(channel, true);
	if (ret) {
		gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
		napi_disable(&channel->napi);
	}

	return ret;
}

static int gsi_channel_stop_retry(struct gsi_channel *channel)
{
	u32 retries = GSI_CHANNEL_STOP_RETRIES;
	int ret;

	do {
		ret = gsi_channel_stop_command(channel);
		if (ret != -EAGAIN)
			break;
		usleep_range(3 * USEC_PER_MSEC, 5 * USEC_PER_MSEC);
	} while (retries--);

	return ret;
}

static int __gsi_channel_stop(struct gsi_channel *channel, bool stop)
{
	struct gsi *gsi = channel->gsi;
	int ret;

	/* Wait for any underway transactions to complete before stopping. */
	gsi_channel_trans_quiesce(channel);

	if (!stop)
		return 0;

	mutex_lock(&gsi->mutex);

	ret = gsi_channel_stop_retry(channel);

	mutex_unlock(&gsi->mutex);

	return ret;
}

/* Stop a started channel */
int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	ret = __gsi_channel_stop(channel, true);
	if (ret)
		return ret;

	/* Disable the completion interrupt and NAPI if successful */
	gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
	napi_disable(&channel->napi);

	return 0;
}

/* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */
void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	mutex_lock(&gsi->mutex);

	gsi_channel_reset_command(channel);
	/* Due to a hardware quirk we may need to reset RX channels twice. */
	if (gsi->version == IPA_VERSION_3_5_1 && !channel->toward_ipa)
		gsi_channel_reset_command(channel);

	gsi_channel_program(channel, doorbell);
	gsi_channel_trans_cancel_pending(channel);

	mutex_unlock(&gsi->mutex);
}

/* Stop a STARTED channel for suspend (using stop if requested) */
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	ret = __gsi_channel_stop(channel, stop);
	if (ret)
		return ret;

	/* Ensure NAPI polling has finished. */
	napi_synchronize(&channel->napi);

	return 0;
}

/* Resume a suspended channel (starting will be requested if STOPPED) */
int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	return __gsi_channel_start(channel, start);
}

/**
 * gsi_channel_tx_queued() - Report queued TX transfers for a channel
 * @channel:	Channel for which to report
 *
 * Report to the network stack the number of bytes and transactions that
 * have been queued to hardware since last call. This and the next function
 * supply information used by the network stack for throttling.
 *
 * For each channel we track the number of transactions used and bytes of
 * data those transactions represent. We also track what those values are
 * each time this function is called. Subtracting the two tells us
 * the number of bytes and transactions that have been added between
 * successive calls.
 *
 * Calling this each time we ring the channel doorbell allows us to
 * provide accurate information to the network stack about how much
 * work we've given the hardware at any point in time.
 */
void gsi_channel_tx_queued(struct gsi_channel *channel)
{
	u32 trans_count;
	u32 byte_count;

	byte_count = channel->byte_count - channel->queued_byte_count;
	trans_count = channel->trans_count - channel->queued_trans_count;
	channel->queued_byte_count = channel->byte_count;
	channel->queued_trans_count = channel->trans_count;

	ipa_gsi_channel_tx_queued(channel->gsi, gsi_channel_id(channel),
				  trans_count, byte_count);
}

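/* Worked example with assumed numbers: if a channel's lifetime byte_count has
 * reached 5000 and queued_byte_count was 3800 at the previous doorbell, the
 * call above reports 1200 newly queued bytes, then records 5000 as the new
 * baseline so the next call reports only bytes queued after this point.
 */
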
/**
 * gsi_channel_tx_update() - Report completed TX transfers
 * @channel:	Channel that has completed transmitting packets
 * @trans:	Last transaction known to be complete
 *
 * Compute the number of transactions and bytes that have been transferred
 * over a TX channel since the given transaction was committed. Report this
 * information to the network stack.
 *
 * At the time a transaction is committed, we record its channel's
 * committed transaction and byte counts *in the transaction*.
 * Completions are signaled by the hardware with an interrupt, and
 * we can determine the latest completed transaction at that time.
 *
 * The difference between the byte/transaction count recorded in
 * the transaction and the count last time we recorded a completion
 * tells us exactly how much data has been transferred between
 * completions.
 *
 * Calling this each time we learn of a newly-completed transaction
 * allows us to provide accurate information to the network stack
 * about how much work has been completed by the hardware at a given
 * point in time.
 */
1060static void
1061gsi_channel_tx_update(struct gsi_channel *channel, struct gsi_trans *trans)
1062{
1063 u64 byte_count = trans->byte_count + trans->len;
1064 u64 trans_count = trans->trans_count + 1;
1065
1066 byte_count -= channel->compl_byte_count;
1067 channel->compl_byte_count += byte_count;
1068 trans_count -= channel->compl_trans_count;
1069 channel->compl_trans_count += trans_count;
1070
1071 ipa_gsi_channel_tx_completed(channel->gsi, gsi_channel_id(channel),
1072 trans_count, byte_count);
1073}
1074
1075/* Channel control interrupt handler */
1076static void gsi_isr_chan_ctrl(struct gsi *gsi)
1077{
1078 u32 channel_mask;
1079
1080 channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET);
1081 iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);
1082
1083 while (channel_mask) {
1084 u32 channel_id = __ffs(channel_mask);
1085 struct gsi_channel *channel;
1086
1087 channel_mask ^= BIT(channel_id);
1088
1089 channel = &gsi->channel[channel_id];
Alex Elder650d1602020-03-05 22:28:21 -06001090
1091 complete(&channel->completion);
1092 }
1093}
1094
1095/* Event ring control interrupt handler */
1096static void gsi_isr_evt_ctrl(struct gsi *gsi)
1097{
1098 u32 event_mask;
1099
1100 event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET);
1101 iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);
1102
1103 while (event_mask) {
1104 u32 evt_ring_id = __ffs(event_mask);
1105 struct gsi_evt_ring *evt_ring;
1106
1107 event_mask ^= BIT(evt_ring_id);
1108
1109 evt_ring = &gsi->evt_ring[evt_ring_id];
1110 evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
1111
1112 complete(&evt_ring->completion);
1113 }
1114}
1115
1116/* Global channel error interrupt handler */
1117static void
1118gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
1119{
Alex Elder7b0ac8f2020-11-10 15:59:20 -06001120 if (code == GSI_OUT_OF_RESOURCES) {
Alex Elder650d1602020-03-05 22:28:21 -06001121 dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
1122 complete(&gsi->channel[channel_id].completion);
1123 return;
1124 }
1125
1126 /* Report, but otherwise ignore all other error codes */
1127 dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n",
1128 channel_id, err_ee, code);
1129}
1130
1131/* Global event error interrupt handler */
1132static void
1133gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
1134{
Alex Elder7b0ac8f2020-11-10 15:59:20 -06001135 if (code == GSI_OUT_OF_RESOURCES) {
Alex Elder650d1602020-03-05 22:28:21 -06001136 struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
1137 u32 channel_id = gsi_channel_id(evt_ring->channel);
1138
1139 complete(&evt_ring->completion);
1140 dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
1141 channel_id);
1142 return;
1143 }
1144
1145 /* Report, but otherwise ignore all other error codes */
1146 dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n",
1147 evt_ring_id, err_ee, code);
1148}
1149
1150/* Global error interrupt handler */
1151static void gsi_isr_glob_err(struct gsi *gsi)
1152{
1153 enum gsi_err_type type;
1154 enum gsi_err_code code;
1155 u32 which;
1156 u32 val;
1157 u32 ee;
1158
1159 /* Get the logged error, then reinitialize the log */
1160 val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET);
1161 iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
1162 iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);
1163
1164 ee = u32_get_bits(val, ERR_EE_FMASK);
Alex Elder650d1602020-03-05 22:28:21 -06001165 type = u32_get_bits(val, ERR_TYPE_FMASK);
Alex Elderd6c9e3f2020-11-05 12:14:03 -06001166 which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
Alex Elder650d1602020-03-05 22:28:21 -06001167 code = u32_get_bits(val, ERR_CODE_FMASK);
1168
1169 if (type == GSI_ERR_TYPE_CHAN)
1170 gsi_isr_glob_chan_err(gsi, ee, which, code);
1171 else if (type == GSI_ERR_TYPE_EVT)
1172 gsi_isr_glob_evt_err(gsi, ee, which, code);
1173 else /* type GSI_ERR_TYPE_GLOB should be fatal */
1174 dev_err(gsi->dev, "unexpected global error 0x%08x\n", type);
1175}
1176
1177/* Generic EE interrupt handler */
1178static void gsi_isr_gp_int1(struct gsi *gsi)
1179{
1180 u32 result;
1181 u32 val;
1182
Alex Elderf849afc2020-11-19 16:49:26 -06001183 /* This interrupt is used to handle completions of the two GENERIC
1184 * GSI commands. We use these to allocate and halt channels on
1185 * the modem's behalf due to a hardware quirk on IPA v4.2. Once
1186 * allocated, the modem "owns" these channels, and as a result we
1187 * have no way of knowing the channel's state at any given time.
1188 *
1189 * It is recommended that we halt the modem channels we allocated
1190 * when shutting down, but it's possible the channel isn't running
1191 * at the time we issue the HALT command. We'll get an error in
1192 * that case, but it's harmless (the channel is already halted).
1193 *
1194 * For this reason, we silently ignore a CHANNEL_NOT_RUNNING error
1195 * if we receive it.
1196 */
Alex Elder650d1602020-03-05 22:28:21 -06001197 val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
1198 result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);
Alex Elderf849afc2020-11-19 16:49:26 -06001199
1200 switch (result) {
1201 case GENERIC_EE_SUCCESS:
1202 case GENERIC_EE_CHANNEL_NOT_RUNNING:
Alex Elder11361452020-11-19 16:49:27 -06001203 gsi->result = 0;
1204 break;
1205
1206 case GENERIC_EE_RETRY:
1207 gsi->result = -EAGAIN;
Alex Elderf849afc2020-11-19 16:49:26 -06001208 break;
1209
1210 default:
Alex Elder650d1602020-03-05 22:28:21 -06001211 dev_err(gsi->dev, "global INT1 generic result %u\n", result);
Alex Elder11361452020-11-19 16:49:27 -06001212 gsi->result = -EIO;
Alex Elderf849afc2020-11-19 16:49:26 -06001213 break;
1214 }
Alex Elder650d1602020-03-05 22:28:21 -06001215
1216 complete(&gsi->completion);
1217}
Alex Elder0b1ba182020-04-30 16:35:12 -05001218
Alex Elder650d1602020-03-05 22:28:21 -06001219/* Inter-EE interrupt handler */
1220static void gsi_isr_glob_ee(struct gsi *gsi)
1221{
1222 u32 val;
1223
1224 val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);
1225
Alex Elder6c6358c2020-11-10 15:59:17 -06001226 if (val & BIT(ERROR_INT))
Alex Elder650d1602020-03-05 22:28:21 -06001227 gsi_isr_glob_err(gsi);
1228
1229 iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);
1230
Alex Elder6c6358c2020-11-10 15:59:17 -06001231 val &= ~BIT(ERROR_INT);
Alex Elder650d1602020-03-05 22:28:21 -06001232
Alex Elder6c6358c2020-11-10 15:59:17 -06001233 if (val & BIT(GP_INT1)) {
1234 val ^= BIT(GP_INT1);
Alex Elder650d1602020-03-05 22:28:21 -06001235 gsi_isr_gp_int1(gsi);
1236 }
1237
1238 if (val)
1239 dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val);
1240}
1241
1242/* I/O completion interrupt event */
1243static void gsi_isr_ieob(struct gsi *gsi)
1244{
1245 u32 event_mask;
1246
1247 event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
Alex Elder7bd97852021-01-21 05:48:21 -06001248 gsi_irq_ieob_disable(gsi, event_mask);
Alex Elder195ef572020-05-15 15:07:31 -05001249 iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);
Alex Elder650d1602020-03-05 22:28:21 -06001250
1251 while (event_mask) {
1252 u32 evt_ring_id = __ffs(event_mask);
1253
1254 event_mask ^= BIT(evt_ring_id);
1255
Alex Elder650d1602020-03-05 22:28:21 -06001256 napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
1257 }
1258}
1259
1260/* General event interrupts represent serious problems, so report them */
1261static void gsi_isr_general(struct gsi *gsi)
1262{
1263 struct device *dev = gsi->dev;
1264 u32 val;
1265
1266 val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
1267 iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);
1268
Alex Elder352f26a2020-11-05 12:14:06 -06001269 dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
Alex Elder650d1602020-03-05 22:28:21 -06001270}
1271
1272/**
1273 * gsi_isr() - Top level GSI interrupt service routine
1274 * @irq: Interrupt number (ignored)
1275 * @dev_id: GSI pointer supplied to request_irq()
1276 *
1277 * This is the main handler function registered for the GSI IRQ. Each type
1278 * of interrupt has a separate handler function that is called from here.
1279 */
1280static irqreturn_t gsi_isr(int irq, void *dev_id)
1281{
1282 struct gsi *gsi = dev_id;
1283 u32 intr_mask;
1284 u32 cnt = 0;
1285
Alex Elderf9b28802020-11-05 12:13:58 -06001286 /* enum gsi_irq_type_id defines GSI interrupt types */
Alex Elder650d1602020-03-05 22:28:21 -06001287 while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
1288 /* intr_mask contains bitmask of pending GSI interrupts */
1289 do {
1290 u32 gsi_intr = BIT(__ffs(intr_mask));
1291
1292 intr_mask ^= gsi_intr;
1293
1294 switch (gsi_intr) {
Alex Elderf9b28802020-11-05 12:13:58 -06001295 case BIT(GSI_CH_CTRL):
Alex Elder650d1602020-03-05 22:28:21 -06001296 gsi_isr_chan_ctrl(gsi);
1297 break;
Alex Elderf9b28802020-11-05 12:13:58 -06001298 case BIT(GSI_EV_CTRL):
Alex Elder650d1602020-03-05 22:28:21 -06001299 gsi_isr_evt_ctrl(gsi);
1300 break;
Alex Elderf9b28802020-11-05 12:13:58 -06001301 case BIT(GSI_GLOB_EE):
Alex Elder650d1602020-03-05 22:28:21 -06001302 gsi_isr_glob_ee(gsi);
1303 break;
Alex Elderf9b28802020-11-05 12:13:58 -06001304 case BIT(GSI_IEOB):
Alex Elder650d1602020-03-05 22:28:21 -06001305 gsi_isr_ieob(gsi);
1306 break;
Alex Elderf9b28802020-11-05 12:13:58 -06001307 case BIT(GSI_GENERAL):
Alex Elder650d1602020-03-05 22:28:21 -06001308 gsi_isr_general(gsi);
1309 break;
1310 default:
1311 dev_err(gsi->dev,
Alex Elder84634882020-06-30 07:58:45 -05001312 "unrecognized interrupt type 0x%08x\n",
1313 gsi_intr);
Alex Elder650d1602020-03-05 22:28:21 -06001314 break;
1315 }
1316 } while (intr_mask);
1317
1318 if (++cnt > GSI_ISR_MAX_ITER) {
1319 dev_err(gsi->dev, "interrupt flood\n");
1320 break;
1321 }
1322 }
1323
1324 return IRQ_HANDLED;
1325}
1326
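/* Look up the "gsi" IRQ in the platform data and install gsi_isr() as its
 * handler.  The IRQ number is saved so gsi_irq_exit() can free it later.
 */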
Alex Elder0b8d6762020-11-05 12:13:56 -06001327static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
1328{
1329 struct device *dev = &pdev->dev;
1330 unsigned int irq;
1331 int ret;
1332
1333 ret = platform_get_irq_byname(pdev, "gsi");
1334 if (ret <= 0) {
1335 dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret);
1336 return ret ? : -EINVAL;
1337 }
1338 irq = ret;
1339
1340 ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
1341 if (ret) {
1342 dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret);
1343 return ret;
1344 }
1345 gsi->irq = irq;
1346
1347 return 0;
1348}
1349
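/* Inverse of gsi_irq_init() */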
1350static void gsi_irq_exit(struct gsi *gsi)
1351{
1352 free_irq(gsi->irq, gsi);
1353}
1354
Alex Elder650d1602020-03-05 22:28:21 -06001355/* Return the transaction associated with a transfer completion event */
1356static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
1357 struct gsi_event *event)
1358{
1359 u32 tre_offset;
1360 u32 tre_index;
1361
1362 /* Event xfer_ptr records the TRE it's associated with */
1363 tre_offset = le64_to_cpu(event->xfer_ptr) & GENMASK(31, 0);
1364 tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);
1365
1366 return gsi_channel_trans_mapped(channel, tre_index);
1367}
1368
1369/**
1370 * gsi_evt_ring_rx_update() - Record lengths of received data
1371 * @evt_ring: Event ring associated with channel that received packets
1372 * @index: Event index in ring reported by hardware
1373 *
1374 * Events for RX channels contain the actual number of bytes received into
1375 * the buffer. Every event has a transaction associated with it, and here
1376 * we update transactions to record their actual received lengths.
1377 *
1378 * This function is called whenever we learn that the GSI hardware has filled
 1379 * new events since the last time we checked. The ring's index field
 1380 * identifies the first entry in need of processing. The @index provided is the
1381 * first *unfilled* event in the ring (following the last filled one).
1382 *
1383 * Events are sequential within the event ring, and transactions are
1384 * sequential within the transaction pool.
1385 *
1386 * Note that @index always refers to an element *within* the event ring.
1387 */
1388static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
1389{
1390 struct gsi_channel *channel = evt_ring->channel;
1391 struct gsi_ring *ring = &evt_ring->ring;
1392 struct gsi_trans_info *trans_info;
1393 struct gsi_event *event_done;
1394 struct gsi_event *event;
1395 struct gsi_trans *trans;
 1396	u32 byte_count = 0;
	u32 trans_count = 0;
1397 u32 old_index;
1398 u32 event_avail;
1399
1400 trans_info = &channel->trans_info;
1401
1402 /* We'll start with the oldest un-processed event. RX channels
1403 * replenish receive buffers in single-TRE transactions, so we
1404 * can just map that event to its transaction. Transactions
1405 * associated with completion events are consecutive.
1406 */
1407 old_index = ring->index;
1408 event = gsi_ring_virt(ring, old_index);
1409 trans = gsi_event_trans(channel, event);
1410
1411 /* Compute the number of events to process before we wrap,
1412 * and determine when we'll be done processing events.
1413 */
1414 event_avail = ring->count - old_index % ring->count;
1415 event_done = gsi_ring_virt(ring, index);
1416 do {
1417 trans->len = __le16_to_cpu(event->len);
 1418		byte_count += trans->len;
		trans_count++;
1419
1420 /* Move on to the next event and transaction */
1421 if (--event_avail)
1422 event++;
1423 else
1424 event = gsi_ring_virt(ring, 0);
1425 trans = gsi_trans_pool_next(&trans_info->pool, trans);
1426 } while (event != event_done);
1427
 1428	/* We record RX bytes when they are received */
 1429	channel->byte_count += byte_count;
 1430	channel->trans_count += trans_count;
1431}
1432
1433/* Initialize a ring, including allocating DMA memory for its entries */
1434static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
1435{
1436 size_t size = count * GSI_RING_ELEMENT_SIZE;
1437 struct device *dev = gsi->dev;
1438 dma_addr_t addr;
1439
1440 /* Hardware requires a 2^n ring size, with alignment equal to size */
1441 ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
1442 if (ring->virt && addr % size) {
Dan Carpenter4ace7a62021-02-02 08:55:25 +03001443 dma_free_coherent(dev, size, ring->virt, addr);
Alex Elder650d1602020-03-05 22:28:21 -06001444 dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n",
Alex Elder84634882020-06-30 07:58:45 -05001445 size);
Alex Elder650d1602020-03-05 22:28:21 -06001446 return -EINVAL; /* Not a good error value, but distinct */
1447 } else if (!ring->virt) {
1448 return -ENOMEM;
1449 }
1450 ring->addr = addr;
1451 ring->count = count;
1452
1453 return 0;
1454}
1455
1456/* Free a previously-allocated ring */
1457static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring)
1458{
1459 size_t size = ring->count * GSI_RING_ELEMENT_SIZE;
1460
1461 dma_free_coherent(gsi->dev, size, ring->virt, ring->addr);
1462}
1463
1464/* Allocate an available event ring id */
1465static int gsi_evt_ring_id_alloc(struct gsi *gsi)
1466{
1467 u32 evt_ring_id;
1468
1469 if (gsi->event_bitmap == ~0U) {
1470 dev_err(gsi->dev, "event rings exhausted\n");
1471 return -ENOSPC;
1472 }
1473
1474 evt_ring_id = ffz(gsi->event_bitmap);
1475 gsi->event_bitmap |= BIT(evt_ring_id);
1476
1477 return (int)evt_ring_id;
1478}
1479
1480/* Free a previously-allocated event ring id */
1481static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id)
1482{
1483 gsi->event_bitmap &= ~BIT(evt_ring_id);
1484}
1485
1486/* Ring a channel doorbell, reporting the first un-filled entry */
1487void gsi_channel_doorbell(struct gsi_channel *channel)
1488{
1489 struct gsi_ring *tre_ring = &channel->tre_ring;
1490 u32 channel_id = gsi_channel_id(channel);
1491 struct gsi *gsi = channel->gsi;
1492 u32 val;
1493
1494 /* Note: index *must* be used modulo the ring count here */
1495 val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
1496 iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
1497}
1498
1499/* Consult hardware, move any newly completed transactions to completed list */
Alex Elder223f5b32021-01-21 05:48:19 -06001500static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
Alex Elder650d1602020-03-05 22:28:21 -06001501{
1502 u32 evt_ring_id = channel->evt_ring_id;
1503 struct gsi *gsi = channel->gsi;
1504 struct gsi_evt_ring *evt_ring;
1505 struct gsi_trans *trans;
1506 struct gsi_ring *ring;
1507 u32 offset;
1508 u32 index;
1509
1510 evt_ring = &gsi->evt_ring[evt_ring_id];
1511 ring = &evt_ring->ring;
1512
1513 /* See if there's anything new to process; if not, we're done. Note
1514 * that index always refers to an entry *within* the event ring.
1515 */
1516 offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
1517 index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
1518 if (index == ring->index % ring->count)
Alex Elder223f5b32021-01-21 05:48:19 -06001519 return NULL;
Alex Elder650d1602020-03-05 22:28:21 -06001520
1521 /* Get the transaction for the latest completed event. Take a
1522 * reference to keep it from completing before we give the events
1523 * for this and previous transactions back to the hardware.
1524 */
1525 trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1));
1526 refcount_inc(&trans->refcount);
1527
1528 /* For RX channels, update each completed transaction with the number
1529 * of bytes that were actually received. For TX channels, report
1530 * the number of transactions and bytes this completion represents
1531 * up the network stack.
1532 */
1533 if (channel->toward_ipa)
1534 gsi_channel_tx_update(channel, trans);
1535 else
1536 gsi_evt_ring_rx_update(evt_ring, index);
1537
1538 gsi_trans_move_complete(trans);
1539
1540 /* Tell the hardware we've handled these events */
1541 gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index);
1542
1543 gsi_trans_free(trans);
Alex Elder223f5b32021-01-21 05:48:19 -06001544
1545 return gsi_channel_trans_complete(channel);
Alex Elder650d1602020-03-05 22:28:21 -06001546}
1547
1548/**
1549 * gsi_channel_poll_one() - Return a single completed transaction on a channel
1550 * @channel: Channel to be polled
1551 *
Alex Eldere3eea082020-07-13 07:24:18 -05001552 * Return: Transaction pointer, or null if none are available
Alex Elder650d1602020-03-05 22:28:21 -06001553 *
1554 * This function returns the first entry on a channel's completed transaction
1555 * list. If that list is empty, the hardware is consulted to determine
1556 * whether any new transactions have completed. If so, they're moved to the
1557 * completed list and the new first entry is returned. If there are no more
1558 * completed transactions, a null pointer is returned.
1559 */
1560static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
1561{
1562 struct gsi_trans *trans;
1563
1564 /* Get the first transaction from the completed list */
1565 trans = gsi_channel_trans_complete(channel);
Alex Elder223f5b32021-01-21 05:48:19 -06001566 if (!trans) /* List is empty; see if there's more to do */
1567 trans = gsi_channel_update(channel);
Alex Elder650d1602020-03-05 22:28:21 -06001568
1569 if (trans)
1570 gsi_trans_move_polled(trans);
1571
1572 return trans;
1573}
1574
1575/**
1576 * gsi_channel_poll() - NAPI poll function for a channel
1577 * @napi: NAPI structure for the channel
1578 * @budget: Budget supplied by NAPI core
Alex Eldere3eea082020-07-13 07:24:18 -05001579 *
1580 * Return: Number of items polled (<= budget)
Alex Elder650d1602020-03-05 22:28:21 -06001581 *
1582 * Single transactions completed by hardware are polled until either
1583 * the budget is exhausted, or there are no more. Each transaction
1584 * polled is passed to gsi_trans_complete(), to perform remaining
1585 * completion processing and retire/free the transaction.
1586 */
1587static int gsi_channel_poll(struct napi_struct *napi, int budget)
1588{
1589 struct gsi_channel *channel;
Alex Elderc80c4a12021-01-21 05:48:17 -06001590 int count;
Alex Elder650d1602020-03-05 22:28:21 -06001591
1592 channel = container_of(napi, struct gsi_channel, napi);
Alex Elderc80c4a12021-01-21 05:48:17 -06001593 for (count = 0; count < budget; count++) {
Alex Elder650d1602020-03-05 22:28:21 -06001594 struct gsi_trans *trans;
1595
1596 trans = gsi_channel_poll_one(channel);
1597 if (!trans)
1598 break;
1599 gsi_trans_complete(trans);
1600 }
1601
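	/* If we ran out of completed transactions before exhausting the
	 * budget, complete NAPI and re-enable this channel's IEOB interrupt.
	 */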
Alex Elder148604e2021-01-21 05:48:18 -06001602 if (count < budget && napi_complete(napi))
Alex Elder57255932021-01-21 05:48:20 -06001603 gsi_irq_ieob_enable_one(channel->gsi, channel->evt_ring_id);
Alex Elder650d1602020-03-05 22:28:21 -06001604
1605 return count;
1606}
1607
1608/* The event bitmap represents which event ids are available for allocation.
 1609 * Set bits are not available; clear bits can be used. This function
1610 * initializes the map so all events supported by the hardware are available,
1611 * then precludes any reserved events from being allocated.
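 * For example (illustrative values), if the hardware supports 20 event
 * rings, bits 20 and above start out set (unavailable), and the MHI event
 * id range is additionally marked reserved before any ids are allocated.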
1612 */
1613static u32 gsi_event_bitmap_init(u32 evt_ring_max)
1614{
1615 u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max);
1616
1617 event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START);
1618
1619 return event_bitmap;
1620}
1621
1622/* Setup function for event rings */
1623static void gsi_evt_ring_setup(struct gsi *gsi)
1624{
1625 /* Nothing to do */
1626}
1627
1628/* Inverse of gsi_evt_ring_setup() */
1629static void gsi_evt_ring_teardown(struct gsi *gsi)
1630{
1631 /* Nothing to do */
1632}
1633
1634/* Setup function for a single channel */
Alex Elderd387c762020-11-02 11:54:00 -06001635static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
Alex Elder650d1602020-03-05 22:28:21 -06001636{
1637 struct gsi_channel *channel = &gsi->channel[channel_id];
1638 u32 evt_ring_id = channel->evt_ring_id;
1639 int ret;
1640
1641 if (!channel->gsi)
1642 return 0; /* Ignore uninitialized channels */
1643
1644 ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id);
1645 if (ret)
1646 return ret;
1647
1648 gsi_evt_ring_program(gsi, evt_ring_id);
1649
1650 ret = gsi_channel_alloc_command(gsi, channel_id);
1651 if (ret)
1652 goto err_evt_ring_de_alloc;
1653
Alex Elderd387c762020-11-02 11:54:00 -06001654 gsi_channel_program(channel, true);
Alex Elder650d1602020-03-05 22:28:21 -06001655
1656 if (channel->toward_ipa)
1657 netif_tx_napi_add(&gsi->dummy_dev, &channel->napi,
1658 gsi_channel_poll, NAPI_POLL_WEIGHT);
1659 else
1660 netif_napi_add(&gsi->dummy_dev, &channel->napi,
1661 gsi_channel_poll, NAPI_POLL_WEIGHT);
1662
1663 return 0;
1664
1665err_evt_ring_de_alloc:
1666 /* We've done nothing with the event ring yet so don't reset */
1667 gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
1668
1669 return ret;
1670}
1671
1672/* Inverse of gsi_channel_setup_one() */
1673static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
1674{
1675 struct gsi_channel *channel = &gsi->channel[channel_id];
1676 u32 evt_ring_id = channel->evt_ring_id;
1677
1678 if (!channel->gsi)
1679 return; /* Ignore uninitialized channels */
1680
1681 netif_napi_del(&channel->napi);
1682
1683 gsi_channel_deprogram(channel);
1684 gsi_channel_de_alloc_command(gsi, channel_id);
1685 gsi_evt_ring_reset_command(gsi, evt_ring_id);
1686 gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
1687}
1688
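/* Issue a generic EE command (used to allocate or halt a modem channel) and
 * wait for its completion, which is signaled by a GP_INT1 interrupt.  The
 * result recorded by the interrupt handler is returned; -ETIMEDOUT is
 * returned if the command does not complete in time.
 */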
1689static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
1690 enum gsi_generic_cmd_opcode opcode)
1691{
1692 struct completion *completion = &gsi->completion;
Alex Elderd9cbe812021-01-13 11:15:27 -06001693 bool timeout;
Alex Elder650d1602020-03-05 22:28:21 -06001694 u32 val;
1695
Alex Elderd6c9e3f2020-11-05 12:14:03 -06001696 /* The error global interrupt type is always enabled (until we
 1697 * tear it down), so we won't change that. A generic EE command
1698 * completes with a GSI global interrupt of type GP_INT1. We
1699 * only perform one generic command at a time (to allocate or
1700 * halt a modem channel) and only from this function. So we
1701 * enable the GP_INT1 IRQ type here while we're expecting it.
1702 */
Alex Elder6c6358c2020-11-10 15:59:17 -06001703 val = BIT(ERROR_INT) | BIT(GP_INT1);
Alex Elderd6c9e3f2020-11-05 12:14:03 -06001704 iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
1705
Alex Elder0b1ba182020-04-30 16:35:12 -05001706 /* First zero the result code field */
1707 val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
1708 val &= ~GENERIC_EE_RESULT_FMASK;
1709 iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
1710
1711 /* Now issue the command */
Alex Elder650d1602020-03-05 22:28:21 -06001712 val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
1713 val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
1714 val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
1715
Alex Elderd9cbe812021-01-13 11:15:27 -06001716 timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion);
Alex Elderd6c9e3f2020-11-05 12:14:03 -06001717
1718 /* Disable the GP_INT1 IRQ type again */
Alex Elder6c6358c2020-11-10 15:59:17 -06001719 iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
Alex Elderd6c9e3f2020-11-05 12:14:03 -06001720
Alex Elderd9cbe812021-01-13 11:15:27 -06001721 if (!timeout)
Alex Elder11361452020-11-19 16:49:27 -06001722 return gsi->result;
Alex Elder650d1602020-03-05 22:28:21 -06001723
1724 dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
1725 opcode, channel_id);
1726
1727 return -ETIMEDOUT;
1728}
1729
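/* Ask the GSI hardware to allocate a channel on behalf of the modem */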
1730static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
1731{
1732 return gsi_generic_command(gsi, channel_id,
1733 GSI_GENERIC_ALLOCATE_CHANNEL);
1734}
1735
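/* Halt a modem channel, retrying a limited number of times if the command
 * reports it should be retried (-EAGAIN).
 */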
1736static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
1737{
Alex Elder11361452020-11-19 16:49:27 -06001738 u32 retries = GSI_CHANNEL_MODEM_HALT_RETRIES;
1739 int ret;
1740
1741 do
1742 ret = gsi_generic_command(gsi, channel_id,
1743 GSI_GENERIC_HALT_CHANNEL);
1744 while (ret == -EAGAIN && retries--);
1745
1746 if (ret)
1747 dev_err(gsi->dev, "error %d halting modem channel %u\n",
1748 ret, channel_id);
Alex Elder650d1602020-03-05 22:28:21 -06001749}
1750
1751/* Setup function for channels */
Alex Elderd387c762020-11-02 11:54:00 -06001752static int gsi_channel_setup(struct gsi *gsi)
Alex Elder650d1602020-03-05 22:28:21 -06001753{
1754 u32 channel_id = 0;
1755 u32 mask;
1756 int ret;
1757
1758 gsi_evt_ring_setup(gsi);
1759 gsi_irq_enable(gsi);
1760
1761 mutex_lock(&gsi->mutex);
1762
1763 do {
Alex Elderd387c762020-11-02 11:54:00 -06001764 ret = gsi_channel_setup_one(gsi, channel_id);
Alex Elder650d1602020-03-05 22:28:21 -06001765 if (ret)
1766 goto err_unwind;
1767 } while (++channel_id < gsi->channel_count);
1768
1769 /* Make sure no channels were defined that hardware does not support */
1770 while (channel_id < GSI_CHANNEL_COUNT_MAX) {
1771 struct gsi_channel *channel = &gsi->channel[channel_id++];
1772
1773 if (!channel->gsi)
1774 continue; /* Ignore uninitialized channels */
1775
1776 dev_err(gsi->dev, "channel %u not supported by hardware\n",
1777 channel_id - 1);
1778 channel_id = gsi->channel_count;
1779 goto err_unwind;
1780 }
1781
1782 /* Allocate modem channels if necessary */
1783 mask = gsi->modem_channel_bitmap;
1784 while (mask) {
1785 u32 modem_channel_id = __ffs(mask);
1786
1787 ret = gsi_modem_channel_alloc(gsi, modem_channel_id);
1788 if (ret)
1789 goto err_unwind_modem;
1790
1791 /* Clear bit from mask only after success (for unwind) */
1792 mask ^= BIT(modem_channel_id);
1793 }
1794
1795 mutex_unlock(&gsi->mutex);
1796
1797 return 0;
1798
1799err_unwind_modem:
1800 /* Compute which modem channels need to be deallocated */
1801 mask ^= gsi->modem_channel_bitmap;
1802 while (mask) {
Alex Elder993cac12020-09-28 18:04:44 -05001803 channel_id = __fls(mask);
Alex Elder650d1602020-03-05 22:28:21 -06001804
1805 mask ^= BIT(channel_id);
1806
1807 gsi_modem_channel_halt(gsi, channel_id);
1808 }
1809
1810err_unwind:
1811 while (channel_id--)
1812 gsi_channel_teardown_one(gsi, channel_id);
1813
1814 mutex_unlock(&gsi->mutex);
1815
1816 gsi_irq_disable(gsi);
1817 gsi_evt_ring_teardown(gsi);
1818
1819 return ret;
1820}
1821
1822/* Inverse of gsi_channel_setup() */
1823static void gsi_channel_teardown(struct gsi *gsi)
1824{
1825 u32 mask = gsi->modem_channel_bitmap;
1826 u32 channel_id;
1827
1828 mutex_lock(&gsi->mutex);
1829
1830 while (mask) {
Alex Elder993cac12020-09-28 18:04:44 -05001831 channel_id = __fls(mask);
Alex Elder650d1602020-03-05 22:28:21 -06001832
1833 mask ^= BIT(channel_id);
1834
1835 gsi_modem_channel_halt(gsi, channel_id);
1836 }
1837
1838 channel_id = gsi->channel_count - 1;
1839 do
1840 gsi_channel_teardown_one(gsi, channel_id);
1841 while (channel_id--);
1842
1843 mutex_unlock(&gsi->mutex);
1844
1845 gsi_irq_disable(gsi);
1846 gsi_evt_ring_teardown(gsi);
1847}
1848
1849/* Setup function for GSI. GSI firmware must be loaded and initialized */
Alex Elderd387c762020-11-02 11:54:00 -06001850int gsi_setup(struct gsi *gsi)
Alex Elder650d1602020-03-05 22:28:21 -06001851{
Alex Elder84634882020-06-30 07:58:45 -05001852 struct device *dev = gsi->dev;
Alex Elder650d1602020-03-05 22:28:21 -06001853 u32 val;
Alex Elder97eb94c2020-11-05 12:13:59 -06001854 int ret;
Alex Elder650d1602020-03-05 22:28:21 -06001855
1856 /* Here is where we first touch the GSI hardware */
1857 val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
1858 if (!(val & ENABLED_FMASK)) {
Alex Elder84634882020-06-30 07:58:45 -05001859 dev_err(dev, "GSI has not been enabled\n");
Alex Elder650d1602020-03-05 22:28:21 -06001860 return -EIO;
1861 }
1862
Alex Elder97eb94c2020-11-05 12:13:59 -06001863 gsi_irq_setup(gsi);
1864
Alex Elder650d1602020-03-05 22:28:21 -06001865 val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);
1866
1867 gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
1868 if (!gsi->channel_count) {
Alex Elder84634882020-06-30 07:58:45 -05001869 dev_err(dev, "GSI reports zero channels supported\n");
Alex Elder650d1602020-03-05 22:28:21 -06001870 return -EINVAL;
1871 }
1872 if (gsi->channel_count > GSI_CHANNEL_COUNT_MAX) {
Alex Elder84634882020-06-30 07:58:45 -05001873 dev_warn(dev,
1874 "limiting to %u channels; hardware supports %u\n",
Alex Elder650d1602020-03-05 22:28:21 -06001875 GSI_CHANNEL_COUNT_MAX, gsi->channel_count);
1876 gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
1877 }
1878
1879 gsi->evt_ring_count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
1880 if (!gsi->evt_ring_count) {
Alex Elder84634882020-06-30 07:58:45 -05001881 dev_err(dev, "GSI reports zero event rings supported\n");
Alex Elder650d1602020-03-05 22:28:21 -06001882 return -EINVAL;
1883 }
1884 if (gsi->evt_ring_count > GSI_EVT_RING_COUNT_MAX) {
Alex Elder84634882020-06-30 07:58:45 -05001885 dev_warn(dev,
1886 "limiting to %u event rings; hardware supports %u\n",
Alex Elder650d1602020-03-05 22:28:21 -06001887 GSI_EVT_RING_COUNT_MAX, gsi->evt_ring_count);
1888 gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
1889 }
1890
1891 /* Initialize the error log */
1892 iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
1893
1894 /* Writing 1 indicates IRQ interrupts; 0 would be MSI */
1895 iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);
1896
Alex Elder97eb94c2020-11-05 12:13:59 -06001897 ret = gsi_channel_setup(gsi);
1898 if (ret)
1899 gsi_irq_teardown(gsi);
1900
1901 return ret;
Alex Elder650d1602020-03-05 22:28:21 -06001902}
1903
1904/* Inverse of gsi_setup() */
1905void gsi_teardown(struct gsi *gsi)
1906{
1907 gsi_channel_teardown(gsi);
Alex Elder97eb94c2020-11-05 12:13:59 -06001908 gsi_irq_teardown(gsi);
Alex Elder650d1602020-03-05 22:28:21 -06001909}
1910
1911/* Initialize a channel's event ring */
1912static int gsi_channel_evt_ring_init(struct gsi_channel *channel)
1913{
1914 struct gsi *gsi = channel->gsi;
1915 struct gsi_evt_ring *evt_ring;
1916 int ret;
1917
1918 ret = gsi_evt_ring_id_alloc(gsi);
1919 if (ret < 0)
1920 return ret;
1921 channel->evt_ring_id = ret;
1922
1923 evt_ring = &gsi->evt_ring[channel->evt_ring_id];
1924 evt_ring->channel = channel;
1925
1926 ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count);
1927 if (!ret)
1928 return 0; /* Success! */
1929
1930 dev_err(gsi->dev, "error %d allocating channel %u event ring\n",
1931 ret, gsi_channel_id(channel));
1932
1933 gsi_evt_ring_id_free(gsi, channel->evt_ring_id);
1934
1935 return ret;
1936}
1937
1938/* Inverse of gsi_channel_evt_ring_init() */
1939static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
1940{
1941 u32 evt_ring_id = channel->evt_ring_id;
1942 struct gsi *gsi = channel->gsi;
1943 struct gsi_evt_ring *evt_ring;
1944
1945 evt_ring = &gsi->evt_ring[evt_ring_id];
1946 gsi_ring_free(gsi, &evt_ring->ring);
1947 gsi_evt_ring_id_free(gsi, evt_ring_id);
1948}
1949
1950/* Init function for event rings */
1951static void gsi_evt_ring_init(struct gsi *gsi)
1952{
1953 u32 evt_ring_id = 0;
1954
1955 gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
Alex Eldera0545392020-11-05 12:13:57 -06001956 gsi->ieob_enabled_bitmap = 0;
Alex Elder650d1602020-03-05 22:28:21 -06001957 do
1958 init_completion(&gsi->evt_ring[evt_ring_id].completion);
1959 while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
1960}
1961
1962/* Inverse of gsi_evt_ring_init() */
1963static void gsi_evt_ring_exit(struct gsi *gsi)
1964{
1965 /* Nothing to do */
1966}
1967
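/* Validate a channel's configuration data.  These checks are compiled in
 * only when IPA_VALIDATION is defined; otherwise the data is assumed valid.
 */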
1968static bool gsi_channel_data_valid(struct gsi *gsi,
1969 const struct ipa_gsi_endpoint_data *data)
1970{
1971#ifdef IPA_VALIDATION
1972 u32 channel_id = data->channel_id;
1973 struct device *dev = gsi->dev;
1974
1975 /* Make sure channel ids are in the range driver supports */
1976 if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
Alex Elder84634882020-06-30 07:58:45 -05001977 dev_err(dev, "bad channel id %u; must be less than %u\n",
Alex Elder650d1602020-03-05 22:28:21 -06001978 channel_id, GSI_CHANNEL_COUNT_MAX);
1979 return false;
1980 }
1981
1982 if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
Alex Elder84634882020-06-30 07:58:45 -05001983 dev_err(dev, "bad EE id %u; not AP or modem\n", data->ee_id);
Alex Elder650d1602020-03-05 22:28:21 -06001984 return false;
1985 }
1986
1987 if (!data->channel.tlv_count ||
1988 data->channel.tlv_count > GSI_TLV_MAX) {
Alex Elder84634882020-06-30 07:58:45 -05001989 dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n",
Alex Elder650d1602020-03-05 22:28:21 -06001990 channel_id, data->channel.tlv_count, GSI_TLV_MAX);
1991 return false;
1992 }
1993
1994 /* We have to allow at least one maximally-sized transaction to
1995 * be outstanding (which would use tlv_count TREs). Given how
1996 * gsi_channel_tre_max() is computed, tre_count has to be almost
1997 * twice the TLV FIFO size to satisfy this requirement.
1998 */
1999 if (data->channel.tre_count < 2 * data->channel.tlv_count - 1) {
 2000		dev_err(dev, "channel %u TRE count %u too small for TLV count %u\n",
 2001			channel_id, data->channel.tre_count,
 2002			data->channel.tlv_count);
2003 return false;
2004 }
2005
2006 if (!is_power_of_2(data->channel.tre_count)) {
Alex Elder84634882020-06-30 07:58:45 -05002007 dev_err(dev, "channel %u bad tre_count %u; not power of 2\n",
Alex Elder650d1602020-03-05 22:28:21 -06002008 channel_id, data->channel.tre_count);
2009 return false;
2010 }
2011
2012 if (!is_power_of_2(data->channel.event_count)) {
Alex Elder84634882020-06-30 07:58:45 -05002013 dev_err(dev, "channel %u bad event_count %u; not power of 2\n",
Alex Elder650d1602020-03-05 22:28:21 -06002014 channel_id, data->channel.event_count);
2015 return false;
2016 }
2017#endif /* IPA_VALIDATION */
2018
2019 return true;
2020}
2021
2022/* Init function for a single channel */
2023static int gsi_channel_init_one(struct gsi *gsi,
2024 const struct ipa_gsi_endpoint_data *data,
Alex Elder14dbf972020-11-02 11:53:56 -06002025 bool command)
Alex Elder650d1602020-03-05 22:28:21 -06002026{
2027 struct gsi_channel *channel;
2028 u32 tre_count;
2029 int ret;
2030
2031 if (!gsi_channel_data_valid(gsi, data))
2032 return -EINVAL;
2033
2034 /* Worst case we need an event for every outstanding TRE */
2035 if (data->channel.tre_count > data->channel.event_count) {
Alex Elder650d1602020-03-05 22:28:21 -06002036 tre_count = data->channel.event_count;
Alex Elder07219992020-04-30 16:35:11 -05002037 dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
2038 data->channel_id, tre_count);
Alex Elder650d1602020-03-05 22:28:21 -06002039 } else {
2040 tre_count = data->channel.tre_count;
2041 }
2042
2043 channel = &gsi->channel[data->channel_id];
2044 memset(channel, 0, sizeof(*channel));
2045
2046 channel->gsi = gsi;
2047 channel->toward_ipa = data->toward_ipa;
2048 channel->command = command;
Alex Elder650d1602020-03-05 22:28:21 -06002049 channel->tlv_count = data->channel.tlv_count;
2050 channel->tre_count = tre_count;
2051 channel->event_count = data->channel.event_count;
2052 init_completion(&channel->completion);
2053
2054 ret = gsi_channel_evt_ring_init(channel);
2055 if (ret)
2056 goto err_clear_gsi;
2057
2058 ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count);
2059 if (ret) {
2060 dev_err(gsi->dev, "error %d allocating channel %u ring\n",
2061 ret, data->channel_id);
2062 goto err_channel_evt_ring_exit;
2063 }
2064
2065 ret = gsi_channel_trans_init(gsi, data->channel_id);
2066 if (ret)
2067 goto err_ring_free;
2068
2069 if (command) {
2070 u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id);
2071
2072 ret = ipa_cmd_pool_init(channel, tre_max);
2073 }
2074 if (!ret)
2075 return 0; /* Success! */
2076
2077 gsi_channel_trans_exit(channel);
2078err_ring_free:
2079 gsi_ring_free(gsi, &channel->tre_ring);
2080err_channel_evt_ring_exit:
2081 gsi_channel_evt_ring_exit(channel);
2082err_clear_gsi:
2083 channel->gsi = NULL; /* Mark it not (fully) initialized */
2084
2085 return ret;
2086}
2087
2088/* Inverse of gsi_channel_init_one() */
2089static void gsi_channel_exit_one(struct gsi_channel *channel)
2090{
2091 if (!channel->gsi)
2092 return; /* Ignore uninitialized channels */
2093
2094 if (channel->command)
2095 ipa_cmd_pool_exit(channel);
2096 gsi_channel_trans_exit(channel);
2097 gsi_ring_free(channel->gsi, &channel->tre_ring);
2098 gsi_channel_evt_ring_exit(channel);
2099}
2100
2101/* Init function for channels */
Alex Elder14dbf972020-11-02 11:53:56 -06002102static int gsi_channel_init(struct gsi *gsi, u32 count,
Alex Elder56dfe8d2020-11-02 11:53:57 -06002103 const struct ipa_gsi_endpoint_data *data)
Alex Elder650d1602020-03-05 22:28:21 -06002104{
Alex Elder56dfe8d2020-11-02 11:53:57 -06002105 bool modem_alloc;
Alex Elder650d1602020-03-05 22:28:21 -06002106 int ret = 0;
2107 u32 i;
2108
Alex Elder56dfe8d2020-11-02 11:53:57 -06002109 /* IPA v4.2 requires the AP to allocate channels for the modem */
2110 modem_alloc = gsi->version == IPA_VERSION_4_2;
2111
Alex Elder650d1602020-03-05 22:28:21 -06002112 gsi_evt_ring_init(gsi);
2113
2114 /* The endpoint data array is indexed by endpoint name */
2115 for (i = 0; i < count; i++) {
2116 bool command = i == IPA_ENDPOINT_AP_COMMAND_TX;
2117
2118 if (ipa_gsi_endpoint_data_empty(&data[i]))
2119 continue; /* Skip over empty slots */
2120
2121 /* Mark modem channels to be allocated (hardware workaround) */
2122 if (data[i].ee_id == GSI_EE_MODEM) {
2123 if (modem_alloc)
2124 gsi->modem_channel_bitmap |=
2125 BIT(data[i].channel_id);
2126 continue;
2127 }
2128
Alex Elder14dbf972020-11-02 11:53:56 -06002129 ret = gsi_channel_init_one(gsi, &data[i], command);
Alex Elder650d1602020-03-05 22:28:21 -06002130 if (ret)
2131 goto err_unwind;
2132 }
2133
2134 return ret;
2135
2136err_unwind:
2137 while (i--) {
2138 if (ipa_gsi_endpoint_data_empty(&data[i]))
2139 continue;
2140 if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) {
2141 gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id);
2142 continue;
2143 }
 2144		gsi_channel_exit_one(&gsi->channel[data[i].channel_id]);
2145 }
2146 gsi_evt_ring_exit(gsi);
2147
2148 return ret;
2149}
2150
2151/* Inverse of gsi_channel_init() */
2152static void gsi_channel_exit(struct gsi *gsi)
2153{
2154 u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1;
2155
2156 do
2157 gsi_channel_exit_one(&gsi->channel[channel_id]);
2158 while (channel_id--);
2159 gsi->modem_channel_bitmap = 0;
2160
2161 gsi_evt_ring_exit(gsi);
2162}
2163
2164/* Init function for GSI. GSI hardware does not need to be "ready" */
Alex Elder1d0c09d2020-11-02 11:53:55 -06002165int gsi_init(struct gsi *gsi, struct platform_device *pdev,
2166 enum ipa_version version, u32 count,
2167 const struct ipa_gsi_endpoint_data *data)
Alex Elder650d1602020-03-05 22:28:21 -06002168{
Alex Elder84634882020-06-30 07:58:45 -05002169 struct device *dev = &pdev->dev;
Alex Elder650d1602020-03-05 22:28:21 -06002170 struct resource *res;
2171 resource_size_t size;
Alex Eldercdeee492020-11-25 14:45:22 -06002172 u32 adjust;
Alex Elder650d1602020-03-05 22:28:21 -06002173 int ret;
2174
2175 gsi_validate_build();
2176
Alex Elder84634882020-06-30 07:58:45 -05002177 gsi->dev = dev;
Alex Elder14dbf972020-11-02 11:53:56 -06002178 gsi->version = version;
Alex Elder650d1602020-03-05 22:28:21 -06002179
2180 /* The GSI layer performs NAPI on all endpoints. NAPI requires a
2181 * network device structure, but the GSI layer does not have one,
2182 * so we must create a dummy network device for this purpose.
2183 */
2184 init_dummy_netdev(&gsi->dummy_dev);
2185
Alex Elder650d1602020-03-05 22:28:21 -06002186 /* Get GSI memory range and map it */
2187 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
2188 if (!res) {
Alex Elder84634882020-06-30 07:58:45 -05002189 dev_err(dev, "DT error getting \"gsi\" memory property\n");
Alex Elder0b8d6762020-11-05 12:13:56 -06002190 return -ENODEV;
Alex Elder650d1602020-03-05 22:28:21 -06002191 }
2192
2193 size = resource_size(res);
2194 if (res->start > U32_MAX || size > U32_MAX - res->start) {
Alex Elder84634882020-06-30 07:58:45 -05002195 dev_err(dev, "DT memory resource \"gsi\" out of range\n");
Alex Elder0b8d6762020-11-05 12:13:56 -06002196 return -EINVAL;
Alex Elder650d1602020-03-05 22:28:21 -06002197 }
2198
Alex Eldercdeee492020-11-25 14:45:22 -06002199 /* Make sure we can make our pointer adjustment if necessary */
2200 adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
2201 if (res->start < adjust) {
2202 dev_err(dev, "DT memory resource \"gsi\" too low (< %u)\n",
2203 adjust);
2204 return -EINVAL;
2205 }
2206
Alex Elder650d1602020-03-05 22:28:21 -06002207 gsi->virt = ioremap(res->start, size);
2208 if (!gsi->virt) {
Alex Elder84634882020-06-30 07:58:45 -05002209 dev_err(dev, "unable to remap \"gsi\" memory\n");
Alex Elder0b8d6762020-11-05 12:13:56 -06002210 return -ENOMEM;
Alex Elder650d1602020-03-05 22:28:21 -06002211 }
Alex Eldercdeee492020-11-25 14:45:22 -06002212 /* Adjust register range pointer downward for newer IPA versions */
2213 gsi->virt -= adjust;
Alex Elder650d1602020-03-05 22:28:21 -06002214
Alex Elder0b8d6762020-11-05 12:13:56 -06002215 init_completion(&gsi->completion);
2216
2217 ret = gsi_irq_init(gsi, pdev);
Alex Elder650d1602020-03-05 22:28:21 -06002218 if (ret)
2219 goto err_iounmap;
2220
Alex Elder0b8d6762020-11-05 12:13:56 -06002221 ret = gsi_channel_init(gsi, count, data);
2222 if (ret)
2223 goto err_irq_exit;
2224
Alex Elder650d1602020-03-05 22:28:21 -06002225 mutex_init(&gsi->mutex);
Alex Elder650d1602020-03-05 22:28:21 -06002226
2227 return 0;
2228
Alex Elder0b8d6762020-11-05 12:13:56 -06002229err_irq_exit:
2230 gsi_irq_exit(gsi);
Alex Elder650d1602020-03-05 22:28:21 -06002231err_iounmap:
2232 iounmap(gsi->virt);
Alex Elder650d1602020-03-05 22:28:21 -06002233
2234 return ret;
2235}
2236
2237/* Inverse of gsi_init() */
2238void gsi_exit(struct gsi *gsi)
2239{
2240 mutex_destroy(&gsi->mutex);
2241 gsi_channel_exit(gsi);
Alex Elder0b8d6762020-11-05 12:13:56 -06002242 gsi_irq_exit(gsi);
Alex Elder650d1602020-03-05 22:28:21 -06002243 iounmap(gsi->virt);
2244}
2245
2246/* The maximum number of outstanding TREs on a channel. This limits
2247 * a channel's maximum number of transactions outstanding (worst case
2248 * is one TRE per transaction).
2249 *
2250 * The absolute limit is the number of TREs in the channel's TRE ring,
 2251 * and in theory we should be able to use all of them. But in practice,
2252 * doing that led to the hardware reporting exhaustion of event ring
2253 * slots for writing completion information. So the hardware limit
2254 * would be (tre_count - 1).
2255 *
2256 * We reduce it a bit further though. Transaction resource pools are
2257 * sized to be a little larger than this maximum, to allow resource
2258 * allocations to always be contiguous. The number of entries in a
2259 * TRE ring buffer is a power of 2, and the extra resources in a pool
 2260 * tend to nearly double the memory allocated for it. Reducing the
2261 * maximum number of outstanding TREs allows the number of entries in
2262 * a pool to avoid crossing that power-of-2 boundary, and this can
2263 * substantially reduce pool memory requirements. The number we
2264 * reduce it by matches the number added in gsi_trans_pool_init().
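 * For example (illustrative numbers only), a channel with a 256-entry TRE
 * ring and a TLV FIFO size of 16 could have at most 256 - (16 - 1) = 241
 * TREs outstanding at once.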
2265 */
2266u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
2267{
2268 struct gsi_channel *channel = &gsi->channel[channel_id];
2269
2270 /* Hardware limit is channel->tre_count - 1 */
2271 return channel->tre_count - (channel->tlv_count - 1);
2272}
2273
2274/* Returns the maximum number of TREs in a single transaction for a channel */
2275u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id)
2276{
2277 struct gsi_channel *channel = &gsi->channel[channel_id];
2278
2279 return channel->tlv_count;
2280}