/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <asm/synch.h>
#include <misc/cxl-base.h>

#include "cxl.h"
#include "trace.h"

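/*
 * Write a command into the AFU control register and spin until the masked
 * status matches the expected result. Returns 0 on success, -EBUSY if the
 * CXL_TIMEOUT expires and -EIO if the PCI link drops; afu->enabled is
 * updated to the requested state on success (and on link-down, so later
 * teardown sees a consistent view). Takes afu_cntl_lock internally.
 */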
static int afu_control(struct cxl_afu *afu, u64 command,
		       u64 result, u64 mask, bool enabled)
{
	u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	spin_lock(&afu->afu_cntl_lock);
	pr_devel("AFU command starting: %llx\n", command);

	trace_cxl_afu_ctrl(afu, command);

	cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl | command);

	AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	while ((AFU_Cntl & mask) != result) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&afu->dev, "WARNING: AFU control timed out!\n");
			rc = -EBUSY;
			goto out;
		}

		if (!cxl_ops->link_ok(afu->adapter, afu)) {
			afu->enabled = enabled;
			rc = -EIO;
			goto out;
		}

		pr_devel_ratelimited("AFU control... (0x%016llx)\n",
				     AFU_Cntl | command);
		cpu_relax();
		AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	}
	pr_devel("AFU command complete: %llx\n", command);
	afu->enabled = enabled;
out:
	trace_cxl_afu_ctrl_done(afu, command, rc);
	spin_unlock(&afu->afu_cntl_lock);

	return rc;
}

static int afu_enable(struct cxl_afu *afu)
{
	pr_devel("AFU enable request\n");

	return afu_control(afu, CXL_AFU_Cntl_An_E,
			   CXL_AFU_Cntl_An_ES_Enabled,
			   CXL_AFU_Cntl_An_ES_MASK, true);
}

int cxl_afu_disable(struct cxl_afu *afu)
{
	pr_devel("AFU disable request\n");

	return afu_control(afu, 0, CXL_AFU_Cntl_An_ES_Disabled,
			   CXL_AFU_Cntl_An_ES_MASK, false);
}

/* This will disable as well as reset */
static int native_afu_reset(struct cxl_afu *afu)
{
	pr_devel("AFU reset request\n");

	return afu_control(afu, CXL_AFU_Cntl_An_RA,
			   CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled,
			   CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
			   false);
}

static int native_afu_check_and_enable(struct cxl_afu *afu)
{
	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		WARN(1, "Refusing to enable afu while link down!\n");
		return -EIO;
	}
	if (afu->enabled)
		return 0;
	return afu_enable(afu);
}

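/*
 * Purge all outstanding transactions in the PSL for this slice: set the
 * purge request bit in CXL_PSL_SCNTL_An, then poll until the purge status
 * leaves the "pending" state. While waiting, any faults the purge flushes
 * out are acked (translation faults with an address error) so the PSL can
 * make progress. The AFU is expected to be disabled before this is called.
 */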
int cxl_psl_purge(struct cxl_afu *afu)
{
	u64 PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	u64 dsisr, dar;
	u64 start, end;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	trace_cxl_psl_ctrl(afu, CXL_PSL_SCNTL_An_Pc);

	pr_devel("PSL purge request\n");

	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n");
		rc = -EIO;
		goto out;
	}

	if ((AFU_Cntl & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
		WARN(1, "psl_purge request while AFU not disabled!\n");
		cxl_afu_disable(afu);
	}

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
		      PSL_CNTL | CXL_PSL_SCNTL_An_Pc);
	start = local_clock();
	PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	while ((PSL_CNTL & CXL_PSL_SCNTL_An_Ps_MASK)
			== CXL_PSL_SCNTL_An_Ps_Pending) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&afu->dev, "WARNING: PSL Purge timed out!\n");
			rc = -EBUSY;
			goto out;
		}
		if (!cxl_ops->link_ok(afu->adapter, afu)) {
			rc = -EIO;
			goto out;
		}

		dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
		pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%016llx  PSL_DSISR: 0x%016llx\n", PSL_CNTL, dsisr);
		if (dsisr & CXL_PSL_DSISR_TRANS) {
			dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
			dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%016llx, DAR: 0x%016llx\n", dsisr, dar);
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
		} else if (dsisr) {
			dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%016llx\n", dsisr);
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
		} else {
			cpu_relax();
		}
		PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	}
	end = local_clock();
	pr_devel("PSL purged in %lld ns\n", end - start);

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
		      PSL_CNTL & ~CXL_PSL_SCNTL_An_Pc);
out:
	trace_cxl_psl_ctrl_done(afu, CXL_PSL_SCNTL_An_Pc, rc);
	return rc;
}

static int spa_max_procs(int spa_size)
{
	/*
	 * From the CAIA:
	 * end_of_SPA_area = SPA_Base + ((n+4) * 128) + (( ((n*8) + 127) >> 7) * 128) + 255
	 * Most of that junk is really just an overly-complicated way of saying
	 * the last 256 bytes are __aligned(128), so it's really:
	 * end_of_SPA_area = end_of_PSL_queue_area + __aligned(128) 255
	 * and
	 * end_of_PSL_queue_area = SPA_Base + ((n+4) * 128) + (n*8) - 1
	 * so
	 * sizeof(SPA) = ((n+4) * 128) + (n*8) + __aligned(128) 256
	 * Ignore the alignment (which is safe in this case as long as we are
	 * careful with our rounding) and solve for n:
	 */
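	/*
	 * That is, sizeof(SPA) = 128*(n+4) + 8*n + 256 = 136*n + 768 must fit
	 * in spa_size, so 136*n <= spa_size - 768. Dividing through by 8
	 * gives 17*n <= (spa_size / 8) - 96, hence:
	 */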
	return ((spa_size / 8) - 96) / 17;
}

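/*
 * Allocate the Scheduled Process Area: double the allocation order until
 * the SPA can hold afu->num_procs process elements. If the SPA would grow
 * past 0x100000 bytes, clamp num_procs to what the largest SPA can hold
 * instead.
 */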
int cxl_alloc_spa(struct cxl_afu *afu)
{
	unsigned spa_size;

	/* Work out how many pages to allocate */
	afu->native->spa_order = 0;
	do {
		afu->native->spa_order++;
		spa_size = (1 << afu->native->spa_order) * PAGE_SIZE;

		if (spa_size > 0x100000) {
			dev_warn(&afu->dev, "num_of_processes too large for the SPA, limiting to %i (0x%x)\n",
					afu->native->spa_max_procs, afu->native->spa_size);
			afu->num_procs = afu->native->spa_max_procs;
			break;
		}

		afu->native->spa_size = spa_size;
		afu->native->spa_max_procs = spa_max_procs(afu->native->spa_size);
	} while (afu->native->spa_max_procs < afu->num_procs);

	if (!(afu->native->spa = (struct cxl_process_element *)
	      __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->native->spa_order))) {
		pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n");
		return -ENOMEM;
	}
	pr_devel("spa pages: %i afu->spa_max_procs: %i   afu->num_procs: %i\n",
		 1<<afu->native->spa_order, afu->native->spa_max_procs, afu->num_procs);

	return 0;
}

static void attach_spa(struct cxl_afu *afu)
{
	u64 spap;

	afu->native->sw_command_status = (__be64 *)((char *)afu->native->spa +
					    ((afu->native->spa_max_procs + 3) * 128));

	spap = virt_to_phys(afu->native->spa) & CXL_PSL_SPAP_Addr;
	spap |= ((afu->native->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size;
	spap |= CXL_PSL_SPAP_V;
	pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n",
		 afu->native->spa, afu->native->spa_max_procs,
		 afu->native->sw_command_status, spap);
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);
}

static inline void detach_spa(struct cxl_afu *afu)
{
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);
}

void cxl_release_spa(struct cxl_afu *afu)
{
	if (afu->native->spa) {
		free_pages((unsigned long) afu->native->spa,
			   afu->native->spa_order);
		afu->native->spa = NULL;
	}
}

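/*
 * Adapter-wide TLB and SLB invalidation: select all AFUs, then issue
 * TLBIA followed by SLBIA and poll each for completion, bailing out on
 * timeout or if the link goes down.
 */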
int cxl_tlb_slb_invalidate(struct cxl *adapter)
{
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

	pr_devel("CXL adapter wide TLBIA & SLBIA\n");

	cxl_p1_write(adapter, CXL_PSL_AFUSEL, CXL_PSL_AFUSEL_A);

	cxl_p1_write(adapter, CXL_PSL_TLBIA, CXL_TLB_SLB_IQ_ALL);
	while (cxl_p1_read(adapter, CXL_PSL_TLBIA) & CXL_TLB_SLB_P) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n");
			return -EBUSY;
		}
		if (!cxl_ops->link_ok(adapter, NULL))
			return -EIO;
		cpu_relax();
	}

	cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_ALL);
	while (cxl_p1_read(adapter, CXL_PSL_SLBIA) & CXL_TLB_SLB_P) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n");
			return -EBUSY;
		}
		if (!cxl_ops->link_ok(adapter, NULL))
			return -EIO;
		cpu_relax();
	}
	return 0;
}

static int cxl_write_sstp(struct cxl_afu *afu, u64 sstp0, u64 sstp1)
{
	int rc;

	/* 1. Disable SSTP by writing 0 to SSTP1[V] */
	cxl_p2n_write(afu, CXL_SSTP1_An, 0);

	/* 2. Invalidate all SLB entries */
	if ((rc = cxl_afu_slbia(afu)))
		return rc;

	/* 3. Set SSTP0_An */
	cxl_p2n_write(afu, CXL_SSTP0_An, sstp0);

	/* 4. Set SSTP1_An */
	cxl_p2n_write(afu, CXL_SSTP1_An, sstp1);

	return 0;
}

/* Using per slice version may improve performance here. (ie. SLBIA_An) */
static void slb_invalid(struct cxl_context *ctx)
{
	struct cxl *adapter = ctx->afu->adapter;
	u64 slbia;

	WARN_ON(!mutex_is_locked(&ctx->afu->native->spa_mutex));

	cxl_p1_write(adapter, CXL_PSL_LBISEL,
			((u64)be32_to_cpu(ctx->elem->common.pid) << 32) |
			be32_to_cpu(ctx->elem->lpid));
	cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID);

	while (1) {
		if (!cxl_ops->link_ok(adapter, NULL))
			break;
		slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA);
		if (!(slbia & CXL_TLB_SLB_P))
			break;
		cpu_relax();
	}
}

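/*
 * Issue a linked-list command (add/terminate/remove) against this
 * context's process element. The new software state is published in the
 * SPA, the command (tagged with the PE handle) is written to both the
 * shared sw_command_status word and the CXL_PSL_LLCMD_An register, and we
 * then poll sw_command_status until the PSL echoes back the completed
 * command, the timeout expires or the link drops.
 */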
static int do_process_element_cmd(struct cxl_context *ctx,
				  u64 cmd, u64 pe_state)
{
	u64 state;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	trace_cxl_llcmd(ctx, cmd);

	WARN_ON(!ctx->afu->enabled);

	ctx->elem->software_state = cpu_to_be32(pe_state);
	smp_wmb();
	*(ctx->afu->native->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe);
	smp_mb();
	cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
	while (1) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n");
			rc = -EBUSY;
			goto out;
		}
		if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
			dev_warn(&ctx->afu->dev, "WARNING: Device link down, aborting Process Element Command!\n");
			rc = -EIO;
			goto out;
		}
		state = be64_to_cpup(ctx->afu->native->sw_command_status);
		if (state == ~0ULL) {
			pr_err("cxl: Error adding process element to AFU\n");
			rc = -1;
			goto out;
		}
		if ((state & (CXL_SPA_SW_CMD_MASK | CXL_SPA_SW_STATE_MASK | CXL_SPA_SW_LINK_MASK)) ==
		    (cmd | (cmd >> 16) | ctx->pe))
			break;
		/*
		 * The command won't finish in the PSL if there are
		 * outstanding DSIs. Hence we need to yield here in
		 * case there are outstanding DSIs that we need to
		 * service. Tuning possibility: we could wait for a
		 * while before scheduling.
		 */
		schedule();
	}
out:
	trace_cxl_llcmd_done(ctx, cmd, rc);
	return rc;
}

static int add_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	mutex_lock(&ctx->afu->native->spa_mutex);
	pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe);
	if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V)))
		ctx->pe_inserted = true;
	pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->native->spa_mutex);
	return rc;
}

static int terminate_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	/* fast path terminate if it's already invalid */
	if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V)))
		return rc;

	mutex_lock(&ctx->afu->native->spa_mutex);
	pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe);
	/* We could be asked to terminate when the hw is down. That
	 * should always succeed: it's not running if the hw has gone
	 * away and is being reset.
	 */
	if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
					    CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
	ctx->elem->software_state = 0;	/* Remove Valid bit */
	pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->native->spa_mutex);
	return rc;
}

static int remove_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	mutex_lock(&ctx->afu->native->spa_mutex);
	pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe);

	/* We could be asked to remove when the hw is down. Again, if
	 * the hw is down, the PE is gone, so we succeed.
	 */
	if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0);

	if (!rc)
		ctx->pe_inserted = false;
	slb_invalid(ctx);
	pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->native->spa_mutex);

	return rc;
}

void cxl_assign_psn_space(struct cxl_context *ctx)
{
	if (!ctx->afu->pp_size || ctx->master) {
		ctx->psn_phys = ctx->afu->psn_phys;
		ctx->psn_size = ctx->afu->adapter->ps_size;
	} else {
		ctx->psn_phys = ctx->afu->psn_phys +
			(ctx->afu->native->pp_offset + ctx->afu->pp_size * ctx->pe);
		ctx->psn_size = ctx->afu->pp_size;
	}
}

static int activate_afu_directed(struct cxl_afu *afu)
{
	int rc;

	dev_info(&afu->dev, "Activating AFU directed mode\n");

	afu->num_procs = afu->max_procs_virtualised;
	if (afu->native->spa == NULL) {
		if (cxl_alloc_spa(afu))
			return -ENOMEM;
	}
	attach_spa(afu);

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU);
	cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
	cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);

	afu->current_mode = CXL_MODE_DIRECTED;

	if ((rc = cxl_chardev_m_afu_add(afu)))
		return rc;

	if ((rc = cxl_sysfs_afu_m_add(afu)))
		goto err;

	if ((rc = cxl_chardev_s_afu_add(afu)))
		goto err1;

	return 0;
err1:
	cxl_sysfs_afu_m_remove(afu);
err:
	cxl_chardev_afu_remove(afu);
	return rc;
}

#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define set_endian(sr) ((sr) |= CXL_PSL_SR_An_LE)
#else
#define set_endian(sr) ((sr) &= ~(CXL_PSL_SR_An_LE))
#endif

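/*
 * Build the State Register value for a context: endianness from the
 * kernel config, master and translation-control bits from the context
 * and LPCR, then either hypervisor state with MSR_SF (kernel contexts)
 * or problem state with the 64-bit flag taken from the task's 32-bit
 * thread flag (user contexts).
 */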
static u64 calculate_sr(struct cxl_context *ctx)
{
	u64 sr = 0;

	set_endian(sr);
	if (ctx->master)
		sr |= CXL_PSL_SR_An_MP;
	if (mfspr(SPRN_LPCR) & LPCR_TC)
		sr |= CXL_PSL_SR_An_TC;
	if (ctx->kernel) {
		sr |= CXL_PSL_SR_An_R | (mfmsr() & MSR_SF);
		sr |= CXL_PSL_SR_An_HV;
	} else {
		sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
		sr &= ~(CXL_PSL_SR_An_HV);
		if (!test_tsk_thread_flag(current, TIF_32BIT))
			sr |= CXL_PSL_SR_An_SF;
	}
	return sr;
}

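/*
 * Fill in this context's process element in the SPA (PID, state register,
 * segment table pointers, IVTE ranges, AMR, WED), make sure the AFU is
 * enabled, then ask the PSL to link the element into its list.
 */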
static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
{
	u32 pid;
	int r, result;

	cxl_assign_psn_space(ctx);

	ctx->elem->ctxtime = 0; /* disable */
	ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
	ctx->elem->haurp = 0; /* disable */
	ctx->elem->sdr = cpu_to_be64(mfspr(SPRN_SDR1));

	pid = current->pid;
	if (ctx->kernel)
		pid = 0;
	ctx->elem->common.tid = 0;
	ctx->elem->common.pid = cpu_to_be32(pid);

	ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));

	ctx->elem->common.csrp = 0; /* disable */
	ctx->elem->common.aurp0 = 0; /* disable */
	ctx->elem->common.aurp1 = 0; /* disable */

	cxl_prefault(ctx, wed);

	ctx->elem->common.sstp0 = cpu_to_be64(ctx->sstp0);
	ctx->elem->common.sstp1 = cpu_to_be64(ctx->sstp1);

	/*
	 * Ensure we have the multiplexed PSL interrupt set up to take faults
	 * for kernel contexts that may not have allocated any AFU IRQs at all:
	 */
	if (ctx->irqs.range[0] == 0) {
		ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
		ctx->irqs.range[0] = 1;
	}

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
		ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
	}

	ctx->elem->common.amr = cpu_to_be64(amr);
	ctx->elem->common.wed = cpu_to_be64(wed);

	/* first guy needs to enable */
	if ((result = cxl_ops->afu_check_and_enable(ctx->afu)))
		return result;

	return add_process_element(ctx);
}

static int deactivate_afu_directed(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating AFU directed mode\n");

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_sysfs_afu_m_remove(afu);
	cxl_chardev_afu_remove(afu);

	cxl_ops->afu_reset(afu);
	cxl_afu_disable(afu);
	cxl_psl_purge(afu);

	return 0;
}

static int activate_dedicated_process(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Activating dedicated process mode\n");

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);

	cxl_p1n_write(afu, CXL_PSL_CtxTime_An, 0); /* disable */
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);    /* disable */
	cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
	cxl_p1n_write(afu, CXL_PSL_LPID_An, mfspr(SPRN_LPID));
	cxl_p1n_write(afu, CXL_HAURP_An, 0);       /* disable */
	cxl_p1n_write(afu, CXL_PSL_SDR_An, mfspr(SPRN_SDR1));

	cxl_p2n_write(afu, CXL_CSRP_An, 0);  /* disable */
	cxl_p2n_write(afu, CXL_AURP0_An, 0); /* disable */
	cxl_p2n_write(afu, CXL_AURP1_An, 0); /* disable */

	afu->current_mode = CXL_MODE_DEDICATED;
	afu->num_procs = 1;

	return cxl_chardev_d_afu_add(afu);
}

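/*
 * In dedicated mode the process element lives in MMIO registers rather
 * than the SPA: the PID, state register, segment table and AMR are
 * written directly, and all four IVTE interrupt ranges are packed as
 * 16-bit offset/range fields into a pair of 64-bit registers.
 */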
static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr)
{
	struct cxl_afu *afu = ctx->afu;
	u64 pid;
	int rc;

	pid = (u64)current->pid << 32;
	if (ctx->kernel)
		pid = 0;
	cxl_p2n_write(afu, CXL_PSL_PID_TID_An, pid);

	cxl_p1n_write(afu, CXL_PSL_SR_An, calculate_sr(ctx));

	if ((rc = cxl_write_sstp(afu, ctx->sstp0, ctx->sstp1)))
		return rc;

	cxl_prefault(ctx, wed);

	cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An,
		      (((u64)ctx->irqs.offset[0] & 0xffff) << 48) |
		      (((u64)ctx->irqs.offset[1] & 0xffff) << 32) |
		      (((u64)ctx->irqs.offset[2] & 0xffff) << 16) |
		       ((u64)ctx->irqs.offset[3] & 0xffff));
	cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64)
		      (((u64)ctx->irqs.range[0] & 0xffff) << 48) |
		      (((u64)ctx->irqs.range[1] & 0xffff) << 32) |
		      (((u64)ctx->irqs.range[2] & 0xffff) << 16) |
		       ((u64)ctx->irqs.range[3] & 0xffff));

	cxl_p2n_write(afu, CXL_PSL_AMR_An, amr);

	/* master only context for dedicated */
	cxl_assign_psn_space(ctx);

	if ((rc = cxl_ops->afu_reset(afu)))
		return rc;

	cxl_p2n_write(afu, CXL_PSL_WED_An, wed);

	return afu_enable(afu);
}

static int deactivate_dedicated_process(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating dedicated process mode\n");

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_chardev_afu_remove(afu);

	return 0;
}

static int native_afu_deactivate_mode(struct cxl_afu *afu, int mode)
{
	if (mode == CXL_MODE_DIRECTED)
		return deactivate_afu_directed(afu);
	if (mode == CXL_MODE_DEDICATED)
		return deactivate_dedicated_process(afu);
	return 0;
}

static int native_afu_activate_mode(struct cxl_afu *afu, int mode)
{
	if (!mode)
		return 0;
	if (!(mode & afu->modes_supported))
		return -EINVAL;

	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		WARN(1, "Device link is down, refusing to activate!\n");
		return -EIO;
	}

	if (mode == CXL_MODE_DIRECTED)
		return activate_afu_directed(afu);
	if (mode == CXL_MODE_DEDICATED)
		return activate_dedicated_process(afu);

	return -EINVAL;
}

static int native_attach_process(struct cxl_context *ctx, bool kernel,
				u64 wed, u64 amr)
{
	if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
		WARN(1, "Device link is down, refusing to attach process!\n");
		return -EIO;
	}

	ctx->kernel = kernel;
	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
		return attach_afu_directed(ctx, wed, amr);

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
		return attach_dedicated(ctx, wed, amr);

	return -EINVAL;
}

static inline int detach_process_native_dedicated(struct cxl_context *ctx)
{
	cxl_ops->afu_reset(ctx->afu);
	cxl_afu_disable(ctx->afu);
	cxl_psl_purge(ctx->afu);
	return 0;
}

static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
{
	if (!ctx->pe_inserted)
		return 0;
	if (terminate_process_element(ctx))
		return -1;
	if (remove_process_element(ctx))
		return -1;

	return 0;
}

static int native_detach_process(struct cxl_context *ctx)
{
	trace_cxl_detach(ctx);

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
		return detach_process_native_dedicated(ctx);

	return detach_process_native_afu_directed(ctx);
}

static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info)
{
	u64 pidtid;

	/* If the adapter has gone away, we can't get any meaningful
	 * information.
	 */
	if (!cxl_ops->link_ok(afu->adapter, afu))
		return -EIO;

	info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
	info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An);
	pidtid = cxl_p2n_read(afu, CXL_PSL_PID_TID_An);
	info->pid = pidtid >> 32;
	info->tid = pidtid & 0xffffffff;
	info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An);
	info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	info->proc_handle = 0;

	return 0;
}

static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
						u64 dsisr, u64 errstat)
{
	u64 fir1, fir2, fir_slice, serr, afu_debug;

	fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
	fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
	fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
	serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
	afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);

	dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);
	dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
	dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
	dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
	dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
	dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);

	dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
	cxl_stop_trace(ctx->afu->adapter);

	return cxl_ops->ack_irq(ctx, 0, errstat);
}

static irqreturn_t fail_psl_irq(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
{
	if (irq_info->dsisr & CXL_PSL_DSISR_TRANS)
		cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
	else
		cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);

	return IRQ_HANDLED;
}

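/*
 * All contexts on an AFU share one hardware PSL interrupt. Read the PE
 * handle the PSL latched for the fault, look the context up in the
 * contexts IDR under RCU, and forward the interrupt to it; if no context
 * matches, ack the fault with an error so the PSL isn't left stalled.
 */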
static irqreturn_t native_irq_multiplexed(int irq, void *data)
{
	struct cxl_afu *afu = data;
	struct cxl_context *ctx;
	struct cxl_irq_info irq_info;
	int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff;
	int ret;

	if ((ret = native_get_irq_info(afu, &irq_info))) {
		WARN(1, "Unable to get CXL IRQ Info: %i\n", ret);
		return fail_psl_irq(afu, &irq_info);
	}

	rcu_read_lock();
	ctx = idr_find(&afu->contexts_idr, ph);
	if (ctx) {
		ret = cxl_irq(irq, ctx, &irq_info);
		rcu_read_unlock();
		return ret;
	}
	rcu_read_unlock();

	WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR"
		" %016llx\n(Possible AFU HW issue - was a term/remove acked"
		" with outstanding transactions?)\n", ph, irq_info.dsisr,
		irq_info.dar);
	return fail_psl_irq(afu, &irq_info);
}

static irqreturn_t native_slice_irq_err(int irq, void *data)
{
	struct cxl_afu *afu = data;
	u64 fir_slice, errstat, serr, afu_debug;

	WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq);

	serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
	fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
	errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
	dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
	dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
	dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
	dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);

	cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);

	return IRQ_HANDLED;
}

static irqreturn_t native_irq_err(int irq, void *data)
{
	struct cxl *adapter = data;
	u64 fir1, fir2, err_ivte;

	WARN(1, "CXL ERROR interrupt %i\n", irq);

	err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
	dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte);

	dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
	cxl_stop_trace(adapter);

	fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
	fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);

	dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2);

	return IRQ_HANDLED;
}

int cxl_native_register_psl_err_irq(struct cxl *adapter)
{
	int rc;

	adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
				      dev_name(&adapter->dev));
	if (!adapter->irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(adapter, native_irq_err, adapter,
				       &adapter->native->err_hwirq,
				       &adapter->native->err_virq,
				       adapter->irq_name))) {
		kfree(adapter->irq_name);
		adapter->irq_name = NULL;
		return rc;
	}

	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->native->err_hwirq & 0xffff);

	return 0;
}

void cxl_native_release_psl_err_irq(struct cxl *adapter)
{
	if (adapter->native->err_virq != irq_find_mapping(NULL, adapter->native->err_hwirq))
		return;

	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
	cxl_unmap_irq(adapter->native->err_virq, adapter);
	cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
	kfree(adapter->irq_name);
}

int cxl_native_register_serr_irq(struct cxl_afu *afu)
{
	u64 serr;
	int rc;

	afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
				      dev_name(&afu->dev));
	if (!afu->err_irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(afu->adapter, native_slice_irq_err, afu,
				       &afu->serr_hwirq,
				       &afu->serr_virq, afu->err_irq_name))) {
		kfree(afu->err_irq_name);
		afu->err_irq_name = NULL;
		return rc;
	}

	serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
	serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
	cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);

	return 0;
}

void cxl_native_release_serr_irq(struct cxl_afu *afu)
{
	if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
		return;

	cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
	cxl_unmap_irq(afu->serr_virq, afu);
	cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
	kfree(afu->err_irq_name);
}

int cxl_native_register_psl_irq(struct cxl_afu *afu)
{
	int rc;

	afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s",
				      dev_name(&afu->dev));
	if (!afu->psl_irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(afu->adapter, native_irq_multiplexed,
				    afu, &afu->native->psl_hwirq, &afu->native->psl_virq,
				    afu->psl_irq_name))) {
		kfree(afu->psl_irq_name);
		afu->psl_irq_name = NULL;
	}
	return rc;
}

void cxl_native_release_psl_irq(struct cxl_afu *afu)
{
	if (afu->native->psl_virq != irq_find_mapping(NULL, afu->native->psl_hwirq))
		return;

	cxl_unmap_irq(afu->native->psl_virq, afu);
	cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
	kfree(afu->psl_irq_name);
}

static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
{
	u64 dsisr;

	pr_devel("RECOVERING FROM PSL ERROR... (0x%016llx)\n", errstat);

	/* Clear PSL_DSISR[PE] */
	dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	cxl_p2n_write(afu, CXL_PSL_DSISR_An, dsisr & ~CXL_PSL_DSISR_An_PE);

	/* Write 1s to clear error status bits */
	cxl_p2n_write(afu, CXL_PSL_ErrStat_An, errstat);
}

static int native_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
	trace_cxl_psl_irq_ack(ctx, tfc);
	if (tfc)
		cxl_p2n_write(ctx->afu, CXL_PSL_TFC_An, tfc);
	if (psl_reset_mask)
		recover_psl_err(ctx->afu, psl_reset_mask);

	return 0;
}

int cxl_check_error(struct cxl_afu *afu)
{
	return (cxl_p1n_read(afu, CXL_PSL_SCNTL_An) == ~0ULL);
}

static bool native_support_attributes(const char *attr_name,
				      enum cxl_attrs type)
{
	return true;
}

static int native_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off, u64 *out)
{
	if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
		return -EIO;
	if (unlikely(off >= afu->crs_len))
		return -ERANGE;
	*out = in_le64(afu->native->afu_desc_mmio + afu->crs_offset +
		(cr * afu->crs_len) + off);
	return 0;
}

static int native_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off, u32 *out)
{
	if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
		return -EIO;
	if (unlikely(off >= afu->crs_len))
		return -ERANGE;
	*out = in_le32(afu->native->afu_desc_mmio + afu->crs_offset +
		(cr * afu->crs_len) + off);
	return 0;
}

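/*
 * The 8- and 16-bit configuration record accessors below work on the
 * aligned 32-bit word containing the requested offset: reads shift and
 * mask out the bytes of interest, writes do a read-modify-write of that
 * word.
 */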
static int native_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off, u16 *out)
{
	u64 aligned_off = off & ~0x3L;
	u32 val;
	int rc;

	rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
	if (!rc)
		*out = (val >> ((off & 0x3) * 8)) & 0xffff;
	return rc;
}

static int native_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off, u8 *out)
{
	u64 aligned_off = off & ~0x3L;
	u32 val;
	int rc;

	rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
	if (!rc)
		*out = (val >> ((off & 0x3) * 8)) & 0xff;
	return rc;
}

static int native_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
{
	if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
		return -EIO;
	if (unlikely(off >= afu->crs_len))
		return -ERANGE;
	out_le32(afu->native->afu_desc_mmio + afu->crs_offset +
		(cr * afu->crs_len) + off, in);
	return 0;
}

static int native_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
{
	u64 aligned_off = off & ~0x3L;
	u32 val32, mask, shift;
	int rc;

	rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
	if (rc)
		return rc;
	shift = (off & 0x3) * 8;
	WARN_ON(shift == 24);
	mask = 0xffff << shift;
	val32 = (val32 & ~mask) | (in << shift);

	rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
	return rc;
}

static int native_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
{
	u64 aligned_off = off & ~0x3L;
	u32 val32, mask, shift;
	int rc;

	rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
	if (rc)
		return rc;
	shift = (off & 0x3) * 8;
	mask = 0xff << shift;
	val32 = (val32 & ~mask) | (in << shift);

	rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
	return rc;
}

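/*
 * Backend operations for bare-metal (native) mode. IRQ allocation, reset,
 * VPD and error-buffer access go straight to the PCI layer; everything
 * context- and mode-related is handled by the native_* functions above.
 */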
const struct cxl_backend_ops cxl_native_ops = {
	.module = THIS_MODULE,
	.adapter_reset = cxl_pci_reset,
	.alloc_one_irq = cxl_pci_alloc_one_irq,
	.release_one_irq = cxl_pci_release_one_irq,
	.alloc_irq_ranges = cxl_pci_alloc_irq_ranges,
	.release_irq_ranges = cxl_pci_release_irq_ranges,
	.setup_irq = cxl_pci_setup_irq,
	.handle_psl_slice_error = native_handle_psl_slice_error,
	.psl_interrupt = NULL,
	.ack_irq = native_ack_irq,
	.attach_process = native_attach_process,
	.detach_process = native_detach_process,
	.support_attributes = native_support_attributes,
	.link_ok = cxl_adapter_link_ok,
	.release_afu = cxl_pci_release_afu,
	.afu_read_err_buffer = cxl_pci_afu_read_err_buffer,
	.afu_check_and_enable = native_afu_check_and_enable,
	.afu_activate_mode = native_afu_activate_mode,
	.afu_deactivate_mode = native_afu_deactivate_mode,
	.afu_reset = native_afu_reset,
	.afu_cr_read8 = native_afu_cr_read8,
	.afu_cr_read16 = native_afu_cr_read16,
	.afu_cr_read32 = native_afu_cr_read32,
	.afu_cr_read64 = native_afu_cr_read64,
	.afu_cr_write8 = native_afu_cr_write8,
	.afu_cr_write16 = native_afu_cr_write16,
	.afu_cr_write32 = native_afu_cr_write32,
	.read_adapter_vpd = cxl_pci_read_adapter_vpd,
};