 1/*
2 * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33/*
34 * This file contains all of the code that is specific to the
35 * InfiniPath 7322 chip
36 */
37
38#include <linux/interrupt.h>
39#include <linux/pci.h>
40#include <linux/delay.h>
41#include <linux/io.h>
42#include <linux/jiffies.h>
43#include <rdma/ib_verbs.h>
44#include <rdma/ib_smi.h>
 45
46#include "qib.h"
47#include "qib_7322_regs.h"
48#include "qib_qsfp.h"
49
50#include "qib_mad.h"
51
52static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
53static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
54static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
55static irqreturn_t qib_7322intr(int irq, void *data);
56static irqreturn_t qib_7322bufavail(int irq, void *data);
57static irqreturn_t sdma_intr(int irq, void *data);
58static irqreturn_t sdma_idle_intr(int irq, void *data);
59static irqreturn_t sdma_progress_intr(int irq, void *data);
60static irqreturn_t sdma_cleanup_intr(int irq, void *data);
61static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
62 struct qib_ctxtdata *rcd);
63static u8 qib_7322_phys_portstate(u64);
64static u32 qib_7322_iblink_state(u64);
65static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
66 u16 linitcmd);
67static void force_h1(struct qib_pportdata *);
68static void adj_tx_serdes(struct qib_pportdata *);
69static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
70static void qib_7322_mini_pcs_reset(struct qib_pportdata *);
71
72static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
73static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
 74static void serdes_7322_los_enable(struct qib_pportdata *, int);
75static int serdes_7322_init_old(struct qib_pportdata *);
76static int serdes_7322_init_new(struct qib_pportdata *);
 77
78#define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
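/* For illustration (not in the original source): BMASK(7, 4) expands to
 * ((1 << 4) - 1) << 4 == 0xF0, i.e. a mask covering bits 7..4. */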
79
80/* LE2 serdes values for different cases */
81#define LE2_DEFAULT 5
82#define LE2_5m 4
83#define LE2_QME 0
84
85/* Below is special-purpose, so only really works for the IB SerDes blocks. */
86#define IBSD(hw_pidx) (hw_pidx + 2)
87
88/* these are variables for documentation and experimentation purposes */
89static const unsigned rcv_int_timeout = 375;
90static const unsigned rcv_int_count = 16;
91static const unsigned sdma_idle_cnt = 64;
92
93/* Time to stop altering Rx Equalization parameters, after link up. */
94#define RXEQ_DISABLE_MSECS 2500
95
96/*
97 * Number of VLs we are configured to use (to allow for more
98 * credits per vl, etc.)
99 */
100ushort qib_num_cfg_vls = 2;
101module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
102MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
103
104static ushort qib_chase = 1;
105module_param_named(chase, qib_chase, ushort, S_IRUGO);
106MODULE_PARM_DESC(chase, "Enable state chase handling");
107
108static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
109module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
110MODULE_PARM_DESC(long_attenuation, \
111 "attenuation cutoff (dB) for long copper cable setup");
112
113static ushort qib_singleport;
114module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
115MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
116
 117static ushort qib_krcvq01_no_msi;
118module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO);
119MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2");
120
 121/*
122 * Receive header queue sizes
123 */
124static unsigned qib_rcvhdrcnt;
125module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
126MODULE_PARM_DESC(rcvhdrcnt, "receive header count");
127
128static unsigned qib_rcvhdrsize;
129module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
130MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");
131
132static unsigned qib_rcvhdrentsize;
133module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
134MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");
135
 136#define MAX_ATTEN_LEN 64 /* plenty for any real system */
137/* for read back, default index is ~5m copper cable */
 138static char txselect_list[MAX_ATTEN_LEN] = "10";
139static struct kparam_string kp_txselect = {
140 .string = txselect_list,
 141 .maxlen = MAX_ATTEN_LEN
142};
 143static int setup_txselect(const char *, struct kernel_param *);
144module_param_call(txselect, setup_txselect, param_get_string,
145 &kp_txselect, S_IWUSR | S_IRUGO);
146MODULE_PARM_DESC(txselect, \
147 "Tx serdes indices (for no QSFP or invalid QSFP data)");
 148
149#define BOARD_QME7342 5
150#define BOARD_QMH7342 6
151#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
152 BOARD_QMH7342)
153#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
154 BOARD_QME7342)
155
156#define KREG_IDX(regname) (QIB_7322_##regname##_OFFS / sizeof(u64))
157
158#define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))
159
160#define MASK_ACROSS(lsb, msb) \
161 (((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))
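/* Illustrative expansion: MASK_ACROSS(0, 17) == (1ULL << 18) - 1 == 0x3ffff,
 * the 18-context-wide mask used below for QIB_I_RCVURG_RMASK and
 * QIB_I_RCVAVAIL_RMASK. */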
162
163#define SYM_RMASK(regname, fldname) ((u64) \
164 QIB_7322_##regname##_##fldname##_RMASK)
165
166#define SYM_MASK(regname, fldname) ((u64) \
167 QIB_7322_##regname##_##fldname##_RMASK << \
168 QIB_7322_##regname##_##fldname##_LSB)
169
170#define SYM_FIELD(value, regname, fldname) ((u64) \
171 (((value) >> SYM_LSB(regname, fldname)) & \
172 SYM_RMASK(regname, fldname)))
173
174/* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
175#define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
176 (((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))
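/*
 * Illustrative expansion of the SYM_* helpers above (derived purely from the
 * macro definitions): SYM_MASK(IBCCtrlA_0, LinkCmd) becomes
 * QIB_7322_IBCCtrlA_0_LinkCmd_RMASK << QIB_7322_IBCCtrlA_0_LinkCmd_LSB, and
 * SYM_FIELD(val, IBCCtrlA_0, LinkCmd) shifts and masks that field back out
 * of val.
 */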
177
178#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
179#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
180#define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
181#define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
182#define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
183/* Below because most, but not all, fields of IntMask have that full suffix */
184#define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)
185
186
187#define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)
188
189/*
190 * the size bits give us 2^N, in KB units. 0 marks as invalid,
191 * and 7 is reserved. We currently use only 2KB and 4KB
192 */
193#define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
194#define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
195#define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
196#define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
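/* Illustration of the size encoding described above: a BufSize code of N
 * means 2^N KB, so IBA7322_TID_SZ_2K is code 1 (2 KB) and IBA7322_TID_SZ_4K
 * is code 2 (4 KB), each already shifted into the RT_BufSize field. */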
197
198#define SendIBSLIDAssignMask \
199 QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
200#define SendIBSLMCMask \
201 QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK
202
203#define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
204#define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
205#define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
206#define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
207#define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
208#define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)
209
210#define _QIB_GPIO_SDA_NUM 1
211#define _QIB_GPIO_SCL_NUM 0
212#define QIB_EEPROM_WEN_NUM 14
213#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */
214
215/* HW counter clock is at 4nsec */
216#define QIB_7322_PSXMITWAIT_CHECK_RATE 4000
217
218/* full speed IB port 1 only */
219#define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
220#define PORT_SPD_CAP_SHIFT 3
221
222/* full speed featuremask, both ports */
223#define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))
224
225/*
226 * This file contains almost all the chip-specific register information and
 227 * access functions for the QLogic InfiniPath 7322 PCI-Express chip.
228 */
229
230/* Use defines to tie machine-generated names to lower-case names */
231#define kr_contextcnt KREG_IDX(ContextCnt)
232#define kr_control KREG_IDX(Control)
233#define kr_counterregbase KREG_IDX(CntrRegBase)
234#define kr_errclear KREG_IDX(ErrClear)
235#define kr_errmask KREG_IDX(ErrMask)
236#define kr_errstatus KREG_IDX(ErrStatus)
237#define kr_extctrl KREG_IDX(EXTCtrl)
238#define kr_extstatus KREG_IDX(EXTStatus)
239#define kr_gpio_clear KREG_IDX(GPIOClear)
240#define kr_gpio_mask KREG_IDX(GPIOMask)
241#define kr_gpio_out KREG_IDX(GPIOOut)
242#define kr_gpio_status KREG_IDX(GPIOStatus)
243#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
244#define kr_debugportval KREG_IDX(DebugPortValueReg)
245#define kr_fmask KREG_IDX(feature_mask)
246#define kr_act_fmask KREG_IDX(active_feature_mask)
247#define kr_hwerrclear KREG_IDX(HwErrClear)
248#define kr_hwerrmask KREG_IDX(HwErrMask)
249#define kr_hwerrstatus KREG_IDX(HwErrStatus)
250#define kr_intclear KREG_IDX(IntClear)
251#define kr_intmask KREG_IDX(IntMask)
252#define kr_intredirect KREG_IDX(IntRedirect0)
253#define kr_intstatus KREG_IDX(IntStatus)
254#define kr_pagealign KREG_IDX(PageAlign)
255#define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
256#define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
257#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
258#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
259#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
260#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
261#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
262#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
263#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
264#define kr_revision KREG_IDX(Revision)
265#define kr_scratch KREG_IDX(Scratch)
266#define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
267#define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
268#define kr_sendctrl KREG_IDX(SendCtrl)
269#define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
270#define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
271#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
272#define kr_sendpiobufbase KREG_IDX(SendBufBase)
273#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
274#define kr_sendpiosize KREG_IDX(SendBufSize)
275#define kr_sendregbase KREG_IDX(SendRegBase)
276#define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
277#define kr_userregbase KREG_IDX(UserRegBase)
278#define kr_intgranted KREG_IDX(Int_Granted)
279#define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
280#define kr_intblocked KREG_IDX(IntBlocked)
281#define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)
282
283/*
284 * per-port kernel registers. Access only with qib_read_kreg_port()
285 * or qib_write_kreg_port()
286 */
287#define krp_errclear KREG_IBPORT_IDX(ErrClear)
288#define krp_errmask KREG_IBPORT_IDX(ErrMask)
289#define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
290#define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
291#define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
292#define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
293#define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
294#define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
295#define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
296#define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
297#define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
298#define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
299#define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
300#define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
301#define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
302#define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
303#define krp_psinterval KREG_IBPORT_IDX(PSInterval)
304#define krp_psstart KREG_IBPORT_IDX(PSStart)
305#define krp_psstat KREG_IBPORT_IDX(PSStat)
306#define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
307#define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
308#define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
309#define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
310#define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
311#define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
312#define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
313#define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
314#define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
315#define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
316#define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
317#define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
318#define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
319#define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
320#define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
321#define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
322#define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
323#define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
324#define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
325#define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
326#define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
327#define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
328#define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
329#define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
330#define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
331#define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
332#define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
333#define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
334#define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
335#define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
336#define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)
337
338/*
 339 * Per-context kernel registers. Access only with qib_read_kreg_ctxt()
 340 * or qib_write_kreg_ctxt()
341 */
342#define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
343#define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
344
345/*
346 * TID Flow table, per context. Reduces
347 * number of hdrq updates to one per flow (or on errors).
348 * context 0 and 1 share same memory, but have distinct
349 * addresses. Since for now, we never use expected sends
350 * on kernel contexts, we don't worry about that (we initialize
351 * those entries for ctxt 0/1 on driver load twice, for example).
352 */
353#define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
354#define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))
355
356/* these are the error bits in the tid flows, and are W1C */
357#define TIDFLOW_ERRBITS ( \
358 (SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
359 SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
360 (SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
361 SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))
362
363/* Most (not all) Counters are per-IBport.
364 * Requires LBIntCnt is at offset 0 in the group
365 */
366#define CREG_IDX(regname) \
367((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
368
369#define crp_badformat CREG_IDX(RxVersionErrCnt)
370#define crp_err_rlen CREG_IDX(RxLenErrCnt)
371#define crp_erricrc CREG_IDX(RxICRCErrCnt)
372#define crp_errlink CREG_IDX(RxLinkMalformCnt)
373#define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
374#define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
375#define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
376#define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
377#define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
378#define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
379#define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
380#define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
381#define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
382#define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
383#define crp_pktrcv CREG_IDX(RxDataPktCnt)
384#define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
385#define crp_pktsend CREG_IDX(TxDataPktCnt)
386#define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
387#define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
388#define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
389#define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
390#define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
391#define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
392#define crp_rcvebp CREG_IDX(RxEBPCnt)
393#define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
394#define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
395#define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
396#define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
397#define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
398#define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
399#define crp_rxvlerr CREG_IDX(RxVlErrCnt)
400#define crp_sendstall CREG_IDX(TxFlowStallCnt)
401#define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
402#define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
403#define crp_txlenerr CREG_IDX(TxLenErrCnt)
 404#define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
405#define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
406#define crp_txunderrun CREG_IDX(TxUnderrunCnt)
407#define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
408#define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
409#define crp_wordrcv CREG_IDX(RxDwordCnt)
410#define crp_wordsend CREG_IDX(TxDwordCnt)
411#define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)
412
413/* these are the (few) counters that are not port-specific */
414#define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
415 QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
416#define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
417#define cr_lbint CREG_DEVIDX(LBIntCnt)
418#define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
419#define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
420#define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
421#define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
422#define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)
423
424/* no chip register for # of IB ports supported, so define */
425#define NUM_IB_PORTS 2
426
427/* 1 VL15 buffer per hardware IB port, no register for this, so define */
428#define NUM_VL15_BUFS NUM_IB_PORTS
429
430/*
431 * context 0 and 1 are special, and there is no chip register that
432 * defines this value, so we have to define it here.
433 * These are all allocated to either 0 or 1 for single port
434 * hardware configuration, otherwise each gets half
435 */
436#define KCTXT0_EGRCNT 2048
437
438/* values for vl and port fields in PBC, 7322-specific */
439#define PBC_PORT_SEL_LSB 26
440#define PBC_PORT_SEL_RMASK 1
441#define PBC_VL_NUM_LSB 27
442#define PBC_VL_NUM_RMASK 7
443#define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
444#define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
445
446static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
447 [IB_RATE_2_5_GBPS] = 16,
448 [IB_RATE_5_GBPS] = 8,
449 [IB_RATE_10_GBPS] = 4,
450 [IB_RATE_20_GBPS] = 2,
451 [IB_RATE_30_GBPS] = 2,
452 [IB_RATE_40_GBPS] = 1
453};
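/* Illustrative reading of the table above: each entry is roughly the top
 * rate (40 Gb/s) divided by the given IB rate, rounded up, so slower static
 * rates get a proportionally larger delay multiplier. */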
454
455#define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
456#define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)
457
458/* link training states, from IBC */
459#define IB_7322_LT_STATE_DISABLED 0x00
460#define IB_7322_LT_STATE_LINKUP 0x01
461#define IB_7322_LT_STATE_POLLACTIVE 0x02
462#define IB_7322_LT_STATE_POLLQUIET 0x03
463#define IB_7322_LT_STATE_SLEEPDELAY 0x04
464#define IB_7322_LT_STATE_SLEEPQUIET 0x05
465#define IB_7322_LT_STATE_CFGDEBOUNCE 0x08
466#define IB_7322_LT_STATE_CFGRCVFCFG 0x09
467#define IB_7322_LT_STATE_CFGWAITRMT 0x0a
468#define IB_7322_LT_STATE_CFGIDLE 0x0b
469#define IB_7322_LT_STATE_RECOVERRETRAIN 0x0c
470#define IB_7322_LT_STATE_TXREVLANES 0x0d
471#define IB_7322_LT_STATE_RECOVERWAITRMT 0x0e
472#define IB_7322_LT_STATE_RECOVERIDLE 0x0f
473#define IB_7322_LT_STATE_CFGENH 0x10
474#define IB_7322_LT_STATE_CFGTEST 0x11
 475#define IB_7322_LT_STATE_CFGWAITRMTTEST 0x12
476#define IB_7322_LT_STATE_CFGWAITENH 0x13
 477
478/* link state machine states from IBC */
479#define IB_7322_L_STATE_DOWN 0x0
480#define IB_7322_L_STATE_INIT 0x1
481#define IB_7322_L_STATE_ARM 0x2
482#define IB_7322_L_STATE_ACTIVE 0x3
483#define IB_7322_L_STATE_ACT_DEFER 0x4
484
485static const u8 qib_7322_physportstate[0x20] = {
486 [IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
487 [IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
488 [IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
489 [IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
490 [IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
491 [IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
492 [IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
493 [IB_7322_LT_STATE_CFGRCVFCFG] =
494 IB_PHYSPORTSTATE_CFG_TRAIN,
495 [IB_7322_LT_STATE_CFGWAITRMT] =
496 IB_PHYSPORTSTATE_CFG_TRAIN,
497 [IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
498 [IB_7322_LT_STATE_RECOVERRETRAIN] =
499 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
500 [IB_7322_LT_STATE_RECOVERWAITRMT] =
501 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
502 [IB_7322_LT_STATE_RECOVERIDLE] =
503 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
504 [IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
505 [IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
 506 [IB_7322_LT_STATE_CFGWAITRMTTEST] =
507 IB_PHYSPORTSTATE_CFG_TRAIN,
508 [IB_7322_LT_STATE_CFGWAITENH] =
509 IB_PHYSPORTSTATE_CFG_WAIT_ENH,
 510 [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
511 [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
512 [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
513 [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
514};
515
516struct qib_chip_specific {
517 u64 __iomem *cregbase;
518 u64 *cntrs;
519 spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
520 spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
521 u64 main_int_mask; /* clear bits which have dedicated handlers */
522 u64 int_enable_mask; /* for per port interrupts in single port mode */
523 u64 errormask;
524 u64 hwerrmask;
525 u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
526 u64 gpio_mask; /* shadow the gpio mask register */
527 u64 extctrl; /* shadow the gpio output enable, etc... */
528 u32 ncntrs;
529 u32 nportcntrs;
530 u32 cntrnamelen;
531 u32 portcntrnamelen;
532 u32 numctxts;
533 u32 rcvegrcnt;
534 u32 updthresh; /* current AvailUpdThld */
535 u32 updthresh_dflt; /* default AvailUpdThld */
536 u32 r1;
537 int irq;
538 u32 num_msix_entries;
539 u32 sdmabufcnt;
540 u32 lastbuf_for_pio;
541 u32 stay_in_freeze;
542 u32 recovery_ports_initted;
 543 struct msix_entry *msix_entries;
544 void **msix_arg;
545 unsigned long *sendchkenable;
546 unsigned long *sendgrhchk;
547 unsigned long *sendibchk;
548 u32 rcvavail_timeout[18];
549 char emsgbuf[128]; /* for device error interrupt msg buffer */
550};
551
552/* Table of entries in "human readable" form Tx Emphasis. */
553struct txdds_ent {
554 u8 amp;
555 u8 pre;
556 u8 main;
557 u8 post;
558};
559
560struct vendor_txdds_ent {
561 u8 oui[QSFP_VOUI_LEN];
562 u8 *partnum;
563 struct txdds_ent sdr;
564 struct txdds_ent ddr;
565 struct txdds_ent qdr;
566};
567
568static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
569
570#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
 571#define TXDDS_EXTRA_SZ 13 /* number of extra tx settings entries */
 572#define TXDDS_MFG_SZ 2 /* number of mfg tx settings entries */
 573#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
574
575#define H1_FORCE_VAL 8
 576#define H1_FORCE_QME 1 /* may be overridden via setup_txselect() */
577#define H1_FORCE_QMH 7 /* may be overridden via setup_txselect() */
 578
579/* The static and dynamic registers are paired, and the pairs indexed by spd */
580#define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
581 + ((spd) * 2))
582
583#define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
584#define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
585#define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
586#define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
587#define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */
588
 589struct qib_chippport_specific {
590 u64 __iomem *kpregbase;
591 u64 __iomem *cpregbase;
592 u64 *portcntrs;
593 struct qib_pportdata *ppd;
594 wait_queue_head_t autoneg_wait;
595 struct delayed_work autoneg_work;
596 struct delayed_work ipg_work;
597 struct timer_list chase_timer;
598 /*
599 * these 5 fields are used to establish deltas for IB symbol
600 * errors and linkrecovery errors. They can be reported on
601 * some chips during link negotiation prior to INIT, and with
602 * DDR when faking DDR negotiations with non-IBTA switches.
603 * The chip counters are adjusted at driver unload if there is
604 * a non-zero delta.
605 */
606 u64 ibdeltainprog;
607 u64 ibsymdelta;
608 u64 ibsymsnap;
609 u64 iblnkerrdelta;
610 u64 iblnkerrsnap;
611 u64 iblnkdownsnap;
612 u64 iblnkdowndelta;
613 u64 ibmalfdelta;
614 u64 ibmalfsnap;
615 u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
616 u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
617 u64 qdr_dfe_time;
618 u64 chase_end;
619 u32 autoneg_tries;
620 u32 recovery_init;
621 u32 qdr_dfe_on;
622 u32 qdr_reforce;
623 /*
624 * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
625 * entry zero is unused, to simplify indexing
626 */
 627 u8 h1_val;
628 u8 no_eep; /* txselect table index to use if no qsfp info */
 629 u8 ipg_tries;
630 u8 ibmalfusesnap;
631 struct qib_qsfp_data qsfp_data;
632 char epmsgbuf[192]; /* for port error interrupt msg buffer */
633};
634
635static struct {
636 const char *name;
637 irq_handler_t handler;
638 int lsb;
639 int port; /* 0 if not port-specific, else port # */
640} irq_table[] = {
641 { QIB_DRV_NAME, qib_7322intr, -1, 0 },
642 { QIB_DRV_NAME " (buf avail)", qib_7322bufavail,
643 SYM_LSB(IntStatus, SendBufAvail), 0 },
644 { QIB_DRV_NAME " (sdma 0)", sdma_intr,
645 SYM_LSB(IntStatus, SDmaInt_0), 1 },
646 { QIB_DRV_NAME " (sdma 1)", sdma_intr,
647 SYM_LSB(IntStatus, SDmaInt_1), 2 },
648 { QIB_DRV_NAME " (sdmaI 0)", sdma_idle_intr,
649 SYM_LSB(IntStatus, SDmaIdleInt_0), 1 },
650 { QIB_DRV_NAME " (sdmaI 1)", sdma_idle_intr,
651 SYM_LSB(IntStatus, SDmaIdleInt_1), 2 },
652 { QIB_DRV_NAME " (sdmaP 0)", sdma_progress_intr,
653 SYM_LSB(IntStatus, SDmaProgressInt_0), 1 },
654 { QIB_DRV_NAME " (sdmaP 1)", sdma_progress_intr,
655 SYM_LSB(IntStatus, SDmaProgressInt_1), 2 },
656 { QIB_DRV_NAME " (sdmaC 0)", sdma_cleanup_intr,
657 SYM_LSB(IntStatus, SDmaCleanupDone_0), 1 },
658 { QIB_DRV_NAME " (sdmaC 1)", sdma_cleanup_intr,
659 SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 },
660};
661
 662/* ibcctrl bits */
663#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
664/* cycle through TS1/TS2 till OK */
665#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
666/* wait for TS1, then go on */
667#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
668#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
669
670#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
671#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
672#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
673
674#define BLOB_7322_IBCHG 0x101
675
676static inline void qib_write_kreg(const struct qib_devdata *dd,
677 const u32 regno, u64 value);
678static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
679static void write_7322_initregs(struct qib_devdata *);
680static void write_7322_init_portregs(struct qib_pportdata *);
681static void setup_7322_link_recovery(struct qib_pportdata *, u32);
682static void check_7322_rxe_status(struct qib_pportdata *);
683static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
684
685/**
686 * qib_read_ureg32 - read 32-bit virtualized per-context register
687 * @dd: device
688 * @regno: register number
689 * @ctxt: context number
690 *
691 * Return the contents of a register that is virtualized to be per context.
 692 * Returns 0 on error (not distinguishable from valid contents at
693 * runtime; we may add a separate error variable at some point).
694 */
695static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
696 enum qib_ureg regno, int ctxt)
697{
698 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
699 return 0;
700 return readl(regno + (u64 __iomem *)(
701 (dd->ureg_align * ctxt) + (dd->userbase ?
702 (char __iomem *)dd->userbase :
703 (char __iomem *)dd->kregbase + dd->uregbase)));
704}
705
706/**
707 * qib_read_ureg - read virtualized per-context register
708 * @dd: device
709 * @regno: register number
710 * @ctxt: context number
711 *
712 * Return the contents of a register that is virtualized to be per context.
 713 * Returns 0 on error (not distinguishable from valid contents at
714 * runtime; we may add a separate error variable at some point).
715 */
716static inline u64 qib_read_ureg(const struct qib_devdata *dd,
717 enum qib_ureg regno, int ctxt)
718{
719
720 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
721 return 0;
722 return readq(regno + (u64 __iomem *)(
723 (dd->ureg_align * ctxt) + (dd->userbase ?
724 (char __iomem *)dd->userbase :
725 (char __iomem *)dd->kregbase + dd->uregbase)));
726}
727
728/**
729 * qib_write_ureg - write virtualized per-context register
730 * @dd: device
731 * @regno: register number
732 * @value: value
733 * @ctxt: context
734 *
735 * Write the contents of a register that is virtualized to be per context.
736 */
737static inline void qib_write_ureg(const struct qib_devdata *dd,
738 enum qib_ureg regno, u64 value, int ctxt)
739{
740 u64 __iomem *ubase;
741 if (dd->userbase)
742 ubase = (u64 __iomem *)
743 ((char __iomem *) dd->userbase +
744 dd->ureg_align * ctxt);
745 else
746 ubase = (u64 __iomem *)
747 (dd->uregbase +
748 (char __iomem *) dd->kregbase +
749 dd->ureg_align * ctxt);
750
751 if (dd->kregbase && (dd->flags & QIB_PRESENT))
752 writeq(value, &ubase[regno]);
753}
754
755static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
756 const u32 regno)
757{
758 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
759 return -1;
760 return readl((u32 __iomem *) &dd->kregbase[regno]);
761}
762
763static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
764 const u32 regno)
765{
766 if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
767 return -1;
768 return readq(&dd->kregbase[regno]);
769}
770
771static inline void qib_write_kreg(const struct qib_devdata *dd,
772 const u32 regno, u64 value)
773{
774 if (dd->kregbase && (dd->flags & QIB_PRESENT))
775 writeq(value, &dd->kregbase[regno]);
776}
777
778/*
779 * not many sanity checks for the port-specific kernel register routines,
780 * since they are only used when it's known to be safe.
781*/
782static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
783 const u16 regno)
784{
785 if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
786 return 0ULL;
787 return readq(&ppd->cpspec->kpregbase[regno]);
788}
789
790static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
791 const u16 regno, u64 value)
792{
793 if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
794 (ppd->dd->flags & QIB_PRESENT))
795 writeq(value, &ppd->cpspec->kpregbase[regno]);
796}
797
798/**
799 * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
800 * @dd: the qlogic_ib device
801 * @regno: the register number to write
802 * @ctxt: the context containing the register
803 * @value: the value to write
804 */
805static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
806 const u16 regno, unsigned ctxt,
807 u64 value)
808{
809 qib_write_kreg(dd, regno + ctxt, value);
810}
811
812static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
813{
814 if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
815 return 0;
816 return readq(&dd->cspec->cregbase[regno]);
817
818
819}
820
821static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
822{
823 if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
824 return 0;
825 return readl(&dd->cspec->cregbase[regno]);
826
827
828}
829
830static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
831 u16 regno, u64 value)
832{
833 if (ppd->cpspec && ppd->cpspec->cpregbase &&
834 (ppd->dd->flags & QIB_PRESENT))
835 writeq(value, &ppd->cpspec->cpregbase[regno]);
836}
837
838static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
839 u16 regno)
840{
841 if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
842 !(ppd->dd->flags & QIB_PRESENT))
843 return 0;
844 return readq(&ppd->cpspec->cpregbase[regno]);
845}
846
847static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
848 u16 regno)
849{
850 if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
851 !(ppd->dd->flags & QIB_PRESENT))
852 return 0;
853 return readl(&ppd->cpspec->cpregbase[regno]);
854}
855
856/* bits in Control register */
857#define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
858#define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)
859
860/* bits in general interrupt regs */
861#define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
862#define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
863#define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
864#define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
865#define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
866#define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
867#define QIB_I_C_ERROR INT_MASK(Err)
868
869#define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
870#define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
871#define QIB_I_GPIO INT_MASK(AssertGPIO)
872#define QIB_I_P_SDMAINT(pidx) \
873 (INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
874 INT_MASK_P(SDmaProgress, pidx) | \
875 INT_MASK_PM(SDmaCleanupDone, pidx))
876
877/* Interrupt bits that are "per port" */
878#define QIB_I_P_BITSEXTANT(pidx) \
879 (INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
880 INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
881 INT_MASK_P(SDmaProgress, pidx) | \
882 INT_MASK_PM(SDmaCleanupDone, pidx))
883
884/* Interrupt bits that are common to a device */
885/* currently unused: QIB_I_SPIOSENT */
886#define QIB_I_C_BITSEXTANT \
887 (QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
888 QIB_I_SPIOSENT | \
889 QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)
890
891#define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
892 QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))
893
894/*
895 * Error bits that are "per port".
896 */
897#define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
898#define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
899#define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
900#define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
901#define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
902#define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
903#define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
904#define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
905#define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
906#define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
907#define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
908#define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
909#define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
910#define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
911#define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
912#define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
913#define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
914#define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
915#define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
916#define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
917#define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
918#define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
919#define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
920#define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
921#define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
922#define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
923#define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
924#define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)
925
926#define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
927#define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
928#define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
929#define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
930#define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
931#define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
932#define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
933#define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
934#define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
935#define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
936#define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)
937
938/* Error bits that are common to a device */
939#define QIB_E_RESET ERR_MASK(ResetNegated)
940#define QIB_E_HARDWARE ERR_MASK(HardwareErr)
941#define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)
942
943
944/*
945 * Per chip (rather than per-port) errors. Most either do
946 * nothing but trigger a print (because they self-recover, or
947 * always occur in tandem with other errors that handle the
 948 * issue), or indicate errors with no recovery,
949 * but we want to know that they happened.
950 */
951#define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
952#define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
953#define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
954#define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
955#define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
956#define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
957#define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
958#define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)
959
960/* SDMA chip errors (not per port)
961 * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
962 * the SDMAHALT error immediately, so we just print the dup error via the
963 * E_AUTO mechanism. This is true of most of the per-port fatal errors
964 * as well, but since this is port-independent, by definition, it's
965 * handled a bit differently. SDMA_VL15 and SDMA_WRONG_PORT are per
966 * packet send errors, and so are handled in the same manner as other
967 * per-packet errors.
968 */
969#define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
970#define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
971#define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)
972
973/*
974 * Below functionally equivalent to legacy QLOGIC_IB_E_PKTERRS
975 * it is used to print "common" packet errors.
976 */
977#define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
978 QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
979 QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
980 QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
981 QIB_E_P_REBP)
982
 983/* Error bits that are packet-related (Receive, per-port) */
984#define QIB_E_P_RPKTERRS (\
985 QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
986 QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
987 QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
988 QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
989 QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
990 QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)
991
992/*
993 * Error bits that are Send-related (per port)
994 * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
995 * All of these potentially need to have a buffer disarmed
996 */
997#define QIB_E_P_SPKTERRS (\
998 QIB_E_P_SUNEXP_PKTNUM |\
999 QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
1000 QIB_E_P_SMAXPKTLEN |\
1001 QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
1002 QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
1003 QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)
1004
1005#define QIB_E_SPKTERRS ( \
1006 QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
1007 ERR_MASK_N(SendUnsupportedVLErr) | \
1008 QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)
1009
1010#define QIB_E_P_SDMAERRS ( \
1011 QIB_E_P_SDMAHALT | \
1012 QIB_E_P_SDMADESCADDRMISALIGN | \
1013 QIB_E_P_SDMAUNEXPDATA | \
1014 QIB_E_P_SDMAMISSINGDW | \
1015 QIB_E_P_SDMADWEN | \
1016 QIB_E_P_SDMARPYTAG | \
1017 QIB_E_P_SDMA1STDESC | \
1018 QIB_E_P_SDMABASE | \
1019 QIB_E_P_SDMATAILOUTOFBOUND | \
1020 QIB_E_P_SDMAOUTOFBOUND | \
1021 QIB_E_P_SDMAGENMISMATCH)
1022
1023/*
1024 * This sets some bits more than once, but makes it more obvious which
1025 * bits are not handled under other categories, and the repeat definition
1026 * is not a problem.
1027 */
1028#define QIB_E_P_BITSEXTANT ( \
1029 QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
1030 QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
1031 QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
1032 QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
1033 )
1034
1035/*
1036 * These are errors that can occur when the link
1037 * changes state while a packet is being sent or received. This doesn't
 1038 * cover things like EBP or VCRC that can be the result of a send
 1039 * in progress when the link changes state, so we receive a "known bad" packet.
1040 * All of these are "per port", so renamed:
1041 */
1042#define QIB_E_P_LINK_PKTERRS (\
1043 QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
1044 QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
1045 QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
1046 QIB_E_P_RUNEXPCHAR)
1047
1048/*
1049 * This sets some bits more than once, but makes it more obvious which
1050 * bits are not handled under other categories (such as QIB_E_SPKTERRS),
1051 * and the repeat definition is not a problem.
1052 */
1053#define QIB_E_C_BITSEXTANT (\
1054 QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
1055 QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
1056 QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)
1057
1058/* Likewise Neuter E_SPKT_ERRS_IGNORE */
1059#define E_SPKT_ERRS_IGNORE 0
1060
1061#define QIB_EXTS_MEMBIST_DISABLED \
1062 SYM_MASK(EXTStatus, MemBISTDisabled)
1063#define QIB_EXTS_MEMBIST_ENDTEST \
1064 SYM_MASK(EXTStatus, MemBISTEndTest)
1065
1066#define QIB_E_SPIOARMLAUNCH \
1067 ERR_MASK(SendArmLaunchErr)
1068
1069#define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
1070#define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)
1071
1072/*
1073 * IBTA_1_2 is set when multiple speeds are enabled (normal),
1074 * and also if forced QDR (only QDR enabled). It's enabled for the
1075 * forced QDR case so that scrambling will be enabled by the TS3
1076 * exchange, when supported by both sides of the link.
1077 */
1078#define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
1079#define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
1080#define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
1081#define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
1082#define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
1083#define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
1084 SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
1085#define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)
1086
1087#define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
1088#define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)
1089
1090#define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
1091#define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1092#define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1093
1094#define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1095#define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1096#define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
1097 SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
1098#define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
1099 SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
1100#define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)
1101
1102#define IBA7322_REDIRECT_VEC_PER_REG 12
1103
1104#define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
1105#define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
1106#define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
1107#define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
1108#define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)
1109
1110#define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */
1111
1112#define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
 1113 .msg = #fldname , .sz = sizeof(#fldname) }
 1114#define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
 1115 fldname##Mask##_##port), .msg = #fldname , .sz = sizeof(#fldname) }
 1116static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
1117 HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
1118 HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
1119 HWE_AUTO(PCIESerdesPClkNotDetect),
1120 HWE_AUTO(PowerOnBISTFailed),
1121 HWE_AUTO(TempsenseTholdReached),
1122 HWE_AUTO(MemoryErr),
1123 HWE_AUTO(PCIeBusParityErr),
1124 HWE_AUTO(PcieCplTimeout),
1125 HWE_AUTO(PciePoisonedTLP),
1126 HWE_AUTO_P(SDmaMemReadErr, 1),
1127 HWE_AUTO_P(SDmaMemReadErr, 0),
1128 HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
 1129 HWE_AUTO_P(IBCBusToSPCParityErr, 1),
 1130 HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
 1131 HWE_AUTO(statusValidNoEop),
 1132 HWE_AUTO(LATriggered),
 1133 { .mask = 0, .sz = 0 }
 1134};
1135
1136#define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
 1137 .msg = #fldname, .sz = sizeof(#fldname) }
 1138#define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
 1139 .msg = #fldname, .sz = sizeof(#fldname) }
 1140static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
 1141 E_AUTO(RcvEgrFullErr),
1142 E_AUTO(RcvHdrFullErr),
 1143 E_AUTO(ResetNegated),
1144 E_AUTO(HardwareErr),
1145 E_AUTO(InvalidAddrErr),
1146 E_AUTO(SDmaVL15Err),
1147 E_AUTO(SBufVL15MisUseErr),
1148 E_AUTO(InvalidEEPCmd),
1149 E_AUTO(RcvContextShareErr),
1150 E_AUTO(SendVLMismatchErr),
1151 E_AUTO(SendArmLaunchErr),
1152 E_AUTO(SendSpecialTriggerErr),
1153 E_AUTO(SDmaWrongPortErr),
1154 E_AUTO(SDmaBufMaskDuplicateErr),
 1155 { .mask = 0, .sz = 0 }
 1156};
1157
1158static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
1159 E_P_AUTO(IBStatusChanged),
1160 E_P_AUTO(SHeadersErr),
1161 E_P_AUTO(VL15BufMisuseErr),
1162 /*
1163 * SDmaHaltErr is not really an error, make it clearer;
1164 */
 1165 {.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted",
1166 .sz = 11},
 1167 E_P_AUTO(SDmaDescAddrMisalignErr),
1168 E_P_AUTO(SDmaUnexpDataErr),
1169 E_P_AUTO(SDmaMissingDwErr),
1170 E_P_AUTO(SDmaDwEnErr),
1171 E_P_AUTO(SDmaRpyTagErr),
1172 E_P_AUTO(SDma1stDescErr),
1173 E_P_AUTO(SDmaBaseErr),
1174 E_P_AUTO(SDmaTailOutOfBoundErr),
1175 E_P_AUTO(SDmaOutOfBoundErr),
1176 E_P_AUTO(SDmaGenMismatchErr),
1177 E_P_AUTO(SendBufMisuseErr),
1178 E_P_AUTO(SendUnsupportedVLErr),
1179 E_P_AUTO(SendUnexpectedPktNumErr),
1180 E_P_AUTO(SendDroppedDataPktErr),
1181 E_P_AUTO(SendDroppedSmpPktErr),
1182 E_P_AUTO(SendPktLenErr),
1183 E_P_AUTO(SendUnderRunErr),
1184 E_P_AUTO(SendMaxPktLenErr),
1185 E_P_AUTO(SendMinPktLenErr),
1186 E_P_AUTO(RcvIBLostLinkErr),
1187 E_P_AUTO(RcvHdrErr),
1188 E_P_AUTO(RcvHdrLenErr),
1189 E_P_AUTO(RcvBadTidErr),
1190 E_P_AUTO(RcvBadVersionErr),
1191 E_P_AUTO(RcvIBFlowErr),
1192 E_P_AUTO(RcvEBPErr),
1193 E_P_AUTO(RcvUnsupportedVLErr),
1194 E_P_AUTO(RcvUnexpectedCharErr),
1195 E_P_AUTO(RcvShortPktLenErr),
1196 E_P_AUTO(RcvLongPktLenErr),
1197 E_P_AUTO(RcvMaxPktLenErr),
1198 E_P_AUTO(RcvMinPktLenErr),
1199 E_P_AUTO(RcvICRCErr),
1200 E_P_AUTO(RcvVCRCErr),
1201 E_P_AUTO(RcvFormatErr),
 1202 { .mask = 0, .sz = 0 }
 1203};
1204
1205/*
1206 * Below generates "auto-message" for interrupts not specific to any port or
1207 * context
1208 */
1209#define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
 1210 .msg = #fldname, .sz = sizeof(#fldname) }
 1211/* Below generates "auto-message" for interrupts specific to a port */
1212#define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
1213 SYM_LSB(IntMask, fldname##Mask##_0), \
1214 SYM_LSB(IntMask, fldname##Mask##_1)), \
 1215 .msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
 1216/* For some reason, the SerDesTrimDone bits are reversed */
1217#define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
1218 SYM_LSB(IntMask, fldname##Mask##_1), \
1219 SYM_LSB(IntMask, fldname##Mask##_0)), \
 1220 .msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
 1221/*
1222 * Below generates "auto-message" for interrupts specific to a context,
1223 * with ctxt-number appended
1224 */
1225#define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
1226 SYM_LSB(IntMask, fldname##0IntMask), \
1227 SYM_LSB(IntMask, fldname##17IntMask)), \
 1228 .msg = #fldname "_C", .sz = sizeof(#fldname "_C") }
 1229
1230static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = {
1231 INTR_AUTO_P(SDmaInt),
1232 INTR_AUTO_P(SDmaProgressInt),
1233 INTR_AUTO_P(SDmaIdleInt),
1234 INTR_AUTO_P(SDmaCleanupDone),
1235 INTR_AUTO_C(RcvUrg),
1236 INTR_AUTO_P(ErrInt),
1237 INTR_AUTO(ErrInt), /* non-port-specific errs */
1238 INTR_AUTO(AssertGPIOInt),
1239 INTR_AUTO_P(SendDoneInt),
1240 INTR_AUTO(SendBufAvailInt),
1241 INTR_AUTO_C(RcvAvail),
 1242 { .mask = 0, .sz = 0 }
 1243};
1244
1245#define TXSYMPTOM_AUTO_P(fldname) \
 1246 { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
1247 .msg = #fldname, .sz = sizeof(#fldname) }
 1248static const struct qib_hwerror_msgs hdrchk_msgs[] = {
1249 TXSYMPTOM_AUTO_P(NonKeyPacket),
1250 TXSYMPTOM_AUTO_P(GRHFail),
1251 TXSYMPTOM_AUTO_P(PkeyFail),
1252 TXSYMPTOM_AUTO_P(QPFail),
1253 TXSYMPTOM_AUTO_P(SLIDFail),
1254 TXSYMPTOM_AUTO_P(RawIPV6),
1255 TXSYMPTOM_AUTO_P(PacketTooSmall),
 1256 { .mask = 0, .sz = 0 }
 1257};
1258
1259#define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
1260
1261/*
1262 * Called when we might have an error that is specific to a particular
1263 * PIO buffer, and may need to cancel that buffer, so it can be re-used,
1264 * because we don't need to force the update of pioavail
1265 */
1266static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
1267{
1268 struct qib_devdata *dd = ppd->dd;
1269 u32 i;
1270 int any;
1271 u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
1272 u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
1273 unsigned long sbuf[4];
1274
1275 /*
1276 * It's possible that sendbuffererror could have bits set; might
1277 * have already done this as a result of hardware error handling.
1278 */
1279 any = 0;
1280 for (i = 0; i < regcnt; ++i) {
1281 sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
1282 if (sbuf[i]) {
1283 any = 1;
1284 qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
1285 }
1286 }
1287
1288 if (any)
1289 qib_disarm_piobufs_set(dd, sbuf, piobcnt);
1290}
1291
1292/* No txe_recover yet, if ever */
1293
1294/* No decode__errors yet */
1295static void err_decode(char *msg, size_t len, u64 errs,
1296 const struct qib_hwerror_msgs *msp)
1297{
1298 u64 these, lmask;
1299 int took, multi, n = 0;
1300
 1301 while (errs && msp && msp->mask) {
 1302  multi = (msp->mask & (msp->mask - 1));
1303 while (errs & msp->mask) {
1304 these = (errs & msp->mask);
1305 lmask = (these & (these - 1)) ^ these;
1306 if (len) {
1307 if (n++) {
1308 /* separate the strings */
1309 *msg++ = ',';
1310 len--;
1311 }
 1312    BUG_ON(!msp->sz);
1313 /* msp->sz counts the nul */
1314 took = min_t(size_t, msp->sz - (size_t)1, len);
1315 memcpy(msg, msp->msg, took);
 1316    len -= took;
1317 msg += took;
 1318    if (len)
1319 *msg = '\0';
 1320   }
1321 errs &= ~lmask;
1322 if (len && multi) {
1323 /* More than one bit this mask */
1324 int idx = -1;
1325
1326 while (lmask & msp->mask) {
1327 ++idx;
1328 lmask >>= 1;
1329 }
1330 took = scnprintf(msg, len, "_%d", idx);
1331 len -= took;
1332 msg += took;
1333 }
1334 }
1335 ++msp;
1336 }
1337 /* If some bits are left, show in hex. */
1338 if (len && errs)
1339 snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
1340 (unsigned long long) errs);
1341}
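/*
 * Illustrative example of the output format (not from the original source):
 * decoding a value with HardwareErr and InvalidAddrErr set against
 * qib_7322error_msgs yields "HardwareErr,InvalidAddrErr"; any set bits not
 * covered by the table are appended as ",MORE:<hex>".
 */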
1342
1343/* only called if r1 set */
1344static void flush_fifo(struct qib_pportdata *ppd)
1345{
1346 struct qib_devdata *dd = ppd->dd;
1347 u32 __iomem *piobuf;
1348 u32 bufn;
1349 u32 *hdr;
1350 u64 pbc;
1351 const unsigned hdrwords = 7;
1352 static struct qib_ib_header ibhdr = {
1353 .lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
1354 .lrh[1] = IB_LID_PERMISSIVE,
1355 .lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
1356 .lrh[3] = IB_LID_PERMISSIVE,
1357 .u.oth.bth[0] = cpu_to_be32(
1358 (IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
1359 .u.oth.bth[1] = cpu_to_be32(0),
1360 .u.oth.bth[2] = cpu_to_be32(0),
1361 .u.oth.u.ud.deth[0] = cpu_to_be32(0),
1362 .u.oth.u.ud.deth[1] = cpu_to_be32(0),
1363 };
1364
1365 /*
1366 * Send a dummy VL15 packet to flush the launch FIFO.
1367 * This will not actually be sent since the TxeBypassIbc bit is set.
1368 */
1369 pbc = PBC_7322_VL15_SEND |
1370 (((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
1371 (hdrwords + SIZE_OF_CRC);
1372 piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
1373 if (!piobuf)
1374 return;
1375 writeq(pbc, piobuf);
1376 hdr = (u32 *) &ibhdr;
1377 if (dd->flags & QIB_PIO_FLUSH_WC) {
1378 qib_flush_wc();
1379 qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
1380 qib_flush_wc();
1381 __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
1382 qib_flush_wc();
1383 } else
1384 qib_pio_copy(piobuf + 2, hdr, hdrwords);
1385 qib_sendbuf_done(dd, bufn);
1386}
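
/*
 * Annotation (not from the original source): the PBC word written ahead
 * of the dummy header above selects a VL15 send, the sending port (via
 * the PBC port-select field in the upper half of the 64-bit word), and
 * the length in dwords; because the DRAIN op has set TxeBypassIbc, the
 * packet is consumed by the chip without ever reaching the wire.
 */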
1387
1388/*
1389 * This is called with interrupts disabled and sdma_lock held.
1390 */
1391static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
1392{
1393 struct qib_devdata *dd = ppd->dd;
1394 u64 set_sendctrl = 0;
1395 u64 clr_sendctrl = 0;
1396
1397 if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
1398 set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1399 else
1400 clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1401
1402 if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
1403 set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1404 else
1405 clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1406
1407 if (op & QIB_SDMA_SENDCTRL_OP_HALT)
1408 set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1409 else
1410 clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1411
1412 if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
1413 set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1414 SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1415 SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1416 else
1417 clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1418 SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1419 SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1420
1421 spin_lock(&dd->sendctrl_lock);
1422
1423 /* If we are draining everything, block sends first */
1424 if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1425 ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
1426 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1427 qib_write_kreg(dd, kr_scratch, 0);
1428 }
1429
1430 ppd->p_sendctrl |= set_sendctrl;
1431 ppd->p_sendctrl &= ~clr_sendctrl;
1432
1433 if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
1434 qib_write_kreg_port(ppd, krp_sendctrl,
1435 ppd->p_sendctrl |
1436 SYM_MASK(SendCtrl_0, SDmaCleanup));
1437 else
1438 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1439 qib_write_kreg(dd, kr_scratch, 0);
1440
1441 if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1442 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
1443 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1444 qib_write_kreg(dd, kr_scratch, 0);
1445 }
1446
1447 spin_unlock(&dd->sendctrl_lock);
1448
1449 if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
1450 flush_fifo(ppd);
1451}
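
/*
 * Annotation (not from the original source): for the DRAIN op above, the
 * update is bracketed by clearing SendEnable before the new bits are
 * applied and restoring it afterwards, with a kr_scratch write after each
 * kreg write to flush it to the chip; on r1 silicon the launch FIFO is
 * then flushed explicitly via flush_fifo().
 */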
1452
1453static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
1454{
1455 __qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
1456}
1457
1458static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
1459{
1460 /*
1461	 * Set SendDmaLenGen and write it twice, first with the MSB of
1462	 * the generation count clear and then with it set, to enable
1463	 * generation checking and load the internal generation counter.
1464 */
1465 qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
1466 qib_write_kreg_port(ppd, krp_senddmalengen,
1467 ppd->sdma_descq_cnt |
1468 (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
1469}
1470
1471/*
1472 * Must be called with sdma_lock held, or before init finished.
1473 */
1474static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
1475{
1476 /* Commit writes to memory and advance the tail on the chip */
1477 wmb();
1478 ppd->sdma_descq_tail = tail;
1479 qib_write_kreg_port(ppd, krp_senddmatail, tail);
1480}
1481
1482/*
1483 * This is called with interrupts disabled and sdma_lock held.
1484 */
1485static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
1486{
1487 /*
1488 * Drain all FIFOs.
1489 * The hardware doesn't require this but we do it so that verbs
1490 * and user applications don't wait for link active to send stale
1491 * data.
1492 */
1493 sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);
1494
1495 qib_sdma_7322_setlengen(ppd);
1496 qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
1497 ppd->sdma_head_dma[0] = 0;
1498 qib_7322_sdma_sendctrl(ppd,
1499 ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
1500}
1501
1502#define DISABLES_SDMA ( \
1503 QIB_E_P_SDMAHALT | \
1504 QIB_E_P_SDMADESCADDRMISALIGN | \
1505 QIB_E_P_SDMAMISSINGDW | \
1506 QIB_E_P_SDMADWEN | \
1507 QIB_E_P_SDMARPYTAG | \
1508 QIB_E_P_SDMA1STDESC | \
1509 QIB_E_P_SDMABASE | \
1510 QIB_E_P_SDMATAILOUTOFBOUND | \
1511 QIB_E_P_SDMAOUTOFBOUND | \
1512 QIB_E_P_SDMAGENMISMATCH)
1513
1514static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
1515{
1516 unsigned long flags;
1517 struct qib_devdata *dd = ppd->dd;
1518
1519 errs &= QIB_E_P_SDMAERRS;
1520
1521 if (errs & QIB_E_P_SDMAUNEXPDATA)
1522 qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
1523 ppd->port);
1524
1525 spin_lock_irqsave(&ppd->sdma_lock, flags);
1526
1527 switch (ppd->sdma_state.current_state) {
1528 case qib_sdma_state_s00_hw_down:
1529 break;
1530
1531 case qib_sdma_state_s10_hw_start_up_wait:
1532 if (errs & QIB_E_P_SDMAHALT)
1533 __qib_sdma_process_event(ppd,
1534 qib_sdma_event_e20_hw_started);
1535 break;
1536
1537 case qib_sdma_state_s20_idle:
1538 break;
1539
1540 case qib_sdma_state_s30_sw_clean_up_wait:
1541 break;
1542
1543 case qib_sdma_state_s40_hw_clean_up_wait:
1544 if (errs & QIB_E_P_SDMAHALT)
1545 __qib_sdma_process_event(ppd,
1546 qib_sdma_event_e50_hw_cleaned);
1547 break;
1548
1549 case qib_sdma_state_s50_hw_halt_wait:
1550 if (errs & QIB_E_P_SDMAHALT)
1551 __qib_sdma_process_event(ppd,
1552 qib_sdma_event_e60_hw_halted);
1553 break;
1554
1555 case qib_sdma_state_s99_running:
1556 __qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
1557 __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
1558 break;
1559 }
1560
1561 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1562}
1563
1564/*
1565 * handle per-device errors (not per-port errors)
1566 */
1567static noinline void handle_7322_errors(struct qib_devdata *dd)
1568{
1569 char *msg;
1570 u64 iserr = 0;
1571 u64 errs;
1572 u64 mask;
1573 int log_idx;
1574
1575 qib_stats.sps_errints++;
1576 errs = qib_read_kreg64(dd, kr_errstatus);
1577 if (!errs) {
1578 qib_devinfo(dd->pcidev, "device error interrupt, "
1579 "but no error bits set!\n");
1580 goto done;
1581 }
1582
1583 /* don't report errors that are masked */
1584 errs &= dd->cspec->errormask;
1585 msg = dd->cspec->emsgbuf;
1586
1587 /* do these first, they are most important */
1588 if (errs & QIB_E_HARDWARE) {
1589 *msg = '\0';
1590 qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
1591 } else
1592 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
1593 if (errs & dd->eep_st_masks[log_idx].errs_to_log)
1594 qib_inc_eeprom_err(dd, log_idx, 1);
1595
1596 if (errs & QIB_E_SPKTERRS) {
1597 qib_disarm_7322_senderrbufs(dd->pport);
1598 qib_stats.sps_txerrs++;
1599 } else if (errs & QIB_E_INVALIDADDR)
1600 qib_stats.sps_txerrs++;
1601 else if (errs & QIB_E_ARMLAUNCH) {
1602 qib_stats.sps_txerrs++;
1603 qib_disarm_7322_senderrbufs(dd->pport);
1604 }
1605 qib_write_kreg(dd, kr_errclear, errs);
1606
1607 /*
1608 * The ones we mask off are handled specially below
1609 * or above. Also mask SDMADISABLED by default as it
1610 * is too chatty.
1611 */
1612 mask = QIB_E_HARDWARE;
1613 *msg = '\0';
1614
1615 err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask,
1616 qib_7322error_msgs);
1617
1618 /*
1619 * Getting reset is a tragedy for all ports. Mark the device
1620	 * _and_ the ports as "offline" in a way meaningful to each.
1621 */
1622 if (errs & QIB_E_RESET) {
1623 int pidx;
1624
1625 qib_dev_err(dd, "Got reset, requires re-init "
1626 "(unload and reload driver)\n");
1627 dd->flags &= ~QIB_INITTED; /* needs re-init */
1628 /* mark as having had error */
1629 *dd->devstatusp |= QIB_STATUS_HWERROR;
1630 for (pidx = 0; pidx < dd->num_pports; ++pidx)
1631 if (dd->pport[pidx].link_speed_supported)
1632 *dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
1633 }
1634
1635 if (*msg && iserr)
1636 qib_dev_err(dd, "%s error\n", msg);
1637
1638 /*
1639 * If there were hdrq or egrfull errors, wake up any processes
1640 * waiting in poll. We used to try to check which contexts had
1641 * the overflow, but given the cost of that and the chip reads
1642 * to support it, it's better to just wake everybody up if we
1643 * get an overflow; waiters can poll again if it's not them.
1644 */
1645 if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
1646 qib_handle_urcv(dd, ~0U);
1647 if (errs & ERR_MASK(RcvEgrFullErr))
1648 qib_stats.sps_buffull++;
1649 else
1650 qib_stats.sps_hdrfull++;
1651 }
1652
1653done:
1654 return;
1655}
1656
1657static void qib_error_tasklet(unsigned long data)
1658{
1659 struct qib_devdata *dd = (struct qib_devdata *)data;
1660
1661 handle_7322_errors(dd);
1662 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
1663}
1664
1665static void reenable_chase(unsigned long opaque)
1666{
1667 struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
1668
1669 ppd->cpspec->chase_timer.expires = 0;
1670 qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1671 QLOGIC_IB_IBCC_LINKINITCMD_POLL);
1672}
1673
1674static void disable_chase(struct qib_pportdata *ppd, u64 tnow, u8 ibclt)
1675{
1676 ppd->cpspec->chase_end = 0;
1677
1678 if (!qib_chase)
1679 return;
1680
1681 qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1682 QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1683 ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
1684 add_timer(&ppd->cpspec->chase_timer);
1685}
1686
1687static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
1688{
1689 u8 ibclt;
1690 u64 tnow;
1691
1692 ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
1693
1694 /*
1695 * Detect and handle the state chase issue, where we can
1696 * get stuck if we are unlucky on timing on both sides of
1697 * the link. If we are, we disable, set a timer, and
1698 * then re-enable.
1699 */
1700 switch (ibclt) {
1701 case IB_7322_LT_STATE_CFGRCVFCFG:
1702 case IB_7322_LT_STATE_CFGWAITRMT:
1703 case IB_7322_LT_STATE_TXREVLANES:
1704 case IB_7322_LT_STATE_CFGENH:
1705 tnow = get_jiffies_64();
1706 if (ppd->cpspec->chase_end &&
1707 time_after64(tnow, ppd->cpspec->chase_end))
1708 disable_chase(ppd, tnow, ibclt);
1709 else if (!ppd->cpspec->chase_end)
1710 ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
1711 break;
1712 default:
1713 ppd->cpspec->chase_end = 0;
1714 break;
1715 }
1716
1717	if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
1718 ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
1719 ibclt == IB_7322_LT_STATE_LINKUP) &&
1720	    (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
1721 force_h1(ppd);
1722 ppd->cpspec->qdr_reforce = 1;
1723		if (!ppd->dd->cspec->r1)
1724 serdes_7322_los_enable(ppd, 0);
1725	} else if (ppd->cpspec->qdr_reforce &&
1726 (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
1727 (ibclt == IB_7322_LT_STATE_CFGENH ||
1728 ibclt == IB_7322_LT_STATE_CFGIDLE ||
1729 ibclt == IB_7322_LT_STATE_LINKUP))
1730 force_h1(ppd);
1731
1732 if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
1733 ppd->link_speed_enabled == QIB_IB_QDR &&
1734 (ibclt == IB_7322_LT_STATE_CFGTEST ||
1735 ibclt == IB_7322_LT_STATE_CFGENH ||
1736 (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
1737 ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
1738 adj_tx_serdes(ppd);
1739
1740	if (ibclt != IB_7322_LT_STATE_LINKUP) {
1741 u8 ltstate = qib_7322_phys_portstate(ibcst);
1742 u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
1743 LinkTrainingState);
1744 if (!ppd->dd->cspec->r1 &&
1745 pibclt == IB_7322_LT_STATE_LINKUP &&
1746 ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1747 ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1748 ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1749 ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1750			/* If the link went down (but not into recovery),
1751			 * turn LOS back on */
1752 serdes_7322_los_enable(ppd, 1);
1753 if (!ppd->cpspec->qdr_dfe_on &&
1754 ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
1755 ppd->cpspec->qdr_dfe_on = 1;
1756 ppd->cpspec->qdr_dfe_time = 0;
1757 /* On link down, reenable QDR adaptation */
1758 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
1759 ppd->dd->cspec->r1 ?
1760 QDR_STATIC_ADAPT_DOWN_R1 :
1761 QDR_STATIC_ADAPT_DOWN);
1762 printk(KERN_INFO QIB_DRV_NAME
1763 " IB%u:%u re-enabled QDR adaptation "
1764 "ibclt %x\n", ppd->dd->unit, ppd->port, ibclt);
1765 }
1766	}
1767}
1768
1769static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
1770
1771/*
1772 * This is per-pport error handling.
1773 * It will likely get its own MSIx interrupt (one for each port,
1774 * although just a single handler).
1775 */
1776static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1777{
1778 char *msg;
1779 u64 ignore_this_time = 0, iserr = 0, errs, fmask;
1780 struct qib_devdata *dd = ppd->dd;
1781
1782 /* do this as soon as possible */
1783 fmask = qib_read_kreg64(dd, kr_act_fmask);
1784 if (!fmask)
1785 check_7322_rxe_status(ppd);
1786
1787 errs = qib_read_kreg_port(ppd, krp_errstatus);
1788 if (!errs)
1789 qib_devinfo(dd->pcidev,
1790 "Port%d error interrupt, but no error bits set!\n",
1791 ppd->port);
1792 if (!fmask)
1793 errs &= ~QIB_E_P_IBSTATUSCHANGED;
1794 if (!errs)
1795 goto done;
1796
1797 msg = ppd->cpspec->epmsgbuf;
1798 *msg = '\0';
1799
1800 if (errs & ~QIB_E_P_BITSEXTANT) {
1801 err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
1802 errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
1803 if (!*msg)
1804 snprintf(msg, sizeof ppd->cpspec->epmsgbuf,
1805 "no others");
1806 qib_dev_porterr(dd, ppd->port, "error interrupt with unknown"
1807 " errors 0x%016Lx set (and %s)\n",
1808 (errs & ~QIB_E_P_BITSEXTANT), msg);
1809 *msg = '\0';
1810 }
1811
1812 if (errs & QIB_E_P_SHDR) {
1813 u64 symptom;
1814
1815 /* determine cause, then write to clear */
1816 symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
1817 qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
1818 err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom,
1819 hdrchk_msgs);
1820 *msg = '\0';
1821 /* senderrbuf cleared in SPKTERRS below */
1822 }
1823
1824 if (errs & QIB_E_P_SPKTERRS) {
1825 if ((errs & QIB_E_P_LINK_PKTERRS) &&
1826 !(ppd->lflags & QIBL_LINKACTIVE)) {
1827 /*
1828 * This can happen when trying to bring the link
1829 * up, but the IB link changes state at the "wrong"
1830 * time. The IB logic then complains that the packet
1831 * isn't valid. We don't want to confuse people, so
1832 * we just don't print them, except at debug
1833 */
1834 err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
1835 (errs & QIB_E_P_LINK_PKTERRS),
1836 qib_7322p_error_msgs);
1837 *msg = '\0';
1838 ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1839 }
1840 qib_disarm_7322_senderrbufs(ppd);
1841 } else if ((errs & QIB_E_P_LINK_PKTERRS) &&
1842 !(ppd->lflags & QIBL_LINKACTIVE)) {
1843 /*
1844 * This can happen when SMA is trying to bring the link
1845 * up, but the IB link changes state at the "wrong" time.
1846 * The IB logic then complains that the packet isn't
1847 * valid. We don't want to confuse people, so we just
1848 * don't print them, except at debug
1849 */
1850 err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs,
1851 qib_7322p_error_msgs);
1852 ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1853 *msg = '\0';
1854 }
1855
1856 qib_write_kreg_port(ppd, krp_errclear, errs);
1857
1858 errs &= ~ignore_this_time;
1859 if (!errs)
1860 goto done;
1861
1862 if (errs & QIB_E_P_RPKTERRS)
1863 qib_stats.sps_rcverrs++;
1864 if (errs & QIB_E_P_SPKTERRS)
1865 qib_stats.sps_txerrs++;
1866
1867 iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);
1868
1869 if (errs & QIB_E_P_SDMAERRS)
1870 sdma_7322_p_errors(ppd, errs);
1871
1872 if (errs & QIB_E_P_IBSTATUSCHANGED) {
1873 u64 ibcs;
1874 u8 ltstate;
1875
1876 ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
1877 ltstate = qib_7322_phys_portstate(ibcs);
1878
1879 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
1880 handle_serdes_issues(ppd, ibcs);
1881 if (!(ppd->cpspec->ibcctrl_a &
1882 SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
1883 /*
1884 * We got our interrupt, so init code should be
1885 * happy and not try alternatives. Now squelch
1886 * other "chatter" from link-negotiation (pre Init)
1887 */
1888 ppd->cpspec->ibcctrl_a |=
1889 SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
1890 qib_write_kreg_port(ppd, krp_ibcctrl_a,
1891 ppd->cpspec->ibcctrl_a);
1892 }
1893
1894 /* Update our picture of width and speed from chip */
1895 ppd->link_width_active =
1896 (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
1897 IB_WIDTH_4X : IB_WIDTH_1X;
1898 ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
1899 LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
1900 SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
1901 QIB_IB_DDR : QIB_IB_SDR;
1902
1903 if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
1904 IB_PHYSPORTSTATE_DISABLED)
1905 qib_set_ib_7322_lstate(ppd, 0,
1906 QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1907		else
1908			/*
1909 * Since going into a recovery state causes the link
1910 * state to go down and since recovery is transitory,
1911 * it is better if we "miss" ever seeing the link
1912 * training state go into recovery (i.e., ignore this
1913 * transition for link state special handling purposes)
1914 * without updating lastibcstat.
1915 */
1916 if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1917 ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1918 ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1919 ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1920 qib_handle_e_ibstatuschanged(ppd, ibcs);
1921 }
1922 if (*msg && iserr)
1923 qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
1924
1925 if (ppd->state_wanted & ppd->lflags)
1926 wake_up_interruptible(&ppd->state_wait);
1927done:
1928 return;
1929}
1930
1931/* enable/disable chip from delivering interrupts */
1932static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
1933{
1934 if (enable) {
1935 if (dd->flags & QIB_BADINTR)
1936 return;
1937 qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
1938 /* cause any pending enabled interrupts to be re-delivered */
1939 qib_write_kreg(dd, kr_intclear, 0ULL);
1940 if (dd->cspec->num_msix_entries) {
1941 /* and same for MSIx */
1942 u64 val = qib_read_kreg64(dd, kr_intgranted);
1943 if (val)
1944 qib_write_kreg(dd, kr_intgranted, val);
1945 }
1946 } else
1947 qib_write_kreg(dd, kr_intmask, 0ULL);
1948}
1949
1950/*
1951 * Try to cleanup as much as possible for anything that might have gone
1952 * wrong while in freeze mode, such as pio buffers being written by user
1953 * processes (causing armlaunch), send errors due to going into freeze mode,
1954 * etc., and try to avoid causing extra interrupts while doing so.
1955 * Forcibly update the in-memory pioavail register copies after cleanup
1956 * because the chip won't do it while in freeze mode (the register values
1957 * themselves are kept correct).
1958 * Make sure that we don't lose any important interrupts by using the chip
1959 * feature that says that writing 0 to a bit in *clear that is set in
1960 * *status will cause an interrupt to be generated again (if allowed by
1961 * the *mask value).
1962 * This is in chip-specific code because of all of the register accesses,
1963 * even though the details are similar on most chips.
1964 */
1965static void qib_7322_clear_freeze(struct qib_devdata *dd)
1966{
1967 int pidx;
1968
1969 /* disable error interrupts, to avoid confusion */
1970 qib_write_kreg(dd, kr_errmask, 0ULL);
1971
1972 for (pidx = 0; pidx < dd->num_pports; ++pidx)
1973 if (dd->pport[pidx].link_speed_supported)
1974 qib_write_kreg_port(dd->pport + pidx, krp_errmask,
1975 0ULL);
1976
1977	/* also disable interrupts; errormask is sometimes overwritten */
1978 qib_7322_set_intr_state(dd, 0);
1979
1980 /* clear the freeze, and be sure chip saw it */
1981 qib_write_kreg(dd, kr_control, dd->control);
1982 qib_read_kreg32(dd, kr_scratch);
1983
1984 /*
1985 * Force new interrupt if any hwerr, error or interrupt bits are
1986 * still set, and clear "safe" send packet errors related to freeze
1987 * and cancelling sends. Re-enable error interrupts before possible
1988 * force of re-interrupt on pending interrupts.
1989 */
1990 qib_write_kreg(dd, kr_hwerrclear, 0ULL);
1991 qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
1992 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
1993 /* We need to purge per-port errs and reset mask, too */
1994 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
1995 if (!dd->pport[pidx].link_speed_supported)
1996 continue;
1997 qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0Ull);
1998 qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0Ull);
1999 }
2000 qib_7322_set_intr_state(dd, 1);
2001}
2002
2003/* no error handling to speak of */
2004/**
2005 * qib_7322_handle_hwerrors - display hardware errors.
2006 * @dd: the qlogic_ib device
2007 * @msg: the output buffer
2008 * @msgl: the size of the output buffer
2009 *
2010 * Most hardware errors are catastrophic, but for right now,
2011 * we'll print them and continue.
2012 * We reuse the same message buffer as qib_handle_errors() to
2013 * avoid excessive stack usage.
2014 */
2015static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
2016 size_t msgl)
2017{
2018 u64 hwerrs;
2019 u32 ctrl;
2020 int isfatal = 0;
2021
2022 hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
2023 if (!hwerrs)
2024 goto bail;
2025 if (hwerrs == ~0ULL) {
2026 qib_dev_err(dd, "Read of hardware error status failed "
2027 "(all bits set); ignoring\n");
2028 goto bail;
2029 }
2030 qib_stats.sps_hwerrs++;
2031
2032 /* Always clear the error status register, except BIST fail */
2033 qib_write_kreg(dd, kr_hwerrclear, hwerrs &
2034 ~HWE_MASK(PowerOnBISTFailed));
2035
2036 hwerrs &= dd->cspec->hwerrmask;
2037
2038 /* no EEPROM logging, yet */
2039
2040 if (hwerrs)
2041 qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
2042 "(cleared)\n", (unsigned long long) hwerrs);
2043
2044 ctrl = qib_read_kreg32(dd, kr_control);
2045 if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
2046 /*
2047 * No recovery yet...
2048 */
2049 if ((hwerrs & ~HWE_MASK(LATriggered)) ||
2050 dd->cspec->stay_in_freeze) {
2051 /*
2052			 * If any bits we aren't ignoring are set, only make
2053			 * the complaint once, in case it's stuck or recurring
2054			 * and we get here multiple times.
2055 * Force link down, so switch knows, and
2056 * LEDs are turned off.
2057 */
2058 if (dd->flags & QIB_INITTED)
2059 isfatal = 1;
2060 } else
2061 qib_7322_clear_freeze(dd);
2062 }
2063
2064 if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
2065 isfatal = 1;
2066 strlcpy(msg, "[Memory BIST test failed, "
2067 "InfiniPath hardware unusable]", msgl);
2068 /* ignore from now on, so disable until driver reloaded */
2069 dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
2070 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2071 }
2072
2073 err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);
2074
2075 /* Ignore esoteric PLL failures et al. */
2076
2077 qib_dev_err(dd, "%s hardware error\n", msg);
2078
2079 if (isfatal && !dd->diag_client) {
2080 qib_dev_err(dd, "Fatal Hardware Error, no longer"
2081 " usable, SN %.16s\n", dd->serial);
2082 /*
2083 * for /sys status file and user programs to print; if no
2084 * trailing brace is copied, we'll know it was truncated.
2085 */
2086 if (dd->freezemsg)
2087 snprintf(dd->freezemsg, dd->freezelen,
2088 "{%s}", msg);
2089 qib_disable_after_error(dd);
2090 }
2091bail:;
2092}
2093
2094/**
2095 * qib_7322_init_hwerrors - enable hardware errors
2096 * @dd: the qlogic_ib device
2097 *
2098 * now that we have finished initializing everything that might reasonably
2099 * cause a hardware error, and cleared those error bits as they occur,
2100 * we can enable hardware errors in the mask (potentially enabling
2101 * freeze mode), and enable hardware errors as errors (along with
2102 * everything else) in errormask
2103 */
2104static void qib_7322_init_hwerrors(struct qib_devdata *dd)
2105{
2106 int pidx;
2107 u64 extsval;
2108
2109 extsval = qib_read_kreg64(dd, kr_extstatus);
2110 if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
2111 QIB_EXTS_MEMBIST_ENDTEST)))
2112 qib_dev_err(dd, "MemBIST did not complete!\n");
2113
2114 /* never clear BIST failure, so reported on each driver load */
2115 qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
2116 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2117
2118 /* clear all */
2119 qib_write_kreg(dd, kr_errclear, ~0ULL);
2120 /* enable errors that are masked, at least this first time. */
2121 qib_write_kreg(dd, kr_errmask, ~0ULL);
2122 dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
2123 for (pidx = 0; pidx < dd->num_pports; ++pidx)
2124 if (dd->pport[pidx].link_speed_supported)
2125 qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2126 ~0ULL);
2127}
2128
2129/*
2130 * Disable and enable the armlaunch error. Used for PIO bandwidth testing
2131 * on chips that are count-based, rather than trigger-based. There is no
2132 * reference counting, but that's also fine, given the intended use.
2133 * Only chip-specific because it's all register accesses
2134 */
2135static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
2136{
2137 if (enable) {
2138 qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
2139 dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
2140 } else
2141 dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
2142 qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2143}
2144
2145/*
2146 * Formerly took parameter <which> in pre-shifted,
2147 * pre-merged form with LinkCmd and LinkInitCmd
2148 * together, and assuming the zero was NOP.
2149 */
2150static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
2151 u16 linitcmd)
2152{
2153 u64 mod_wd;
2154 struct qib_devdata *dd = ppd->dd;
2155 unsigned long flags;
2156
2157 if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
2158 /*
2159 * If we are told to disable, note that so link-recovery
2160 * code does not attempt to bring us back up.
2161 * Also reset everything that we can, so we start
2162 * completely clean when re-enabled (before we
2163 * actually issue the disable to the IBC)
2164 */
2165 qib_7322_mini_pcs_reset(ppd);
2166 spin_lock_irqsave(&ppd->lflags_lock, flags);
2167 ppd->lflags |= QIBL_IB_LINK_DISABLED;
2168 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2169 } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
2170 /*
2171 * Any other linkinitcmd will lead to LINKDOWN and then
2172 * to INIT (if all is well), so clear flag to let
2173 * link-recovery code attempt to bring us back up.
2174 */
2175 spin_lock_irqsave(&ppd->lflags_lock, flags);
2176 ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
2177 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2178 /*
2179 * Clear status change interrupt reduction so the
2180 * new state is seen.
2181 */
2182 ppd->cpspec->ibcctrl_a &=
2183 ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
2184 }
2185
2186 mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
2187 (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2188
2189 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
2190 mod_wd);
2191 /* write to chip to prevent back-to-back writes of ibc reg */
2192 qib_write_kreg(dd, kr_scratch, 0);
2193
2194}
2195
2196/*
2197 * The total RCV buffer memory is 64KB, used for both ports, and is
2198 * in units of 64 bytes (same as IB flow control credit unit).
2199 * The consumedVL unit in the same registers is in 32 byte units!
2200 * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
2201 * and we can therefore allocate just 9 IB credits for 2 VL15 packets
2202 * in krp_rxcreditvl15, rather than 10.
2203 */
2204#define RCV_BUF_UNITSZ 64
2205#define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
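
/*
 * Worked example (annotation, assuming a dual-port chip): the pool is
 * (64 * 1024) / (64 * 2) = 512 units per port. VL15 gets
 * (2 * 288 + 63) / 64 = 9 units, and set_vls() splits the remaining 503
 * units evenly across the operational data VLs, giving any rounding
 * remainder to VL0.
 */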
2206
2207static void set_vls(struct qib_pportdata *ppd)
2208{
2209 int i, numvls, totcred, cred_vl, vl0extra;
2210 struct qib_devdata *dd = ppd->dd;
2211 u64 val;
2212
2213 numvls = qib_num_vls(ppd->vls_operational);
2214
2215 /*
2216	 * Set up per-VL credits. Below is a kluge based on these assumptions:
2217 * 1) port is disabled at the time early_init is called.
2218 * 2) give VL15 17 credits, for two max-plausible packets.
2219 * 3) Give VL0-N the rest, with any rounding excess used for VL0
2220 */
2221 /* 2 VL15 packets @ 288 bytes each (including IB headers) */
2222 totcred = NUM_RCV_BUF_UNITS(dd);
2223 cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
2224 totcred -= cred_vl;
2225 qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
2226 cred_vl = totcred / numvls;
2227 vl0extra = totcred - cred_vl * numvls;
2228 qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
2229 for (i = 1; i < numvls; i++)
2230 qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
2231 for (; i < 8; i++) /* no buffer space for other VLs */
2232 qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
2233
2234 /* Notify IBC that credits need to be recalculated */
2235 val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
2236 val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2237 qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2238 qib_write_kreg(dd, kr_scratch, 0ULL);
2239 val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2240 qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2241
2242 for (i = 0; i < numvls; i++)
2243 val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
2244 val = qib_read_kreg_port(ppd, krp_rxcreditvl15);
2245
2246 /* Change the number of operational VLs */
2247 ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
2248 ~SYM_MASK(IBCCtrlA_0, NumVLane)) |
2249 ((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
2250 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2251 qib_write_kreg(dd, kr_scratch, 0ULL);
2252}
2253
2254/*
2255 * The code that deals with actual SerDes is in serdes_7322_init().
2256 * Compared to the code for iba7220, it is minimal.
2257 */
2258static int serdes_7322_init(struct qib_pportdata *ppd);
2259
2260/**
2261 * qib_7322_bringup_serdes - bring up the serdes
2262 * @ppd: physical port on the qlogic_ib device
2263 */
2264static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
2265{
2266 struct qib_devdata *dd = ppd->dd;
2267 u64 val, guid, ibc;
2268 unsigned long flags;
2269 int ret = 0;
2270
2271 /*
2272 * SerDes model not in Pd, but still need to
2273 * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
2274 * eventually.
2275 */
2276 /* Put IBC in reset, sends disabled (should be in reset already) */
2277 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2278 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2279 qib_write_kreg(dd, kr_scratch, 0ULL);
2280
2281 if (qib_compat_ddr_negotiate) {
2282 ppd->cpspec->ibdeltainprog = 1;
2283 ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
2284 crp_ibsymbolerr);
2285 ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
2286 crp_iblinkerrrecov);
2287 }
2288
2289 /* flowcontrolwatermark is in units of KBytes */
2290 ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
2291 /*
2292 * Flow control is sent this often, even if no changes in
2293 * buffer space occur. Units are 128ns for this chip.
2294 * Set to 3usec.
2295 */
2296 ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
2297 /* max error tolerance */
2298 ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
2299 /* IB credit flow control. */
2300 ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
2301 /*
2302 * set initial max size pkt IBC will send, including ICRC; it's the
2303 * PIO buffer size in dwords, less 1; also see qib_set_mtu()
2304 */
2305 ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
2306 SYM_LSB(IBCCtrlA_0, MaxPktLen);
2307 ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
2308
2309 /* initially come up waiting for TS1, without sending anything. */
2310 val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
2311 QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2312
2313	ppd->cpspec->ibcctrl_a = val;
2314	/*
2315 * Reset the PCS interface to the serdes (and also ibc, which is still
2316 * in reset from above). Writes new value of ibcctrl_a as last step.
2317 */
2318 qib_7322_mini_pcs_reset(ppd);
2319 qib_write_kreg(dd, kr_scratch, 0ULL);
2320	/* clear the linkinit cmds */
2321 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
2322
2323 if (!ppd->cpspec->ibcctrl_b) {
2324 unsigned lse = ppd->link_speed_enabled;
2325
2326 /*
2327 * Not on re-init after reset, establish shadow
2328 * and force initial config.
2329 */
2330 ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
2331 krp_ibcctrl_b);
2332 ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
2333 IBA7322_IBC_SPEED_DDR |
2334 IBA7322_IBC_SPEED_SDR |
2335 IBA7322_IBC_WIDTH_AUTONEG |
2336 SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
2337		if (lse & (lse - 1)) /* Multiple speeds enabled */
2338 ppd->cpspec->ibcctrl_b |=
2339 (lse << IBA7322_IBC_SPEED_LSB) |
2340 IBA7322_IBC_IBTA_1_2_MASK |
2341 IBA7322_IBC_MAX_SPEED_MASK;
2342 else
2343 ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
2344 IBA7322_IBC_SPEED_QDR |
2345 IBA7322_IBC_IBTA_1_2_MASK :
2346 (lse == QIB_IB_DDR) ?
2347 IBA7322_IBC_SPEED_DDR :
2348 IBA7322_IBC_SPEED_SDR;
2349 if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
2350 (IB_WIDTH_1X | IB_WIDTH_4X))
2351 ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
2352 else
2353 ppd->cpspec->ibcctrl_b |=
2354 ppd->link_width_enabled == IB_WIDTH_4X ?
2355 IBA7322_IBC_WIDTH_4X_ONLY :
2356 IBA7322_IBC_WIDTH_1X_ONLY;
2357
2358 /* always enable these on driver reload, not sticky */
2359 ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
2360 IBA7322_IBC_HRTBT_MASK);
2361 }
2362 qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
2363
2364 /* setup so we have more time at CFGTEST to change H1 */
2365 val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
2366 val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
2367 val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
2368 qib_write_kreg_port(ppd, krp_ibcctrl_c, val);
2369
2370 serdes_7322_init(ppd);
2371
2372 guid = be64_to_cpu(ppd->guid);
2373 if (!guid) {
2374 if (dd->base_guid)
2375 guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
2376 ppd->guid = cpu_to_be64(guid);
2377 }
2378
2379 qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
2380 /* write to chip to prevent back-to-back writes of ibc reg */
2381 qib_write_kreg(dd, kr_scratch, 0);
2382
2383 /* Enable port */
2384 ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
2385 set_vls(ppd);
2386
2387 /* be paranoid against later code motion, etc. */
2388 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2389 ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
2390 qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
2391 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2392
2393 /* Also enable IBSTATUSCHG interrupt. */
2394 val = qib_read_kreg_port(ppd, krp_errmask);
2395 qib_write_kreg_port(ppd, krp_errmask,
2396 val | ERR_MASK_N(IBStatusChanged));
2397
2398 /* Always zero until we start messing with SerDes for real */
2399 return ret;
2400}
2401
2402/**
2403 * qib_7322_quiet_serdes - set serdes to txidle
2404 * qib_7322_mini_quiet_serdes - set serdes to txidle
2405 * @ppd: physical port on the qlogic_ib device
2406 */
2407static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
2408{
2409 u64 val;
2410 unsigned long flags;
2411
2412 qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
2413
2414 spin_lock_irqsave(&ppd->lflags_lock, flags);
2415 ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
2416 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2417 wake_up(&ppd->cpspec->autoneg_wait);
2418	cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
2419	if (ppd->dd->cspec->r1)
2420		cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
2421
2422 ppd->cpspec->chase_end = 0;
2423 if (ppd->cpspec->chase_timer.data) /* if initted */
2424 del_timer_sync(&ppd->cpspec->chase_timer);
2425
2426 /*
2427 * Despite the name, actually disables IBC as well. Do it when
2428 * we are as sure as possible that no more packets can be
2429 * received, following the down and the PCS reset.
2430	 * The actual disabling happens in qib_7322_mini_pcs_reset(),
2431 * along with the PCS being reset.
2432 */
2433 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2434 qib_7322_mini_pcs_reset(ppd);
2435
2436 /*
2437 * Update the adjusted counters so the adjustment persists
2438 * across driver reload.
2439 */
2440 if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
2441 ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
2442 struct qib_devdata *dd = ppd->dd;
2443 u64 diagc;
2444
2445 /* enable counter writes */
2446 diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
2447 qib_write_kreg(dd, kr_hwdiagctrl,
2448 diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
2449
2450 if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
2451 val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
2452 if (ppd->cpspec->ibdeltainprog)
2453 val -= val - ppd->cpspec->ibsymsnap;
2454 val -= ppd->cpspec->ibsymdelta;
2455 write_7322_creg_port(ppd, crp_ibsymbolerr, val);
2456 }
2457 if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
2458 val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
2459 if (ppd->cpspec->ibdeltainprog)
2460 val -= val - ppd->cpspec->iblnkerrsnap;
2461 val -= ppd->cpspec->iblnkerrdelta;
2462 write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
2463 }
2464 if (ppd->cpspec->iblnkdowndelta) {
2465 val = read_7322_creg32_port(ppd, crp_iblinkdown);
2466 val += ppd->cpspec->iblnkdowndelta;
2467 write_7322_creg_port(ppd, crp_iblinkdown, val);
2468 }
2469 /*
2470 * No need to save ibmalfdelta since IB perfcounters
2471 * are cleared on driver reload.
2472 */
2473
2474 /* and disable counter writes */
2475 qib_write_kreg(dd, kr_hwdiagctrl, diagc);
2476 }
2477}
2478
2479/**
2480 * qib_setup_7322_setextled - set the state of the two external LEDs
2481 * @ppd: physical port on the qlogic_ib device
2482 * @on: whether the link is up or not
2483 *
2484 * The exact combo of LEDs, if @on is true, is determined by looking
2485 * at the ibcstatus.
2486 *
2487 * These LEDs indicate the physical and logical state of IB link.
2488 * For this chip (at least with recommended board pinouts), LED1
2489 * is Yellow (logical state) and LED2 is Green (physical state),
2490 *
2491 * Note: We try to match the Mellanox HCA LED behavior as best
2492 * we can. Green indicates physical link state is OK (something is
2493 * plugged in, and we can train).
2494 * Amber indicates the link is logically up (ACTIVE).
2495 * Mellanox further blinks the amber LED to indicate data packet
2496 * activity, but we have no hardware support for that, so it would
2497 * require waking up every 10-20 msecs and checking the counters
2498 * on the chip, and then turning the LED off if appropriate. That's
2499 * visible overhead, so not something we will do.
2500 */
2501static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
2502{
2503 struct qib_devdata *dd = ppd->dd;
2504 u64 extctl, ledblink = 0, val;
2505 unsigned long flags;
2506 int yel, grn;
2507
2508 /*
2509 * The diags use the LED to indicate diag info, so we leave
2510 * the external LED alone when the diags are running.
2511 */
2512 if (dd->diag_client)
2513 return;
2514
2515 /* Allow override of LED display for, e.g. Locating system in rack */
2516 if (ppd->led_override) {
2517 grn = (ppd->led_override & QIB_LED_PHYS);
2518 yel = (ppd->led_override & QIB_LED_LOG);
2519 } else if (on) {
2520 val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
2521 grn = qib_7322_phys_portstate(val) ==
2522 IB_PHYSPORTSTATE_LINKUP;
2523 yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
2524 } else {
2525 grn = 0;
2526 yel = 0;
2527 }
2528
2529 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2530 extctl = dd->cspec->extctrl & (ppd->port == 1 ?
2531 ~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
2532 if (grn) {
2533 extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
2534 /*
2535 * Counts are in chip clock (4ns) periods.
2536 * This is 1/16 sec (66.6ms) on,
2537 * 3/16 sec (187.5 ms) off, with packets rcvd.
2538 */
2539 ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
2540 ((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
2541 }
2542 if (yel)
2543 extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
2544 dd->cspec->extctrl = extctl;
2545 qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
2546 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2547
2548 if (ledblink) /* blink the LED on packet receive */
2549 qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
2550}
2551
2552/*
2553 * Disable MSIx interrupt if enabled, call generic MSIx code
2554 * to cleanup, and clear pending MSIx interrupts.
2555 * Used for fallback to INTx, after reset, and when MSIx setup fails.
2556 */
2557static void qib_7322_nomsix(struct qib_devdata *dd)
2558{
2559 u64 intgranted;
2560 int n;
2561
2562 dd->cspec->main_int_mask = ~0ULL;
2563 n = dd->cspec->num_msix_entries;
2564 if (n) {
2565 int i;
2566
2567 dd->cspec->num_msix_entries = 0;
2568 for (i = 0; i < n; i++)
2569 free_irq(dd->cspec->msix_entries[i].vector,
2570 dd->cspec->msix_arg[i]);
2571 qib_nomsix(dd);
2572 }
2573 /* make sure no MSIx interrupts are left pending */
2574 intgranted = qib_read_kreg64(dd, kr_intgranted);
2575 if (intgranted)
2576 qib_write_kreg(dd, kr_intgranted, intgranted);
2577}
2578
2579static void qib_7322_free_irq(struct qib_devdata *dd)
2580{
2581 if (dd->cspec->irq) {
2582 free_irq(dd->cspec->irq, dd);
2583 dd->cspec->irq = 0;
2584 }
2585 qib_7322_nomsix(dd);
2586}
2587
2588static void qib_setup_7322_cleanup(struct qib_devdata *dd)
2589{
2590 int i;
2591
2592	qib_7322_free_irq(dd);
2593 kfree(dd->cspec->cntrs);
2594 kfree(dd->cspec->sendchkenable);
2595 kfree(dd->cspec->sendgrhchk);
2596 kfree(dd->cspec->sendibchk);
2597 kfree(dd->cspec->msix_entries);
2598 kfree(dd->cspec->msix_arg);
2599 for (i = 0; i < dd->num_pports; i++) {
2600 unsigned long flags;
2601 u32 mask = QSFP_GPIO_MOD_PRS_N |
2602 (QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);
2603
2604 kfree(dd->pport[i].cpspec->portcntrs);
2605 if (dd->flags & QIB_HAS_QSFP) {
2606 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2607 dd->cspec->gpio_mask &= ~mask;
2608 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
2609 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2610 qib_qsfp_deinit(&dd->pport[i].cpspec->qsfp_data);
2611 }
2612 if (dd->pport[i].ibport_data.smi_ah)
2613 ib_destroy_ah(&dd->pport[i].ibport_data.smi_ah->ibah);
2614 }
2615}
2616
2617/* handle SDMA interrupts */
2618static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
2619{
2620 struct qib_pportdata *ppd0 = &dd->pport[0];
2621 struct qib_pportdata *ppd1 = &dd->pport[1];
2622 u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
2623 INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
2624 u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
2625 INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));
2626
2627 if (intr0)
2628 qib_sdma_intr(ppd0);
2629 if (intr1)
2630 qib_sdma_intr(ppd1);
2631
2632 if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
2633 qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
2634 if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
2635 qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
2636}
2637
2638/*
2639 * Set or clear the Send buffer available interrupt enable bit.
2640 */
2641static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
2642{
2643 unsigned long flags;
2644
2645 spin_lock_irqsave(&dd->sendctrl_lock, flags);
2646 if (needint)
2647 dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
2648 else
2649 dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
2650 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
2651 qib_write_kreg(dd, kr_scratch, 0ULL);
2652 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
2653}
2654
2655/*
2656 * Somehow got an interrupt with reserved bits set in interrupt status.
2657 * Print a message so we know it happened, then clear them.
2658 * keep mainline interrupt handler cache-friendly
2659 */
2660static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
2661{
2662 u64 kills;
2663
2664	kills = istat & ~QIB_I_BITSEXTANT;
2665	qib_dev_err(dd,
2666		"Clearing reserved interrupt(s) 0x%016llx\n",
2667		(unsigned long long) kills);
2668 qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
2669}
2670
2671/* keep mainline interrupt handler cache-friendly */
2672static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
2673{
2674 u32 gpiostatus;
2675 int handled = 0;
2676 int pidx;
2677
2678 /*
2679 * Boards for this chip currently don't use GPIO interrupts,
2680 * so clear by writing GPIOstatus to GPIOclear, and complain
2681 * to developer. To avoid endless repeats, clear
2682 * the bits in the mask, since there is some kind of
2683 * programming error or chip problem.
2684 */
2685 gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
2686 /*
2687 * In theory, writing GPIOstatus to GPIOclear could
2688 * have a bad side-effect on some diagnostic that wanted
2689 * to poll for a status-change, but the various shadows
2690 * make that problematic at best. Diags will just suppress
2691 * all GPIO interrupts during such tests.
2692 */
2693 qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
2694 /*
2695 * Check for QSFP MOD_PRS changes
2696 * only works for single port if IB1 != pidx1
2697 */
2698 for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
2699 ++pidx) {
2700 struct qib_pportdata *ppd;
2701 struct qib_qsfp_data *qd;
2702 u32 mask;
2703 if (!dd->pport[pidx].link_speed_supported)
2704 continue;
2705 mask = QSFP_GPIO_MOD_PRS_N;
2706 ppd = dd->pport + pidx;
2707 mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
2708 if (gpiostatus & dd->cspec->gpio_mask & mask) {
2709 u64 pins;
2710 qd = &ppd->cpspec->qsfp_data;
2711 gpiostatus &= ~mask;
2712 pins = qib_read_kreg64(dd, kr_extstatus);
2713 pins >>= SYM_LSB(EXTStatus, GPIOIn);
2714 if (!(pins & mask)) {
2715 ++handled;
2716 qd->t_insert = get_jiffies_64();
2717				queue_work(ib_wq, &qd->work);
2718			}
2719 }
2720 }
2721
2722 if (gpiostatus && !handled) {
2723 const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
2724 u32 gpio_irq = mask & gpiostatus;
2725
2726 /*
2727 * Clear any troublemakers, and update chip from shadow
2728 */
2729 dd->cspec->gpio_mask &= ~gpio_irq;
2730 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
2731 }
2732}
2733
2734/*
2735 * Handle errors and unusual events first, separate function
2736 * to improve cache hits for fast path interrupt handling.
2737 */
2738static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
2739{
2740 if (istat & ~QIB_I_BITSEXTANT)
2741 unknown_7322_ibits(dd, istat);
2742 if (istat & QIB_I_GPIO)
2743 unknown_7322_gpio_intr(dd);
2744	if (istat & QIB_I_C_ERROR) {
2745 qib_write_kreg(dd, kr_errmask, 0ULL);
2746 tasklet_schedule(&dd->error_tasklet);
2747 }
2748	if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
2749 handle_7322_p_errors(dd->rcd[0]->ppd);
2750 if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
2751 handle_7322_p_errors(dd->rcd[1]->ppd);
2752}
2753
2754/*
2755 * Dynamically adjust the rcv int timeout for a context based on incoming
2756 * packet rate.
2757 */
2758static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
2759{
2760 struct qib_devdata *dd = rcd->dd;
2761 u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];
2762
2763 /*
2764 * Dynamically adjust idle timeout on chip
2765 * based on number of packets processed.
2766 */
2767 if (npkts < rcv_int_count && timeout > 2)
2768 timeout >>= 1;
2769 else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
2770 timeout = min(timeout << 1, rcv_int_timeout);
2771 else
2772 return;
2773
2774 dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
2775 qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
2776}
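
/*
 * Annotation (not from the original source): the heuristic above halves
 * the timeout (while it stays above 2) when an interrupt serviced fewer
 * than rcv_int_count packets, and doubles it, capped at rcv_int_timeout,
 * when it serviced at least that many, so busy contexts coalesce more
 * aggressively while lightly loaded ones keep interrupt latency low.
 */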
2777
2778/*
2779 * This is the main interrupt handler.
2780 * It will normally only be used for low frequency interrupts but may
2781 * have to handle all interrupts if INTx is enabled or fewer than normal
2782 * MSIx interrupts were allocated.
2783 * This routine should ignore the interrupt bits for any of the
2784 * dedicated MSIx handlers.
2785 */
2786static irqreturn_t qib_7322intr(int irq, void *data)
2787{
2788 struct qib_devdata *dd = data;
2789 irqreturn_t ret;
2790 u64 istat;
2791 u64 ctxtrbits;
2792 u64 rmask;
2793 unsigned i;
2794 u32 npkts;
2795
2796 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
2797 /*
2798 * This return value is not great, but we do not want the
2799 * interrupt core code to remove our interrupt handler
2800 * because we don't appear to be handling an interrupt
2801 * during a chip reset.
2802 */
2803 ret = IRQ_HANDLED;
2804 goto bail;
2805 }
2806
2807 istat = qib_read_kreg64(dd, kr_intstatus);
2808
2809 if (unlikely(istat == ~0ULL)) {
2810 qib_bad_intrstatus(dd);
2811 qib_dev_err(dd, "Interrupt status all f's, skipping\n");
2812 /* don't know if it was our interrupt or not */
2813 ret = IRQ_NONE;
2814 goto bail;
2815 }
2816
2817 istat &= dd->cspec->main_int_mask;
2818 if (unlikely(!istat)) {
2819 /* already handled, or shared and not us */
2820 ret = IRQ_NONE;
2821 goto bail;
2822 }
2823
2824 qib_stats.sps_ints++;
2825 if (dd->int_counter != (u32) -1)
2826 dd->int_counter++;
2827
2828 /* handle "errors" of various kinds first, device ahead of port */
2829 if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
2830 QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
2831 INT_MASK_P(Err, 1))))
2832 unlikely_7322_intr(dd, istat);
2833
2834 /*
2835 * Clear the interrupt bits we found set, relatively early, so we
2836	 * "know" the chip will have seen this by the time we process
2837 * the queue, and will re-interrupt if necessary. The processor
2838 * itself won't take the interrupt again until we return.
2839 */
2840 qib_write_kreg(dd, kr_intclear, istat);
2841
2842 /*
2843 * Handle kernel receive queues before checking for pio buffers
2844 * available since receives can overflow; piobuf waiters can afford
2845 * a few extra cycles, since they were waiting anyway.
2846 */
2847 ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
2848 if (ctxtrbits) {
2849 rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
2850 (1ULL << QIB_I_RCVURG_LSB);
2851 for (i = 0; i < dd->first_user_ctxt; i++) {
2852 if (ctxtrbits & rmask) {
2853 ctxtrbits &= ~rmask;
2854				if (dd->rcd[i])
2855					qib_kreceive(dd->rcd[i], NULL, &npkts);
2856			}
2857 rmask <<= 1;
2858 }
2859 if (ctxtrbits) {
2860 ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
2861 (ctxtrbits >> QIB_I_RCVURG_LSB);
2862 qib_handle_urcv(dd, ctxtrbits);
2863 }
2864 }
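
	/*
	 * Annotation (not from the original source): kernel contexts were
	 * polled one at a time above; any remaining RcvAvail/RcvUrg bits
	 * belong to user contexts and are collapsed into a single mask for
	 * qib_handle_urcv() to wake the right waiters.
	 */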
2865
2866 if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
2867 sdma_7322_intr(dd, istat);
2868
2869 if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
2870 qib_ib_piobufavail(dd);
2871
2872 ret = IRQ_HANDLED;
2873bail:
2874 return ret;
2875}
2876
2877/*
2878 * Dedicated receive packet available interrupt handler.
2879 */
2880static irqreturn_t qib_7322pintr(int irq, void *data)
2881{
2882 struct qib_ctxtdata *rcd = data;
2883 struct qib_devdata *dd = rcd->dd;
2884 u32 npkts;
2885
2886 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
2887 /*
2888 * This return value is not great, but we do not want the
2889 * interrupt core code to remove our interrupt handler
2890 * because we don't appear to be handling an interrupt
2891 * during a chip reset.
2892 */
2893 return IRQ_HANDLED;
2894
2895 qib_stats.sps_ints++;
2896 if (dd->int_counter != (u32) -1)
2897 dd->int_counter++;
2898
2899	/* Clear the interrupt bit we expect to be set. */
2900 qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
2901 (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
2902
2903 qib_kreceive(rcd, NULL, &npkts);
2904
2905 return IRQ_HANDLED;
2906}
2907
2908/*
2909 * Dedicated Send buffer available interrupt handler.
2910 */
2911static irqreturn_t qib_7322bufavail(int irq, void *data)
2912{
2913 struct qib_devdata *dd = data;
2914
2915 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
2916 /*
2917 * This return value is not great, but we do not want the
2918 * interrupt core code to remove our interrupt handler
2919 * because we don't appear to be handling an interrupt
2920 * during a chip reset.
2921 */
2922 return IRQ_HANDLED;
2923
2924 qib_stats.sps_ints++;
2925 if (dd->int_counter != (u32) -1)
2926 dd->int_counter++;
2927
2928 /* Clear the interrupt bit we expect to be set. */
2929 qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);
2930
2931 /* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
2932 if (dd->flags & QIB_INITTED)
2933 qib_ib_piobufavail(dd);
2934 else
2935 qib_wantpiobuf_7322_intr(dd, 0);
2936
2937 return IRQ_HANDLED;
2938}
2939
2940/*
2941 * Dedicated Send DMA interrupt handler.
2942 */
2943static irqreturn_t sdma_intr(int irq, void *data)
2944{
2945 struct qib_pportdata *ppd = data;
2946 struct qib_devdata *dd = ppd->dd;
2947
2948 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
2949 /*
2950 * This return value is not great, but we do not want the
2951 * interrupt core code to remove our interrupt handler
2952 * because we don't appear to be handling an interrupt
2953 * during a chip reset.
2954 */
2955 return IRQ_HANDLED;
2956
2957 qib_stats.sps_ints++;
2958 if (dd->int_counter != (u32) -1)
2959 dd->int_counter++;
2960
2961	/* Clear the interrupt bit we expect to be set. */
2962 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
2963 INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
2964 qib_sdma_intr(ppd);
2965
2966 return IRQ_HANDLED;
2967}
2968
2969/*
2970 * Dedicated Send DMA idle interrupt handler.
2971 */
2972static irqreturn_t sdma_idle_intr(int irq, void *data)
2973{
2974 struct qib_pportdata *ppd = data;
2975 struct qib_devdata *dd = ppd->dd;
2976
2977 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
2978 /*
2979 * This return value is not great, but we do not want the
2980 * interrupt core code to remove our interrupt handler
2981 * because we don't appear to be handling an interrupt
2982 * during a chip reset.
2983 */
2984 return IRQ_HANDLED;
2985
2986 qib_stats.sps_ints++;
2987 if (dd->int_counter != (u32) -1)
2988 dd->int_counter++;
2989
2990	/* Clear the interrupt bit we expect to be set. */
2991 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
2992 INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
2993 qib_sdma_intr(ppd);
2994
2995 return IRQ_HANDLED;
2996}
2997
2998/*
2999 * Dedicated Send DMA progress interrupt handler.
3000 */
3001static irqreturn_t sdma_progress_intr(int irq, void *data)
3002{
3003 struct qib_pportdata *ppd = data;
3004 struct qib_devdata *dd = ppd->dd;
3005
3006 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3007 /*
3008 * This return value is not great, but we do not want the
3009 * interrupt core code to remove our interrupt handler
3010 * because we don't appear to be handling an interrupt
3011 * during a chip reset.
3012 */
3013 return IRQ_HANDLED;
3014
3015 qib_stats.sps_ints++;
3016 if (dd->int_counter != (u32) -1)
3017 dd->int_counter++;
3018
3019	/* Clear the interrupt bit we expect to be set. */
3020 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3021 INT_MASK_P(SDmaProgress, 1) :
3022 INT_MASK_P(SDmaProgress, 0));
3023 qib_sdma_intr(ppd);
3024
3025 return IRQ_HANDLED;
3026}
3027
3028/*
3029 * Dedicated Send DMA cleanup interrupt handler.
3030 */
3031static irqreturn_t sdma_cleanup_intr(int irq, void *data)
3032{
3033 struct qib_pportdata *ppd = data;
3034 struct qib_devdata *dd = ppd->dd;
3035
3036 if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3037 /*
3038 * This return value is not great, but we do not want the
3039 * interrupt core code to remove our interrupt handler
3040 * because we don't appear to be handling an interrupt
3041 * during a chip reset.
3042 */
3043 return IRQ_HANDLED;
3044
3045 qib_stats.sps_ints++;
3046 if (dd->int_counter != (u32) -1)
3047 dd->int_counter++;
3048
3049	/* Clear the interrupt bit we expect to be set. */
3050 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3051 INT_MASK_PM(SDmaCleanupDone, 1) :
3052 INT_MASK_PM(SDmaCleanupDone, 0));
3053 qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
3054
3055 return IRQ_HANDLED;
3056}
3057
3058/*
3059 * Set up our chip-specific interrupt handler.
3060 * The interrupt type has already been set up, so
3061 * we just need to do the registration and error checking.
3062 * If we are using MSIx interrupts, we may fall back to
3063 * INTx later, if the interrupt handler doesn't get called
3064 * within 1/2 second (see verify_interrupt()).
3065 */
3066static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
3067{
3068 int ret, i, msixnum;
3069 u64 redirect[6];
3070 u64 mask;
3071
3072 if (!dd->num_pports)
3073 return;
3074
3075 if (clearpend) {
3076 /*
3077 * if not switching interrupt types, be sure interrupts are
3078 * disabled, and then clear anything pending at this point,
3079 * because we are starting clean.
3080 */
3081 qib_7322_set_intr_state(dd, 0);
3082
3083 /* clear the reset error, init error/hwerror mask */
3084 qib_7322_init_hwerrors(dd);
3085
3086 /* clear any interrupt bits that might be set */
3087 qib_write_kreg(dd, kr_intclear, ~0ULL);
3088
3089 /* make sure no pending MSIx intr, and clear diag reg */
3090 qib_write_kreg(dd, kr_intgranted, ~0ULL);
3091 qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
3092 }
3093
3094 if (!dd->cspec->num_msix_entries) {
3095 /* Try to get INTx interrupt */
3096try_intx:
3097 if (!dd->pcidev->irq) {
3098 qib_dev_err(dd, "irq is 0, BIOS error? "
3099 "Interrupts won't work\n");
3100 goto bail;
3101 }
3102 ret = request_irq(dd->pcidev->irq, qib_7322intr,
3103 IRQF_SHARED, QIB_DRV_NAME, dd);
3104 if (ret) {
3105 qib_dev_err(dd, "Couldn't setup INTx "
3106 "interrupt (irq=%d): %d\n",
3107 dd->pcidev->irq, ret);
3108 goto bail;
3109 }
3110 dd->cspec->irq = dd->pcidev->irq;
3111 dd->cspec->main_int_mask = ~0ULL;
3112 goto bail;
3113 }
3114
3115 /* Try to get MSIx interrupts */
3116 memset(redirect, 0, sizeof redirect);
3117 mask = ~0ULL;
3118 msixnum = 0;
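	/*
	 * The loop below assigns vectors in two groups: logical sources
	 * 0..ARRAY_SIZE(irq_table)-1 use the dedicated handlers from
	 * irq_table (skipping entries for ports that aren't configured),
	 * and the remaining sources are kernel receive contexts, which
	 * all use qib_7322pintr.  msixnum counts only the vectors that
	 * are actually requested.
	 */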
3119 for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
3120 irq_handler_t handler;
3121 const char *name;
3122 void *arg;
3123 u64 val;
3124 int lsb, reg, sh;
3125
3126 if (i < ARRAY_SIZE(irq_table)) {
3127 if (irq_table[i].port) {
3128 /* skip if for a non-configured port */
3129 if (irq_table[i].port > dd->num_pports)
3130 continue;
3131 arg = dd->pport + irq_table[i].port - 1;
3132 } else
3133 arg = dd;
3134 lsb = irq_table[i].lsb;
3135 handler = irq_table[i].handler;
3136 name = irq_table[i].name;
3137 } else {
3138 unsigned ctxt;
3139
3140 ctxt = i - ARRAY_SIZE(irq_table);
3141 /* per krcvq context receive interrupt */
3142 arg = dd->rcd[ctxt];
3143 if (!arg)
3144 continue;
3145			if (qib_krcvq01_no_msi && ctxt < 2)
3146				continue;
3147			lsb = QIB_I_RCVAVAIL_LSB + ctxt;
3148 handler = qib_7322pintr;
3149 name = QIB_DRV_NAME " (kctx)";
3150 }
3151 ret = request_irq(dd->cspec->msix_entries[msixnum].vector,
3152 handler, 0, name, arg);
3153 if (ret) {
3154 /*
3155 * Shouldn't happen since the enable said we could
3156 * have as many as we are trying to setup here.
3157			 * have as many as we are trying to set up here.
3158 qib_dev_err(dd, "Couldn't setup MSIx "
3159 "interrupt (vec=%d, irq=%d): %d\n", msixnum,
3160 dd->cspec->msix_entries[msixnum].vector,
3161 ret);
3162 qib_7322_nomsix(dd);
3163 goto try_intx;
3164 }
3165 dd->cspec->msix_arg[msixnum] = arg;
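		/*
		 * If this source has a redirect bit (lsb >= 0), program the
		 * IntRedirect mapping: lsb selects the redirect register and
		 * the field within it, the MSIx vector number is written
		 * into that field, and the bit is cleared from the mask that
		 * is later stored as main_int_mask.
		 */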
3166 if (lsb >= 0) {
3167 reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
3168 sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
3169 SYM_LSB(IntRedirect0, vec1);
3170 mask &= ~(1ULL << lsb);
3171 redirect[reg] |= ((u64) msixnum) << sh;
3172 }
3173 val = qib_read_kreg64(dd, 2 * msixnum + 1 +
3174 (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3175 msixnum++;
3176 }
3177 /* Initialize the vector mapping */
3178 for (i = 0; i < ARRAY_SIZE(redirect); i++)
3179 qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
3180 dd->cspec->main_int_mask = mask;
3181	tasklet_init(&dd->error_tasklet, qib_error_tasklet,
3182		     (unsigned long)dd);
3183bail:;
3184}
3185
3186/**
3187 * qib_7322_boardname - fill in the board name and note features
3188 * @dd: the qlogic_ib device
3189 *
3190 * info will be based on the board revision register
3191 */
3192static unsigned qib_7322_boardname(struct qib_devdata *dd)
3193{
3194 /* Will need enumeration of board-types here */
3195 char *n;
3196 u32 boardid, namelen;
3197 unsigned features = DUAL_PORT_CAP;
3198
3199 boardid = SYM_FIELD(dd->revision, Revision, BoardID);
3200
3201 switch (boardid) {
3202 case 0:
3203 n = "InfiniPath_QLE7342_Emulation";
3204 break;
3205 case 1:
3206 n = "InfiniPath_QLE7340";
3207 dd->flags |= QIB_HAS_QSFP;
3208 features = PORT_SPD_CAP;
3209 break;
3210 case 2:
3211 n = "InfiniPath_QLE7342";
3212 dd->flags |= QIB_HAS_QSFP;
3213 break;
3214 case 3:
3215 n = "InfiniPath_QMI7342";
3216 break;
3217 case 4:
3218 n = "InfiniPath_Unsupported7342";
3219 qib_dev_err(dd, "Unsupported version of QMH7342\n");
3220 features = 0;
3221 break;
3222 case BOARD_QMH7342:
3223 n = "InfiniPath_QMH7342";
3224 features = 0x24;
3225 break;
3226 case BOARD_QME7342:
3227 n = "InfiniPath_QME7342";
3228 break;
3229	case 8:
3230 n = "InfiniPath_QME7362";
3231 dd->flags |= QIB_HAS_QSFP;
3232 break;
3233	case 15:
3234 n = "InfiniPath_QLE7342_TEST";
3235 dd->flags |= QIB_HAS_QSFP;
3236 break;
3237 default:
3238 n = "InfiniPath_QLE73xy_UNKNOWN";
3239 qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
3240 break;
3241 }
3242 dd->board_atten = 1; /* index into txdds_Xdr */
3243
3244 namelen = strlen(n) + 1;
3245 dd->boardname = kmalloc(namelen, GFP_KERNEL);
3246 if (!dd->boardname)
3247 qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
3248 else
3249 snprintf(dd->boardname, namelen, "%s", n);
3250
3251 snprintf(dd->boardversion, sizeof(dd->boardversion),
3252 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
3253 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
3254 (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
3255 dd->majrev, dd->minrev,
3256 (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
3257
3258 if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
3259 qib_devinfo(dd->pcidev, "IB%u: Forced to single port mode"
3260 " by module parameter\n", dd->unit);
3261 features &= PORT_SPD_CAP;
3262 }
3263
3264 return features;
3265}
3266
3267/*
3268 * This routine sleeps, so it can only be called from user context, not
3269 * from interrupt context.
3270 */
3271static int qib_do_7322_reset(struct qib_devdata *dd)
3272{
3273 u64 val;
3274 u64 *msix_vecsave;
3275 int i, msix_entries, ret = 1;
3276 u16 cmdval;
3277 u8 int_line, clinesz;
3278 unsigned long flags;
3279
3280 /* Use dev_err so it shows up in logs, etc. */
3281 qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
3282
3283 qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
3284
3285 msix_entries = dd->cspec->num_msix_entries;
3286
3287 /* no interrupts till re-initted */
3288 qib_7322_set_intr_state(dd, 0);
3289
3290 if (msix_entries) {
3291 qib_7322_nomsix(dd);
3292 /* can be up to 512 bytes, too big for stack */
3293 msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries *
3294 sizeof(u64), GFP_KERNEL);
3295 if (!msix_vecsave)
3296 qib_dev_err(dd, "No mem to save MSIx data\n");
3297 } else
3298 msix_vecsave = NULL;
3299
3300 /*
3301 * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
3302 * info that is set up by the BIOS, so we have to save and restore
3303 * it ourselves. There is some risk something could change it,
3304 * after we save it, but since we have disabled the MSIx, it
3305 * shouldn't be touched...
3306 */
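	/*
	 * Each MSIx table entry is a pair of 64-bit words starting at
	 * QIB_7322_MsixTable_OFFS: word 2*i is the message address and
	 * word 2*i+1 the message data, whose bit 32 is the per-vector
	 * mask bit (stripped below so the restored vector is unmasked).
	 */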
3307 for (i = 0; i < msix_entries; i++) {
3308 u64 vecaddr, vecdata;
3309 vecaddr = qib_read_kreg64(dd, 2 * i +
3310 (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3311 vecdata = qib_read_kreg64(dd, 1 + 2 * i +
3312 (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3313 if (msix_vecsave) {
3314 msix_vecsave[2 * i] = vecaddr;
3315 /* save it without the masked bit set */
3316 msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
3317 }
3318 }
3319
3320 dd->pport->cpspec->ibdeltainprog = 0;
3321 dd->pport->cpspec->ibsymdelta = 0;
3322 dd->pport->cpspec->iblnkerrdelta = 0;
3323 dd->pport->cpspec->ibmalfdelta = 0;
3324 dd->int_counter = 0; /* so we check interrupts work again */
3325
3326 /*
3327 * Keep chip from being accessed until we are ready. Use
3328 * writeq() directly, to allow the write even though QIB_PRESENT
3329	 * isn't set.
3330	 */
3331 dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
3332 dd->flags |= QIB_DOING_RESET;
3333 val = dd->control | QLOGIC_IB_C_RESET;
3334 writeq(val, &dd->kregbase[kr_control]);
3335
3336 for (i = 1; i <= 5; i++) {
3337 /*
3338 * Allow MBIST, etc. to complete; longer on each retry.
3339 * We sometimes get machine checks from bus timeout if no
3340 * response, so for now, make it *really* long.
3341 */
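		/*
		 * With the formula below the wait is 7 seconds on the
		 * first attempt, growing to 19 seconds on the fifth.
		 */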
3342 msleep(1000 + (1 + i) * 3000);
3343
3344 qib_pcie_reenable(dd, cmdval, int_line, clinesz);
3345
3346 /*
3347 * Use readq directly, so we don't need to mark it as PRESENT
3348 * until we get a successful indication that all is well.
3349 */
3350 val = readq(&dd->kregbase[kr_revision]);
3351 if (val == dd->revision)
3352 break;
3353 if (i == 5) {
3354 qib_dev_err(dd, "Failed to initialize after reset, "
3355 "unusable\n");
3356 ret = 0;
3357 goto bail;
3358 }
3359 }
3360
3361 dd->flags |= QIB_PRESENT; /* it's back */
3362
3363 if (msix_entries) {
3364 /* restore the MSIx vector address and data if saved above */
3365 for (i = 0; i < msix_entries; i++) {
3366 dd->cspec->msix_entries[i].entry = i;
3367 if (!msix_vecsave || !msix_vecsave[2 * i])
3368 continue;
3369 qib_write_kreg(dd, 2 * i +
3370 (QIB_7322_MsixTable_OFFS / sizeof(u64)),
3371 msix_vecsave[2 * i]);
3372 qib_write_kreg(dd, 1 + 2 * i +
3373 (QIB_7322_MsixTable_OFFS / sizeof(u64)),
3374 msix_vecsave[1 + 2 * i]);
3375 }
3376 }
3377
3378 /* initialize the remaining registers. */
3379 for (i = 0; i < dd->num_pports; ++i)
3380 write_7322_init_portregs(&dd->pport[i]);
3381 write_7322_initregs(dd);
3382
3383 if (qib_pcie_params(dd, dd->lbus_width,
3384 &dd->cspec->num_msix_entries,
3385 dd->cspec->msix_entries))
3386 qib_dev_err(dd, "Reset failed to setup PCIe or interrupts; "
3387 "continuing anyway\n");
3388
3389 qib_setup_7322_interrupt(dd, 1);
3390
3391 for (i = 0; i < dd->num_pports; ++i) {
3392 struct qib_pportdata *ppd = &dd->pport[i];
3393
3394 spin_lock_irqsave(&ppd->lflags_lock, flags);
3395 ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
3396 ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
3397 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3398 }
3399
3400bail:
3401 dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
3402 kfree(msix_vecsave);
3403 return ret;
3404}
3405
3406/**
3407 * qib_7322_put_tid - write a TID to the chip
3408 * @dd: the qlogic_ib device
3409 * @tidptr: pointer to the expected TID (in chip) to update
3410 * @type: RCVHQ_RCV_TYPE_EAGER for eager, RCVHQ_RCV_TYPE_EXPECTED for expected
3411 * @pa: physical address of in memory buffer; tidinvalid if freeing
3412 */
3413static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
3414 u32 type, unsigned long pa)
3415{
3416 if (!(dd->flags & QIB_PRESENT))
3417 return;
3418 if (pa != dd->tidinvalid) {
3419 u64 chippa = pa >> IBA7322_TID_PA_SHIFT;
3420
3421 /* paranoia checks */
3422 if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
3423 qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
3424 pa);
3425 return;
3426 }
3427 if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
3428 qib_dev_err(dd, "Physical page address 0x%lx "
3429 "larger than supported\n", pa);
3430 return;
3431 }
3432
3433 if (type == RCVHQ_RCV_TYPE_EAGER)
3434 chippa |= dd->tidtemplate;
3435 else /* for now, always full 4KB page */
3436 chippa |= IBA7322_TID_SZ_4K;
3437 pa = chippa;
3438 }
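	/*
	 * The value written below is either tidinvalid (freeing the
	 * entry) or the 2KB-aligned physical address shifted down and
	 * OR'd with the buffer-size bits selected above.
	 */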
3439 writeq(pa, tidptr);
3440 mmiowb();
3441}
3442
3443/**
3444 * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
3445 * @dd: the qlogic_ib device
3446 * @rcd: the ctxt data
3447 *
3448 * clear all TID entries for a ctxt, expected and eager.
3449 * Used from qib_close().
3450 */
3451static void qib_7322_clear_tids(struct qib_devdata *dd,
3452 struct qib_ctxtdata *rcd)
3453{
3454 u64 __iomem *tidbase;
3455 unsigned long tidinv;
3456 u32 ctxt;
3457 int i;
3458
3459 if (!dd->kregbase || !rcd)
3460 return;
3461
3462 ctxt = rcd->ctxt;
3463
3464 tidinv = dd->tidinvalid;
3465 tidbase = (u64 __iomem *)
3466 ((char __iomem *) dd->kregbase +
3467 dd->rcvtidbase +
3468 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
3469
3470 for (i = 0; i < dd->rcvtidcnt; i++)
3471 qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
3472 tidinv);
3473
3474 tidbase = (u64 __iomem *)
3475 ((char __iomem *) dd->kregbase +
3476 dd->rcvegrbase +
3477 rcd->rcvegr_tid_base * sizeof(*tidbase));
3478
3479 for (i = 0; i < rcd->rcvegrcnt; i++)
3480 qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
3481 tidinv);
3482}
3483
3484/**
3485 * qib_7322_tidtemplate - setup constants for TID updates
3486 * @dd: the qlogic_ib device
3487 *
3488 * We set up values we use a lot, to avoid recalculating each time
3489 */
3490static void qib_7322_tidtemplate(struct qib_devdata *dd)
3491{
3492 /*
3493 * For now, we always allocate 4KB buffers (at init) so we can
3494 * receive max size packets. We may want a module parameter to
3495 * specify 2KB or 4KB and/or make it per port instead of per device
3496 * for those who want to reduce memory footprint. Note that the
3497 * rcvhdrentsize size must be large enough to hold the largest
3498 * IB header (currently 96 bytes) that we expect to handle (plus of
3499 * course the 2 dwords of RHF).
3500 */
3501 if (dd->rcvegrbufsize == 2048)
3502 dd->tidtemplate = IBA7322_TID_SZ_2K;
3503 else if (dd->rcvegrbufsize == 4096)
3504 dd->tidtemplate = IBA7322_TID_SZ_4K;
3505 dd->tidinvalid = 0;
3506}
3507
3508/**
3509 * qib_init_7322_get_base_info - set chip-specific flags for user code
3510 * @rcd: the qlogic_ib ctxt
3511 * @kbase: qib_base_info pointer
3512 *
3513 * We set the PCIE flag because the lower bandwidth on PCIe vs
3514 * HyperTransport can affect some user packet algorithms.
3515 */
3516
3517static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
3518 struct qib_base_info *kinfo)
3519{
3520 kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
3521 QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
3522 QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;
3523 if (rcd->dd->cspec->r1)
3524 kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
3525 if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
3526 kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
3527
3528 return 0;
3529}
3530
3531static struct qib_message_header *
3532qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
3533{
3534 u32 offset = qib_hdrget_offset(rhf_addr);
3535
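	/*
	 * rhf_addr points at the RHF within the receive header entry;
	 * backing up by rhf_offset reaches the start of the entry, and
	 * offset (a dword count taken from the RHF) then locates the
	 * message header within it.
	 */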
3536 return (struct qib_message_header *)
3537 (rhf_addr - dd->rhf_offset + offset);
3538}
3539
3540/*
3541 * Configure number of contexts.
3542 */
3543static void qib_7322_config_ctxts(struct qib_devdata *dd)
3544{
3545 unsigned long flags;
3546 u32 nchipctxts;
3547
3548 nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
3549 dd->cspec->numctxts = nchipctxts;
3550 if (qib_n_krcv_queues > 1 && dd->num_pports) {
3551		dd->first_user_ctxt = NUM_IB_PORTS +
3552 (qib_n_krcv_queues - 1) * dd->num_pports;
3553 if (dd->first_user_ctxt > nchipctxts)
3554 dd->first_user_ctxt = nchipctxts;
3555 dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
3556 } else {
3557 dd->first_user_ctxt = NUM_IB_PORTS;
3558 dd->n_krcv_queues = 1;
3559 }
3560
3561 if (!qib_cfgctxts) {
3562 int nctxts = dd->first_user_ctxt + num_online_cpus();
3563
3564 if (nctxts <= 6)
3565 dd->ctxtcnt = 6;
3566 else if (nctxts <= 10)
3567 dd->ctxtcnt = 10;
3568 else if (nctxts <= nchipctxts)
3569 dd->ctxtcnt = nchipctxts;
3570 } else if (qib_cfgctxts < dd->num_pports)
3571 dd->ctxtcnt = dd->num_pports;
3572 else if (qib_cfgctxts <= nchipctxts)
3573 dd->ctxtcnt = qib_cfgctxts;
3574 if (!dd->ctxtcnt) /* none of the above, set to max */
3575 dd->ctxtcnt = nchipctxts;
3576
3577 /*
3578 * Chip can be configured for 6, 10, or 18 ctxts, and choice
3579 * affects number of eager TIDs per ctxt (1K, 2K, 4K).
3580 * Lock to be paranoid about later motion, etc.
3581 */
3582 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
3583 if (dd->ctxtcnt > 10)
3584 dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
3585 else if (dd->ctxtcnt > 6)
3586 dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
3587 /* else configure for default 6 receive ctxts */
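	/*
	 * ContextCfg therefore ends up as 0, 1 or 2 for 6, 10 or 18
	 * contexts respectively; as noted above, larger context counts
	 * leave fewer eager TIDs per context.
	 */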
3588
3589 /* The XRC opcode is 5. */
3590 dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);
3591
3592 /*
3593 * RcvCtrl *must* be written here so that the
3594 * chip understands how to change rcvegrcnt below.
3595 */
3596 qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
3597 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
3598
3599 /* kr_rcvegrcnt changes based on the number of contexts enabled */
3600 dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
3601	if (qib_rcvhdrcnt)
3602 dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
3603 else
3604 dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
3605 dd->num_pports > 1 ? 1024U : 2048U);
3606}
3607
3608static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
3609{
3610
3611 int lsb, ret = 0;
3612 u64 maskr; /* right-justified mask */
3613
3614 switch (which) {
3615
3616 case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
3617 ret = ppd->link_width_enabled;
3618 goto done;
3619
3620 case QIB_IB_CFG_LWID: /* Get currently active Link-width */
3621 ret = ppd->link_width_active;
3622 goto done;
3623
3624 case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
3625 ret = ppd->link_speed_enabled;
3626 goto done;
3627
3628 case QIB_IB_CFG_SPD: /* Get current Link spd */
3629 ret = ppd->link_speed_active;
3630 goto done;
3631
3632 case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
3633 lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3634 maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3635 break;
3636
3637 case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
3638 lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3639 maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3640 break;
3641
3642 case QIB_IB_CFG_LINKLATENCY:
3643 ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
3644 SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
3645 goto done;
3646
3647 case QIB_IB_CFG_OP_VLS:
3648 ret = ppd->vls_operational;
3649 goto done;
3650
3651 case QIB_IB_CFG_VL_HIGH_CAP:
3652 ret = 16;
3653 goto done;
3654
3655 case QIB_IB_CFG_VL_LOW_CAP:
3656 ret = 16;
3657 goto done;
3658
3659 case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
3660 ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
3661 OverrunThreshold);
3662 goto done;
3663
3664 case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
3665 ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
3666 PhyerrThreshold);
3667 goto done;
3668
3669 case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
3670 /* will only take effect when the link state changes */
3671 ret = (ppd->cpspec->ibcctrl_a &
3672 SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
3673 IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
3674 goto done;
3675
3676 case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
3677 lsb = IBA7322_IBC_HRTBT_LSB;
3678 maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
3679 break;
3680
3681 case QIB_IB_CFG_PMA_TICKS:
3682 /*
3683 * 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs
3684 * Since the clock is always 250MHz, the value is 3, 1 or 0.
3685 */
3686 if (ppd->link_speed_active == QIB_IB_QDR)
3687 ret = 3;
3688 else if (ppd->link_speed_active == QIB_IB_DDR)
3689 ret = 1;
3690 else
3691 ret = 0;
3692 goto done;
3693
3694 default:
3695 ret = -EINVAL;
3696 goto done;
3697 }
3698 ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
3699done:
3700 return ret;
3701}
3702
3703/*
3704 * Below again cribbed liberally from older version. Do not lean
3705 * heavily on it.
3706 */
3707#define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
3708#define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
3709 | (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))
3710
3711static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
3712{
3713 struct qib_devdata *dd = ppd->dd;
3714 u64 maskr; /* right-justified mask */
3715 int lsb, ret = 0;
3716 u16 lcmd, licmd;
3717 unsigned long flags;
3718
3719 switch (which) {
3720 case QIB_IB_CFG_LIDLMC:
3721 /*
3722 * Set LID and LMC. Combined to avoid possible hazard
3723 * caller puts LMC in 16MSbits, DLID in 16LSbits of val
3724 */
3725 lsb = IBA7322_IBC_DLIDLMC_SHIFT;
3726 maskr = IBA7322_IBC_DLIDLMC_MASK;
3727 /*
3728 * For header-checking, the SLID in the packet will
3729 * be masked with SendIBSLMCMask, and compared
3730 * with SendIBSLIDAssignMask. Make sure we do not
3731 * set any bits not covered by the mask, or we get
3732 * false-positives.
3733 */
3734 qib_write_kreg_port(ppd, krp_sendslid,
3735 val & (val >> 16) & SendIBSLIDAssignMask);
3736 qib_write_kreg_port(ppd, krp_sendslidmask,
3737 (val >> 16) & SendIBSLMCMask);
3738 break;
3739
3740 case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
3741 ppd->link_width_enabled = val;
3742 /* convert IB value to chip register value */
3743 if (val == IB_WIDTH_1X)
3744 val = 0;
3745 else if (val == IB_WIDTH_4X)
3746 val = 1;
3747 else
3748 val = 3;
3749 maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
3750 lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
3751 break;
3752
3753 case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
3754 /*
3755 * As with width, only write the actual register if the
3756 * link is currently down, otherwise takes effect on next
3757		 * link change. Since setting is being explicitly requested
3758		 * (via MAD or sysfs), clear autoneg failure status if speed
3759 * autoneg is enabled.
3760 */
3761 ppd->link_speed_enabled = val;
3762 val <<= IBA7322_IBC_SPEED_LSB;
3763 maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
3764 IBA7322_IBC_MAX_SPEED_MASK;
3765 if (val & (val - 1)) {
3766			/* Multiple speeds enabled */
3767 val |= IBA7322_IBC_IBTA_1_2_MASK |
3768 IBA7322_IBC_MAX_SPEED_MASK;
3769 spin_lock_irqsave(&ppd->lflags_lock, flags);
3770 ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
3771 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3772 } else if (val & IBA7322_IBC_SPEED_QDR)
3773 val |= IBA7322_IBC_IBTA_1_2_MASK;
3774 /* IBTA 1.2 mode + min/max + speed bits are contiguous */
3775 lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
3776 break;
3777
3778 case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
3779 lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3780 maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3781 break;
3782
3783 case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
3784 lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3785 maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3786 break;
3787
3788 case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
3789 maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
3790 OverrunThreshold);
3791 if (maskr != val) {
3792 ppd->cpspec->ibcctrl_a &=
3793 ~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
3794 ppd->cpspec->ibcctrl_a |= (u64) val <<
3795 SYM_LSB(IBCCtrlA_0, OverrunThreshold);
3796 qib_write_kreg_port(ppd, krp_ibcctrl_a,
3797 ppd->cpspec->ibcctrl_a);
3798 qib_write_kreg(dd, kr_scratch, 0ULL);
3799 }
3800 goto bail;
3801
3802 case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
3803 maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
3804 PhyerrThreshold);
3805 if (maskr != val) {
3806 ppd->cpspec->ibcctrl_a &=
3807 ~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
3808 ppd->cpspec->ibcctrl_a |= (u64) val <<
3809 SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
3810 qib_write_kreg_port(ppd, krp_ibcctrl_a,
3811 ppd->cpspec->ibcctrl_a);
3812 qib_write_kreg(dd, kr_scratch, 0ULL);
3813 }
3814 goto bail;
3815
3816 case QIB_IB_CFG_PKEYS: /* update pkeys */
3817 maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
3818 ((u64) ppd->pkeys[2] << 32) |
3819 ((u64) ppd->pkeys[3] << 48);
3820 qib_write_kreg_port(ppd, krp_partitionkey, maskr);
3821 goto bail;
3822
3823 case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
3824 /* will only take effect when the link state changes */
3825 if (val == IB_LINKINITCMD_POLL)
3826 ppd->cpspec->ibcctrl_a &=
3827 ~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
3828 else /* SLEEP */
3829 ppd->cpspec->ibcctrl_a |=
3830 SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
3831 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
3832 qib_write_kreg(dd, kr_scratch, 0ULL);
3833 goto bail;
3834
3835 case QIB_IB_CFG_MTU: /* update the MTU in IBC */
3836 /*
3837 * Update our housekeeping variables, and set IBC max
3838 * size, same as init code; max IBC is max we allow in
3839 * buffer, less the qword pbc, plus 1 for ICRC, in dwords
3840 * Set even if it's unchanged, print debug message only
3841 * on changes.
3842 */
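		/* ibmaxlen is in bytes: >> 2 gives dwords, + 1 adds the ICRC dword */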
3843 val = (ppd->ibmaxlen >> 2) + 1;
3844 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
3845 ppd->cpspec->ibcctrl_a |= (u64)val <<
3846 SYM_LSB(IBCCtrlA_0, MaxPktLen);
3847 qib_write_kreg_port(ppd, krp_ibcctrl_a,
3848 ppd->cpspec->ibcctrl_a);
3849 qib_write_kreg(dd, kr_scratch, 0ULL);
3850 goto bail;
3851
3852 case QIB_IB_CFG_LSTATE: /* set the IB link state */
3853 switch (val & 0xffff0000) {
3854 case IB_LINKCMD_DOWN:
3855 lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
3856 ppd->cpspec->ibmalfusesnap = 1;
3857 ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
3858 crp_errlink);
3859 if (!ppd->cpspec->ibdeltainprog &&
3860 qib_compat_ddr_negotiate) {
3861 ppd->cpspec->ibdeltainprog = 1;
3862 ppd->cpspec->ibsymsnap =
3863 read_7322_creg32_port(ppd,
3864 crp_ibsymbolerr);
3865 ppd->cpspec->iblnkerrsnap =
3866 read_7322_creg32_port(ppd,
3867 crp_iblinkerrrecov);
3868 }
3869 break;
3870
3871 case IB_LINKCMD_ARMED:
3872 lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
3873 if (ppd->cpspec->ibmalfusesnap) {
3874 ppd->cpspec->ibmalfusesnap = 0;
3875 ppd->cpspec->ibmalfdelta +=
3876 read_7322_creg32_port(ppd,
3877 crp_errlink) -
3878 ppd->cpspec->ibmalfsnap;
3879 }
3880 break;
3881
3882 case IB_LINKCMD_ACTIVE:
3883 lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
3884 break;
3885
3886 default:
3887 ret = -EINVAL;
3888 qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
3889 goto bail;
3890 }
3891 switch (val & 0xffff) {
3892 case IB_LINKINITCMD_NOP:
3893 licmd = 0;
3894 break;
3895
3896 case IB_LINKINITCMD_POLL:
3897 licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
3898 break;
3899
3900 case IB_LINKINITCMD_SLEEP:
3901 licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
3902 break;
3903
3904 case IB_LINKINITCMD_DISABLE:
3905 licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
3906 ppd->cpspec->chase_end = 0;
3907 /*
3908 * stop state chase counter and timer, if running.
3909			 * wait for pending timer, but don't clear .data (ppd)!
3910 */
3911 if (ppd->cpspec->chase_timer.expires) {
3912 del_timer_sync(&ppd->cpspec->chase_timer);
3913 ppd->cpspec->chase_timer.expires = 0;
3914 }
3915 break;
3916
3917 default:
3918 ret = -EINVAL;
3919 qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
3920 val & 0xffff);
3921 goto bail;
3922 }
3923 qib_set_ib_7322_lstate(ppd, lcmd, licmd);
3924 goto bail;
3925
3926 case QIB_IB_CFG_OP_VLS:
3927 if (ppd->vls_operational != val) {
3928 ppd->vls_operational = val;
3929 set_vls(ppd);
3930 }
3931 goto bail;
3932
3933 case QIB_IB_CFG_VL_HIGH_LIMIT:
3934 qib_write_kreg_port(ppd, krp_highprio_limit, val);
3935 goto bail;
3936
3937 case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
3938 if (val > 3) {
3939 ret = -EINVAL;
3940 goto bail;
3941 }
3942 lsb = IBA7322_IBC_HRTBT_LSB;
3943 maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
3944 break;
3945
3946 case QIB_IB_CFG_PORT:
3947 /* val is the port number of the switch we are connected to. */
3948 if (ppd->dd->cspec->r1) {
3949 cancel_delayed_work(&ppd->cpspec->ipg_work);
3950 ppd->cpspec->ipg_tries = 0;
3951 }
3952 goto bail;
3953
3954 default:
3955 ret = -EINVAL;
3956 goto bail;
3957 }
3958 ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
3959 ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
3960 qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
3961 qib_write_kreg(dd, kr_scratch, 0);
3962bail:
3963 return ret;
3964}
3965
3966static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
3967{
3968 int ret = 0;
3969 u64 val, ctrlb;
3970
3971 /* only IBC loopback, may add serdes and xgxs loopbacks later */
3972 if (!strncmp(what, "ibc", 3)) {
3973 ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
3974 Loopback);
3975 val = 0; /* disable heart beat, so link will come up */
3976 qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
3977 ppd->dd->unit, ppd->port);
3978 } else if (!strncmp(what, "off", 3)) {
3979 ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
3980 Loopback);
3981 /* enable heart beat again */
3982 val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
3983 qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
3984 "(normal)\n", ppd->dd->unit, ppd->port);
3985 } else
3986 ret = -EINVAL;
3987 if (!ret) {
3988 qib_write_kreg_port(ppd, krp_ibcctrl_a,
3989 ppd->cpspec->ibcctrl_a);
3990 ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
3991 << IBA7322_IBC_HRTBT_LSB);
3992 ppd->cpspec->ibcctrl_b = ctrlb | val;
3993 qib_write_kreg_port(ppd, krp_ibcctrl_b,
3994 ppd->cpspec->ibcctrl_b);
3995 qib_write_kreg(ppd->dd, kr_scratch, 0);
3996 }
3997 return ret;
3998}
3999
4000static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4001 struct ib_vl_weight_elem *vl)
4002{
4003 unsigned i;
4004
4005 for (i = 0; i < 16; i++, regno++, vl++) {
4006 u32 val = qib_read_kreg_port(ppd, regno);
4007
4008 vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
4009 SYM_RMASK(LowPriority0_0, VirtualLane);
4010 vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
4011 SYM_RMASK(LowPriority0_0, Weight);
4012 }
4013}
4014
4015static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4016 struct ib_vl_weight_elem *vl)
4017{
4018 unsigned i;
4019
4020 for (i = 0; i < 16; i++, regno++, vl++) {
4021 u64 val;
4022
4023 val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
4024 SYM_LSB(LowPriority0_0, VirtualLane)) |
4025 ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
4026 SYM_LSB(LowPriority0_0, Weight));
4027 qib_write_kreg_port(ppd, regno, val);
4028 }
4029 if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
4030 struct qib_devdata *dd = ppd->dd;
4031 unsigned long flags;
4032
4033 spin_lock_irqsave(&dd->sendctrl_lock, flags);
4034 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
4035 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4036 qib_write_kreg(dd, kr_scratch, 0);
4037 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4038 }
4039}
4040
4041static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
4042{
4043 switch (which) {
4044 case QIB_IB_TBL_VL_HIGH_ARB:
4045 get_vl_weights(ppd, krp_highprio_0, t);
4046 break;
4047
4048 case QIB_IB_TBL_VL_LOW_ARB:
4049 get_vl_weights(ppd, krp_lowprio_0, t);
4050 break;
4051
4052 default:
4053 return -EINVAL;
4054 }
4055 return 0;
4056}
4057
4058static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
4059{
4060 switch (which) {
4061 case QIB_IB_TBL_VL_HIGH_ARB:
4062 set_vl_weights(ppd, krp_highprio_0, t);
4063 break;
4064
4065 case QIB_IB_TBL_VL_LOW_ARB:
4066 set_vl_weights(ppd, krp_lowprio_0, t);
4067 break;
4068
4069 default:
4070 return -EINVAL;
4071 }
4072 return 0;
4073}
4074
4075static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
4076				    u32 updegr, u32 egrhd, u32 npkts)
4077{
4078	/*
4079 * Need to write timeout register before updating rcvhdrhead to ensure
4080 * that the timer is enabled on reception of a packet.
4081 */
4082 if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
4083 adjust_rcv_timeout(rcd, npkts);
4084	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4085 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4086 if (updegr)
4087 qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
4088}
4089
4090static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
4091{
4092 u32 head, tail;
4093
4094 head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
4095 if (rcd->rcvhdrtail_kvaddr)
4096 tail = qib_get_rcvhdrtail(rcd);
4097 else
4098 tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
4099 return head == tail;
4100}
4101
4102#define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
4103 QIB_RCVCTRL_CTXT_DIS | \
4104 QIB_RCVCTRL_TIDFLOW_ENB | \
4105 QIB_RCVCTRL_TIDFLOW_DIS | \
4106 QIB_RCVCTRL_TAILUPD_ENB | \
4107 QIB_RCVCTRL_TAILUPD_DIS | \
4108 QIB_RCVCTRL_INTRAVAIL_ENB | \
4109 QIB_RCVCTRL_INTRAVAIL_DIS | \
4110 QIB_RCVCTRL_BP_ENB | \
4111 QIB_RCVCTRL_BP_DIS)
4112
4113#define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
4114 QIB_RCVCTRL_CTXT_DIS | \
4115 QIB_RCVCTRL_PKEY_DIS | \
4116 QIB_RCVCTRL_PKEY_ENB)
4117
4118/*
4119 * Modify the RCVCTRL register in a chip-specific way. This
4120 * is a function because bit positions and (future) register
4121 * locations are chip-specific, but the needed operations are
4122 * generic. <op> is a bit-mask because we often want to
4123 * do multiple modifications.
4124 */
4125static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
4126 int ctxt)
4127{
4128 struct qib_devdata *dd = ppd->dd;
4129 struct qib_ctxtdata *rcd;
4130 u64 mask, val;
4131 unsigned long flags;
4132
4133 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
4134
4135 if (op & QIB_RCVCTRL_TIDFLOW_ENB)
4136 dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
4137 if (op & QIB_RCVCTRL_TIDFLOW_DIS)
4138 dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
4139 if (op & QIB_RCVCTRL_TAILUPD_ENB)
4140 dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4141 if (op & QIB_RCVCTRL_TAILUPD_DIS)
4142 dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
4143 if (op & QIB_RCVCTRL_PKEY_ENB)
4144 ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4145 if (op & QIB_RCVCTRL_PKEY_DIS)
4146 ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4147 if (ctxt < 0) {
4148 mask = (1ULL << dd->ctxtcnt) - 1;
4149 rcd = NULL;
4150 } else {
4151 mask = (1ULL << ctxt);
4152 rcd = dd->rcd[ctxt];
4153 }
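	/*
	 * mask now has one bit per affected context: every context when
	 * ctxt is negative, otherwise just the requested one.  It is
	 * shifted to the appropriate field positions in the writes below.
	 */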
4154 if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
4155 ppd->p_rcvctrl |=
4156 (mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4157 if (!(dd->flags & QIB_NODMA_RTAIL)) {
4158 op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
4159 dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4160 }
4161 /* Write these registers before the context is enabled. */
4162 qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
4163 rcd->rcvhdrqtailaddr_phys);
4164 qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
4165 rcd->rcvhdrq_phys);
4166 rcd->seq_cnt = 1;
4167	}
4168 if (op & QIB_RCVCTRL_CTXT_DIS)
4169 ppd->p_rcvctrl &=
4170 ~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4171 if (op & QIB_RCVCTRL_BP_ENB)
4172 dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
4173 if (op & QIB_RCVCTRL_BP_DIS)
4174 dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
4175 if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
4176 dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
4177 if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
4178 dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
4179 /*
4180 * Decide which registers to write depending on the ops enabled.
4181 * Special case is "flush" (no bits set at all)
4182 * which needs to write both.
4183 */
4184 if (op == 0 || (op & RCVCTRL_COMMON_MODS))
4185 qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
4186 if (op == 0 || (op & RCVCTRL_PORT_MODS))
4187 qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
4188 if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
4189 /*
4190 * Init the context registers also; if we were
4191 * disabled, tail and head should both be zero
4192 * already from the enable, but since we don't
4193		 * know, we have to do it explicitly.
4194		 */
4195 val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
4196 qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
4197
4198 /* be sure enabling write seen; hd/tl should be 0 */
4199 (void) qib_read_kreg32(dd, kr_scratch);
4200 val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
4201 dd->rcd[ctxt]->head = val;
4202 /* If kctxt, interrupt on next receive. */
4203 if (ctxt < dd->first_user_ctxt)
4204 val |= dd->rhdrhead_intr_off;
4205 qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4206 } else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
4207 dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
4208 /* arm rcv interrupt */
4209 val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
4210 qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4211 }
4212 if (op & QIB_RCVCTRL_CTXT_DIS) {
4213 unsigned f;
4214
4215 /* Now that the context is disabled, clear these registers. */
4216 if (ctxt >= 0) {
4217 qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
4218 qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
4219 for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4220 qib_write_ureg(dd, ur_rcvflowtable + f,
4221 TIDFLOW_ERRBITS, ctxt);
4222 } else {
4223 unsigned i;
4224
4225 for (i = 0; i < dd->cfgctxts; i++) {
4226 qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
4227 i, 0);
4228 qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
4229 for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4230 qib_write_ureg(dd, ur_rcvflowtable + f,
4231 TIDFLOW_ERRBITS, i);
4232 }
4233 }
4234 }
4235 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
4236}
4237
4238/*
4239 * Modify the SENDCTRL register in a chip-specific way. This
4240 * is a function where there are multiple such registers with
4241 * slightly different layouts.
4242 * The chip doesn't allow back-to-back sendctrl writes, so write
4243 * the scratch register after writing sendctrl.
4244 *
4245 * Which register is written depends on the operation.
4246 * Most operate on the common register, while
4247 * SEND_ENB and SEND_DIS operate on the per-port ones.
4248 * SEND_ENB is included in common because it can change SPCL_TRIG
4249 */
4250#define SENDCTRL_COMMON_MODS (\
4251 QIB_SENDCTRL_CLEAR | \
4252 QIB_SENDCTRL_AVAIL_DIS | \
4253 QIB_SENDCTRL_AVAIL_ENB | \
4254 QIB_SENDCTRL_AVAIL_BLIP | \
4255 QIB_SENDCTRL_DISARM | \
4256 QIB_SENDCTRL_DISARM_ALL | \
4257 QIB_SENDCTRL_SEND_ENB)
4258
4259#define SENDCTRL_PORT_MODS (\
4260 QIB_SENDCTRL_CLEAR | \
4261 QIB_SENDCTRL_SEND_ENB | \
4262 QIB_SENDCTRL_SEND_DIS | \
4263 QIB_SENDCTRL_FLUSH)
4264
4265static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
4266{
4267 struct qib_devdata *dd = ppd->dd;
4268 u64 tmp_dd_sendctrl;
4269 unsigned long flags;
4270
4271 spin_lock_irqsave(&dd->sendctrl_lock, flags);
4272
4273 /* First the dd ones that are "sticky", saved in shadow */
4274 if (op & QIB_SENDCTRL_CLEAR)
4275 dd->sendctrl = 0;
4276 if (op & QIB_SENDCTRL_AVAIL_DIS)
4277 dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4278 else if (op & QIB_SENDCTRL_AVAIL_ENB) {
4279 dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
4280 if (dd->flags & QIB_USE_SPCL_TRIG)
4281 dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
4282 }
4283
4284 /* Then the ppd ones that are "sticky", saved in shadow */
4285 if (op & QIB_SENDCTRL_SEND_DIS)
4286 ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
4287 else if (op & QIB_SENDCTRL_SEND_ENB)
4288 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
4289
4290 if (op & QIB_SENDCTRL_DISARM_ALL) {
4291 u32 i, last;
4292
4293 tmp_dd_sendctrl = dd->sendctrl;
4294 last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
4295 /*
4296 * Disarm any buffers that are not yet launched,
4297 * disabling updates until done.
4298 */
4299 tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4300 for (i = 0; i < last; i++) {
4301 qib_write_kreg(dd, kr_sendctrl,
4302 tmp_dd_sendctrl |
4303 SYM_MASK(SendCtrl, Disarm) | i);
4304 qib_write_kreg(dd, kr_scratch, 0);
4305 }
4306 }
4307
4308 if (op & QIB_SENDCTRL_FLUSH) {
4309 u64 tmp_ppd_sendctrl = ppd->p_sendctrl;
4310
4311 /*
4312 * Now drain all the fifos. The Abort bit should never be
4313 * needed, so for now, at least, we don't use it.
4314 */
4315 tmp_ppd_sendctrl |=
4316 SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
4317 SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
4318 SYM_MASK(SendCtrl_0, TxeBypassIbc);
4319 qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
4320 qib_write_kreg(dd, kr_scratch, 0);
4321 }
4322
4323 tmp_dd_sendctrl = dd->sendctrl;
4324
4325 if (op & QIB_SENDCTRL_DISARM)
4326 tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
4327 ((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
4328 SYM_LSB(SendCtrl, DisarmSendBuf));
4329 if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
4330 (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
4331 tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4332
4333 if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
4334 qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
4335 qib_write_kreg(dd, kr_scratch, 0);
4336 }
4337
4338 if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
4339 qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4340 qib_write_kreg(dd, kr_scratch, 0);
4341 }
4342
4343 if (op & QIB_SENDCTRL_AVAIL_BLIP) {
4344 qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
4345 qib_write_kreg(dd, kr_scratch, 0);
4346 }
4347
4348 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4349
4350 if (op & QIB_SENDCTRL_FLUSH) {
4351 u32 v;
4352 /*
4353 * ensure writes have hit chip, then do a few
4354 * more reads, to allow DMA of pioavail registers
4355 * to occur, so in-memory copy is in sync with
4356 * the chip. Not always safe to sleep.
4357 */
4358 v = qib_read_kreg32(dd, kr_scratch);
4359 qib_write_kreg(dd, kr_scratch, v);
4360 v = qib_read_kreg32(dd, kr_scratch);
4361 qib_write_kreg(dd, kr_scratch, v);
4362 qib_read_kreg32(dd, kr_scratch);
4363 }
4364}
4365
4366#define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
4367#define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
4368#define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */
4369
4370/**
4371 * qib_portcntr_7322 - read a per-port chip counter
4372 * @ppd: the qlogic_ib pport
4373 * @reg: the counter to read (not a chip offset)
4374 */
4375static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
4376{
4377 struct qib_devdata *dd = ppd->dd;
4378 u64 ret = 0ULL;
4379 u16 creg;
4380 /* 0xffff for unimplemented or synthesized counters */
4381 static const u32 xlator[] = {
4382 [QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
4383 [QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
4384 [QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
4385 [QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
4386 [QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
4387 [QIBPORTCNTR_SENDSTALL] = crp_sendstall,
4388 [QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
4389 [QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
4390 [QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
4391 [QIBPORTCNTR_RCVEBP] = crp_rcvebp,
4392 [QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
4393 [QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
4394 [QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed for 7322 */
4395 [QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
4396 [QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
4397 [QIBPORTCNTR_ERRICRC] = crp_erricrc,
4398 [QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
4399 [QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
4400 [QIBPORTCNTR_BADFORMAT] = crp_badformat,
4401 [QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
4402 [QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
4403 [QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
4404 [QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
4405 [QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
4406 [QIBPORTCNTR_ERRLINK] = crp_errlink,
4407 [QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
4408 [QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
4409 [QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
4410 [QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
4411 [QIBPORTCNTR_ERRPKEY] = crp_errpkey,
4412 /*
4413 * the next 3 aren't really counters, but were implemented
4414	 * as counters in older chips, so this code still accesses
4415	 * them as though they were counters.
4416 */
4417 [QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
4418 [QIBPORTCNTR_PSSTART] = krp_psstart,
4419 [QIBPORTCNTR_PSSTAT] = krp_psstat,
4420 /* pseudo-counter, summed for all ports */
4421 [QIBPORTCNTR_KHDROVFL] = 0xffff,
4422 };
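	/*
	 * Entries flagged _PORT_64BIT_FLAG are read with the 64-bit
	 * accessor below; 0xffff entries are either synthesized here
	 * (KHDROVFL is summed over the kernel contexts) or not needed
	 * on this chip (RXDROPPKT).
	 */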
4423
4424 if (reg >= ARRAY_SIZE(xlator)) {
4425 qib_devinfo(ppd->dd->pcidev,
4426 "Unimplemented portcounter %u\n", reg);
4427 goto done;
4428 }
4429 creg = xlator[reg] & _PORT_CNTR_IDXMASK;
4430
4431 /* handle non-counters and special cases first */
4432 if (reg == QIBPORTCNTR_KHDROVFL) {
4433 int i;
4434
4435 /* sum over all kernel contexts (skip if mini_init) */
4436 for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
4437 struct qib_ctxtdata *rcd = dd->rcd[i];
4438
4439 if (!rcd || rcd->ppd != ppd)
4440 continue;
4441 ret += read_7322_creg32(dd, cr_base_egrovfl + i);
4442 }
4443 goto done;
4444 } else if (reg == QIBPORTCNTR_RXDROPPKT) {
4445 /*
4446 * Used as part of the synthesis of port_rcv_errors
4447 * in the verbs code for IBTA counters. Not needed for 7322,
4448 * because all the errors are already counted by other cntrs.
4449 */
4450 goto done;
4451 } else if (reg == QIBPORTCNTR_PSINTERVAL ||
4452 reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
4453 /* were counters in older chips, now per-port kernel regs */
4454 ret = qib_read_kreg_port(ppd, creg);
4455 goto done;
4456 }
4457
4458 /*
4459 * Only fast increment counters are 64 bits; use 32 bit reads to
4460 * avoid two independent reads when on Opteron.
4461 */
4462 if (xlator[reg] & _PORT_64BIT_FLAG)
4463 ret = read_7322_creg_port(ppd, creg);
4464 else
4465 ret = read_7322_creg32_port(ppd, creg);
4466 if (creg == crp_ibsymbolerr) {
4467 if (ppd->cpspec->ibdeltainprog)
4468 ret -= ret - ppd->cpspec->ibsymsnap;
4469 ret -= ppd->cpspec->ibsymdelta;
4470 } else if (creg == crp_iblinkerrrecov) {
4471 if (ppd->cpspec->ibdeltainprog)
4472 ret -= ret - ppd->cpspec->iblnkerrsnap;
4473 ret -= ppd->cpspec->iblnkerrdelta;
4474 } else if (creg == crp_errlink)
4475 ret -= ppd->cpspec->ibmalfdelta;
4476 else if (creg == crp_iblinkdown)
4477 ret += ppd->cpspec->iblnkdowndelta;
4478done:
4479 return ret;
4480}
4481
4482/*
4483 * Device counter names (not port-specific), one line per stat,
4484 * single string. Used by utilities like ipathstats to print the stats
4485 * in a way which works for different versions of drivers, without changing
4486 * the utility. Names need to be 12 chars or less (w/o newline), for proper
4487 * display by utility.
4488 * Non-error counters are first.
4489 * Start of "error" counters is indicated by a leading "E " on the first
4490 * "error" counter, and doesn't count in label length.
4491 * The EgrOvfl list needs to be last so we truncate them at the configured
4492 * context count for the device.
4493 * cntr7322indices contains the corresponding register indices.
4494 */
4495static const char cntr7322names[] =
4496 "Interrupts\n"
4497 "HostBusStall\n"
4498 "E RxTIDFull\n"
4499 "RxTIDInvalid\n"
4500 "RxTIDFloDrop\n" /* 7322 only */
4501 "Ctxt0EgrOvfl\n"
4502 "Ctxt1EgrOvfl\n"
4503 "Ctxt2EgrOvfl\n"
4504 "Ctxt3EgrOvfl\n"
4505 "Ctxt4EgrOvfl\n"
4506 "Ctxt5EgrOvfl\n"
4507 "Ctxt6EgrOvfl\n"
4508 "Ctxt7EgrOvfl\n"
4509 "Ctxt8EgrOvfl\n"
4510 "Ctxt9EgrOvfl\n"
4511 "Ctx10EgrOvfl\n"
4512 "Ctx11EgrOvfl\n"
4513 "Ctx12EgrOvfl\n"
4514 "Ctx13EgrOvfl\n"
4515 "Ctx14EgrOvfl\n"
4516 "Ctx15EgrOvfl\n"
4517 "Ctx16EgrOvfl\n"
4518 "Ctx17EgrOvfl\n"
4519 ;
4520
4521static const u32 cntr7322indices[] = {
4522 cr_lbint | _PORT_64BIT_FLAG,
4523 cr_lbstall | _PORT_64BIT_FLAG,
4524 cr_tidfull,
4525 cr_tidinvalid,
4526 cr_rxtidflowdrop,
4527 cr_base_egrovfl + 0,
4528 cr_base_egrovfl + 1,
4529 cr_base_egrovfl + 2,
4530 cr_base_egrovfl + 3,
4531 cr_base_egrovfl + 4,
4532 cr_base_egrovfl + 5,
4533 cr_base_egrovfl + 6,
4534 cr_base_egrovfl + 7,
4535 cr_base_egrovfl + 8,
4536 cr_base_egrovfl + 9,
4537 cr_base_egrovfl + 10,
4538 cr_base_egrovfl + 11,
4539 cr_base_egrovfl + 12,
4540 cr_base_egrovfl + 13,
4541 cr_base_egrovfl + 14,
4542 cr_base_egrovfl + 15,
4543 cr_base_egrovfl + 16,
4544 cr_base_egrovfl + 17,
4545};
4546
4547/*
4548 * same as cntr7322names and cntr7322indices, but for port-specific counters.
4549 * portcntr7322indices is somewhat complicated by some registers needing
4550 * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
4551 */
4552static const char portcntr7322names[] =
4553 "TxPkt\n"
4554 "TxFlowPkt\n"
4555 "TxWords\n"
4556 "RxPkt\n"
4557 "RxFlowPkt\n"
4558 "RxWords\n"
4559 "TxFlowStall\n"
4560 "TxDmaDesc\n" /* 7220 and 7322-only */
4561 "E RxDlidFltr\n" /* 7220 and 7322-only */
4562 "IBStatusChng\n"
4563 "IBLinkDown\n"
4564 "IBLnkRecov\n"
4565 "IBRxLinkErr\n"
4566 "IBSymbolErr\n"
4567 "RxLLIErr\n"
4568 "RxBadFormat\n"
4569 "RxBadLen\n"
4570 "RxBufOvrfl\n"
4571 "RxEBP\n"
4572 "RxFlowCtlErr\n"
4573 "RxICRCerr\n"
4574 "RxLPCRCerr\n"
4575 "RxVCRCerr\n"
4576 "RxInvalLen\n"
4577 "RxInvalPKey\n"
4578 "RxPktDropped\n"
4579 "TxBadLength\n"
4580 "TxDropped\n"
4581 "TxInvalLen\n"
4582 "TxUnderrun\n"
4583 "TxUnsupVL\n"
4584 "RxLclPhyErr\n" /* 7220 and 7322-only from here down */
4585 "RxVL15Drop\n"
4586 "RxVlErr\n"
4587 "XcessBufOvfl\n"
4588 "RxQPBadCtxt\n" /* 7322-only from here down */
4589 "TXBadHeader\n"
4590 ;
4591
4592static const u32 portcntr7322indices[] = {
4593 QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
4594 crp_pktsendflow,
4595 QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
4596 QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
4597 crp_pktrcvflowctrl,
4598 QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
4599 QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
4600 crp_txsdmadesc | _PORT_64BIT_FLAG,
4601 crp_rxdlidfltr,
4602 crp_ibstatuschange,
4603 QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
4604 QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
4605 QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
4606 QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
4607 QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
4608 QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
4609 QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
4610 QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
4611 QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
4612 crp_rcvflowctrlviol,
4613 QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
4614 QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
4615 QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
4616 QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
4617 QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
4618 QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
4619 crp_txminmaxlenerr,
4620 crp_txdroppedpkt,
4621 crp_txlenerr,
4622 crp_txunderrun,
4623 crp_txunsupvl,
4624 QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
4625 QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
4626 QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
4627 QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
4628 crp_rxqpinvalidctxt,
4629 crp_txhdrerr,
4630};
4631
4632/* do all the setup to make the counter reads efficient later */
4633static void init_7322_cntrnames(struct qib_devdata *dd)
4634{
4635 int i, j = 0;
4636 char *s;
4637
4638 for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
4639 i++) {
4640 /* we always have at least one counter before the egrovfl */
4641 if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
4642 j = 1;
4643 s = strchr(s + 1, '\n');
4644 if (s && j)
4645 j++;
4646 }
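	/*
	 * The scan stops after the EgrOvfl name for the last configured
	 * context (or at the end of the list), so ncntrs and cntrnamelen
	 * cover only counters that can exist on this unit.
	 */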
4647 dd->cspec->ncntrs = i;
4648 if (!s)
4649 /* full list; size is without terminating null */
4650 dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
4651 else
4652 dd->cspec->cntrnamelen = 1 + s - cntr7322names;
4653 dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
4654 * sizeof(u64), GFP_KERNEL);
4655 if (!dd->cspec->cntrs)
4656 qib_dev_err(dd, "Failed allocation for counters\n");
4657
4658 for (i = 0, s = (char *)portcntr7322names; s; i++)
4659 s = strchr(s + 1, '\n');
4660 dd->cspec->nportcntrs = i - 1;
4661 dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
4662 for (i = 0; i < dd->num_pports; ++i) {
4663 dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs
4664 * sizeof(u64), GFP_KERNEL);
4665 if (!dd->pport[i].cpspec->portcntrs)
4666 qib_dev_err(dd, "Failed allocation for"
4667 " portcounters\n");
4668 }
4669}
4670
4671static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
4672 u64 **cntrp)
4673{
4674 u32 ret;
4675
4676 if (namep) {
4677 ret = dd->cspec->cntrnamelen;
4678 if (pos >= ret)
4679 ret = 0; /* final read after getting everything */
4680 else
4681 *namep = (char *) cntr7322names;
4682 } else {
4683 u64 *cntr = dd->cspec->cntrs;
4684 int i;
4685
4686 ret = dd->cspec->ncntrs * sizeof(u64);
4687 if (!cntr || pos >= ret) {
4688 /* everything read, or couldn't get memory */
4689 ret = 0;
4690 goto done;
4691 }
4692 *cntrp = cntr;
4693 for (i = 0; i < dd->cspec->ncntrs; i++)
4694 if (cntr7322indices[i] & _PORT_64BIT_FLAG)
4695 *cntr++ = read_7322_creg(dd,
4696 cntr7322indices[i] &
4697 _PORT_CNTR_IDXMASK);
4698 else
4699 *cntr++ = read_7322_creg32(dd,
4700 cntr7322indices[i]);
4701 }
4702done:
4703 return ret;
4704}
4705
4706static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
4707 char **namep, u64 **cntrp)
4708{
4709 u32 ret;
4710
4711 if (namep) {
4712 ret = dd->cspec->portcntrnamelen;
4713 if (pos >= ret)
4714 ret = 0; /* final read after getting everything */
4715 else
4716 *namep = (char *)portcntr7322names;
4717 } else {
4718 struct qib_pportdata *ppd = &dd->pport[port];
4719 u64 *cntr = ppd->cpspec->portcntrs;
4720 int i;
4721
4722 ret = dd->cspec->nportcntrs * sizeof(u64);
4723 if (!cntr || pos >= ret) {
4724 /* everything read, or couldn't get memory */
4725 ret = 0;
4726 goto done;
4727 }
4728 *cntrp = cntr;
4729 for (i = 0; i < dd->cspec->nportcntrs; i++) {
4730 if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
4731 *cntr++ = qib_portcntr_7322(ppd,
4732 portcntr7322indices[i] &
4733 _PORT_CNTR_IDXMASK);
4734 else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
4735 *cntr++ = read_7322_creg_port(ppd,
4736 portcntr7322indices[i] &
4737 _PORT_CNTR_IDXMASK);
4738 else
4739 *cntr++ = read_7322_creg32_port(ppd,
4740 portcntr7322indices[i]);
4741 }
4742 }
4743done:
4744 return ret;
4745}
4746
4747/**
4748 * qib_get_7322_faststats - get word counters from chip before they overflow
4749 * @opaque: pointer to the qlogic_ib device qib_devdata
4750 *
4751 * VESTIGIAL: the IBA7322 has no "small fast counters", so the only
4752 * real purpose of this function is to maintain the notion of
4753 * "active time", which in turn is only logged into the eeprom,
4754 * which we don't have yet for 7322-based boards.
4755 *
4756 * called from add_timer
4757 */
4758static void qib_get_7322_faststats(unsigned long opaque)
4759{
4760 struct qib_devdata *dd = (struct qib_devdata *) opaque;
4761 struct qib_pportdata *ppd;
4762 unsigned long flags;
4763 u64 traffic_wds;
4764 int pidx;
4765
4766 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
4767 ppd = dd->pport + pidx;
4768
4769 /*
4770 * If the port isn't enabled, or isn't operational, or
4771 * diags are running (which can cause memory diags to fail),
4772 * skip this port this time.
4773 */
4774 if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
4775 || dd->diag_client)
4776 continue;
4777
4778 /*
4779 * Maintain an activity timer, based on traffic
4780 * exceeding a threshold, so we need to check the word-counts
4781 * even if they are 64-bit.
4782 */
4783 traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
4784 qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
4785 spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
4786 traffic_wds -= ppd->dd->traffic_wds;
4787 ppd->dd->traffic_wds += traffic_wds;
4788 if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
4789 atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
4790 spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
4791 if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
4792 QIB_IB_QDR) &&
4793 (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
4794 QIBL_LINKACTIVE)) &&
4795 ppd->cpspec->qdr_dfe_time &&
4796 time_after64(get_jiffies_64(), ppd->cpspec->qdr_dfe_time)) {
4797 ppd->cpspec->qdr_dfe_on = 0;
4798
4799 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
4800 ppd->dd->cspec->r1 ?
4801 QDR_STATIC_ADAPT_INIT_R1 :
4802 QDR_STATIC_ADAPT_INIT);
4803 force_h1(ppd);
4804 }
4805 }
4806 mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
4807}
4808
4809/*
4810 * If we were using MSIx, try to fallback to INTx.
4811 */
4812static int qib_7322_intr_fallback(struct qib_devdata *dd)
4813{
4814 if (!dd->cspec->num_msix_entries)
4815 return 0; /* already using INTx */
4816
4817 qib_devinfo(dd->pcidev, "MSIx interrupt not detected,"
4818 " trying INTx interrupts\n");
4819 qib_7322_nomsix(dd);
4820 qib_enable_intx(dd->pcidev);
4821 qib_setup_7322_interrupt(dd, 0);
4822 return 1;
4823}
4824
4825/*
4826 * Reset the XGXS (between serdes and IBC). Slightly less intrusive
4827 * than resetting the IBC or external link state, and useful in some
4828 * cases to cause some retraining. To do this right, we reset IBC
4829 * as well, then return to previous state (which may be still in reset)
4830 * NOTE: some callers of this "know" this writes the current value
4831 * of cpspec->ibcctrl_a as part of its operation, so if that changes,
4832 * check all callers.
4833 */
4834static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
4835{
4836 u64 val;
4837 struct qib_devdata *dd = ppd->dd;
4838 const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
4839 SYM_MASK(IBPCSConfig_0, xcv_treset) |
4840 SYM_MASK(IBPCSConfig_0, tx_rx_reset);
4841
4842 val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
4843 qib_write_kreg(dd, kr_hwerrmask,
4844 dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop));
4845 qib_write_kreg_port(ppd, krp_ibcctrl_a,
4846 ppd->cpspec->ibcctrl_a &
4847 ~SYM_MASK(IBCCtrlA_0, IBLinkEn));
4848
4849 qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
4850 qib_read_kreg32(dd, kr_scratch);
4851 qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
4852 qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
4853 qib_write_kreg(dd, kr_scratch, 0ULL);
4854 qib_write_kreg(dd, kr_hwerrclear,
4855 SYM_MASK(HwErrClear, statusValidNoEopClear));
4856 qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
4857}
4858
4859/*
4860 * This code for non-IBTA-compliant IB speed negotiation is only known to
4861 * work for the SDR to DDR transition, and only between an HCA and a switch
4862 * with recent firmware. It is based on observed heuristics, rather than
4863 * actual knowledge of the non-compliant speed negotiation.
4864 * It has a number of hard-coded fields, since the hope is to rewrite this
4865 * when a spec is available on how the negotiation is intended to work.
4866 */
4867static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
4868 u32 dcnt, u32 *data)
4869{
4870 int i;
4871 u64 pbc;
4872 u32 __iomem *piobuf;
4873 u32 pnum, control, len;
4874 struct qib_devdata *dd = ppd->dd;
4875
4876 i = 0;
4877 len = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
4878 control = qib_7322_setpbc_control(ppd, len, 0, 15);
4879 pbc = ((u64) control << 32) | len;
4880 while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
4881 if (i++ > 15)
4882 return;
4883 udelay(2);
4884 }
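	/*
	 * Buffer layout used below (readable from the copies that follow):
	 * dwords 0-1 of the PIO buffer receive the PBC, dwords 2-8 the
	 * 7-dword packet header, and the MAD payload starts at dword 9.
	 */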
4885 /* disable header check on this packet, since it can't be valid */
4886 dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
4887 writeq(pbc, piobuf);
4888 qib_flush_wc();
4889 qib_pio_copy(piobuf + 2, hdr, 7);
4890 qib_pio_copy(piobuf + 9, data, dcnt);
4891 if (dd->flags & QIB_USE_SPCL_TRIG) {
4892 u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
4893
4894 qib_flush_wc();
4895 __raw_writel(0xaebecede, piobuf + spcl_off);
4896 }
4897 qib_flush_wc();
4898 qib_sendbuf_done(dd, pnum);
4899 /* and re-enable hdr check */
4900 dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
4901}
4902
4903/*
4904 * _start packet gets sent twice at start, _done gets sent twice at end
4905 */
4906static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
4907{
4908 struct qib_devdata *dd = ppd->dd;
4909 static u32 swapped;
4910 u32 dw, i, hcnt, dcnt, *data;
4911 static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
4912 static u32 madpayload_start[0x40] = {
4913 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
4914 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
4915 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
4916 };
4917 static u32 madpayload_done[0x40] = {
4918 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
4919 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
4920 0x40000001, 0x1388, 0x15e, /* rest 0's */
4921 };
4922
4923 dcnt = ARRAY_SIZE(madpayload_start);
4924 hcnt = ARRAY_SIZE(hdr);
4925 if (!swapped) {
4926 /* for maintainability, do it at runtime */
4927 for (i = 0; i < hcnt; i++) {
4928 dw = (__force u32) cpu_to_be32(hdr[i]);
4929 hdr[i] = dw;
4930 }
4931 for (i = 0; i < dcnt; i++) {
4932 dw = (__force u32) cpu_to_be32(madpayload_start[i]);
4933 madpayload_start[i] = dw;
4934 dw = (__force u32) cpu_to_be32(madpayload_done[i]);
4935 madpayload_done[i] = dw;
4936 }
4937 swapped = 1;
4938 }
4939
4940 data = which ? madpayload_done : madpayload_start;
4941
4942 autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
4943 qib_read_kreg64(dd, kr_scratch);
4944 udelay(2);
4945 autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
4946 qib_read_kreg64(dd, kr_scratch);
4947 udelay(2);
4948}
4949
4950/*
4951 * Do the absolute minimum to cause an IB speed change, and make it
4952 * ready, but don't actually trigger the change. The caller will
4953 * do that when ready (if link is in Polling training state, it will
4954 * happen immediately, otherwise when link next goes down)
4955 *
4956 * This routine should only be used as part of the DDR autonegotiation
4957 * code for devices that are not compliant with IB 1.2 (or code that
4958 * fixes things up for same).
4959 *
4960 * When the link has gone down and autoneg is enabled, or autoneg has
4961 * failed and we give up until next time, we set both speeds, and
4962 * then we want IBTA enabled as well as "use max enabled speed".
4963 */
4964static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
4965{
4966 u64 newctrlb;
4967 newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
4968 IBA7322_IBC_IBTA_1_2_MASK |
4969 IBA7322_IBC_MAX_SPEED_MASK);
4970
4971 if (speed & (speed - 1)) /* multiple speeds */
4972 newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
4973 IBA7322_IBC_IBTA_1_2_MASK |
4974 IBA7322_IBC_MAX_SPEED_MASK;
4975 else
4976 newctrlb |= speed == QIB_IB_QDR ?
4977 IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
4978 ((speed == QIB_IB_DDR ?
4979 IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));
4980
4981 if (newctrlb == ppd->cpspec->ibcctrl_b)
4982 return;
4983
4984 ppd->cpspec->ibcctrl_b = newctrlb;
4985 qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
4986 qib_write_kreg(ppd->dd, kr_scratch, 0);
4987}
4988
4989/*
4990 * This routine is only used when we are not talking to another
4991 * IB 1.2-compliant device that we think can do DDR.
4992 * (This includes all existing switch chips as of Oct 2007.)
4993 * 1.2-compliant devices go directly to DDR prior to reaching INIT
4994 */
4995static void try_7322_autoneg(struct qib_pportdata *ppd)
4996{
4997 unsigned long flags;
4998
4999 spin_lock_irqsave(&ppd->lflags_lock, flags);
5000 ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
5001 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5002 qib_autoneg_7322_send(ppd, 0);
5003 set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5004 qib_7322_mini_pcs_reset(ppd);
5005 /* 2 msec is minimum length of a poll cycle */
5006 queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
5007 msecs_to_jiffies(2));
5008}
5009
5010/*
5011 * Handle the empirically determined mechanism for auto-negotiation
5012 * of DDR speed with switches.
5013 */
5014static void autoneg_7322_work(struct work_struct *work)
5015{
5016 struct qib_pportdata *ppd;
5017 struct qib_devdata *dd;
5018 u64 startms;
5019 u32 i;
5020 unsigned long flags;
5021
5022 ppd = container_of(work, struct qib_chippport_specific,
5023 autoneg_work.work)->ppd;
5024 dd = ppd->dd;
5025
5026 startms = jiffies_to_msecs(jiffies);
5027
5028 /*
5029 * Busy-wait for this first part; it should be at most a
5030 * few hundred usec, since we scheduled ourselves for 2msec.
5031 */
5032 for (i = 0; i < 25; i++) {
5033 if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
5034 == IB_7322_LT_STATE_POLLQUIET) {
5035 qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
5036 break;
5037 }
5038 udelay(100);
5039 }
5040
5041 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
5042 goto done; /* we got there early or told to stop */
5043
5044 /* we expect this to timeout */
5045 if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5046 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5047 msecs_to_jiffies(90)))
5048 goto done;
5049 qib_7322_mini_pcs_reset(ppd);
5050
5051 /* we expect this to timeout */
5052 if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5053 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5054 msecs_to_jiffies(1700)))
5055 goto done;
5056 qib_7322_mini_pcs_reset(ppd);
5057
5058 set_7322_ibspeed_fast(ppd, QIB_IB_SDR);
5059
5060 /*
5061 * Wait up to 250 msec for link to train and get to INIT at DDR;
5062 * this should terminate early.
5063 */
5064 wait_event_timeout(ppd->cpspec->autoneg_wait,
5065 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5066 msecs_to_jiffies(250));
5067done:
5068 if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
5069 spin_lock_irqsave(&ppd->lflags_lock, flags);
5070 ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
5071 if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
5072 ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
5073 ppd->cpspec->autoneg_tries = 0;
5074 }
5075 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5076 set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5077 }
5078}
5079
5080/*
5081 * This routine is used to request the IPG setting in the QLogic switch.
5082 * Only called if r1.
5083 */
5084static void try_7322_ipg(struct qib_pportdata *ppd)
5085{
5086 struct qib_ibport *ibp = &ppd->ibport_data;
5087 struct ib_mad_send_buf *send_buf;
5088 struct ib_mad_agent *agent;
5089 struct ib_smp *smp;
5090 unsigned delay;
5091 int ret;
5092
5093 agent = ibp->send_agent;
5094 if (!agent)
5095 goto retry;
5096
5097 send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
5098 IB_MGMT_MAD_DATA, GFP_ATOMIC);
5099 if (IS_ERR(send_buf))
5100 goto retry;
5101
5102 if (!ibp->smi_ah) {
5103 struct ib_ah_attr attr;
5104 struct ib_ah *ah;
5105
5106 memset(&attr, 0, sizeof attr);
5107 attr.dlid = be16_to_cpu(IB_LID_PERMISSIVE);
5108 attr.port_num = ppd->port;
5109 ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr);
5110 if (IS_ERR(ah))
5111 ret = -EINVAL;
5112 else {
5113 send_buf->ah = ah;
5114 ibp->smi_ah = to_iah(ah);
5115 ret = 0;
5116 }
5117 } else {
5118 send_buf->ah = &ibp->smi_ah->ibah;
5119 ret = 0;
5120 }
5121
5122 smp = send_buf->mad;
5123 smp->base_version = IB_MGMT_BASE_VERSION;
5124 smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
5125 smp->class_version = 1;
5126 smp->method = IB_MGMT_METHOD_SEND;
5127 smp->hop_cnt = 1;
5128 smp->attr_id = QIB_VENDOR_IPG;
5129 smp->attr_mod = 0;
5130
5131 if (!ret)
5132 ret = ib_post_send_mad(send_buf, NULL);
5133 if (ret)
5134 ib_free_send_mad(send_buf);
5135retry:
5136 delay = 2 << ppd->cpspec->ipg_tries;
5137 queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
5138 msecs_to_jiffies(delay));
5139}
5140
5141/*
5142 * Timeout handler for setting IPG.
5143 * Only called if r1.
5144 */
5145static void ipg_7322_work(struct work_struct *work)
5146{
5147 struct qib_pportdata *ppd;
5148
5149 ppd = container_of(work, struct qib_chippport_specific,
5150 ipg_work.work)->ppd;
5151 if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
5152 && ++ppd->cpspec->ipg_tries <= 10)
5153 try_7322_ipg(ppd);
5154}
5155
5156static u32 qib_7322_iblink_state(u64 ibcs)
5157{
5158 u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);
5159
5160 switch (state) {
5161 case IB_7322_L_STATE_INIT:
5162 state = IB_PORT_INIT;
5163 break;
5164 case IB_7322_L_STATE_ARM:
5165 state = IB_PORT_ARMED;
5166 break;
5167 case IB_7322_L_STATE_ACTIVE:
5168 /* fall through */
5169 case IB_7322_L_STATE_ACT_DEFER:
5170 state = IB_PORT_ACTIVE;
5171 break;
5172 default: /* fall through */
5173 case IB_7322_L_STATE_DOWN:
5174 state = IB_PORT_DOWN;
5175 break;
5176 }
5177 return state;
5178}
5179
5180/* returns the IBTA port state, rather than the IBC link training state */
5181static u8 qib_7322_phys_portstate(u64 ibcs)
5182{
5183 u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
5184 return qib_7322_physportstate[state];
5185}
5186
5187static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
5188{
5189 int ret = 0, symadj = 0;
5190 unsigned long flags;
5191 int mult;
5192
5193 spin_lock_irqsave(&ppd->lflags_lock, flags);
5194 ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
5195 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5196
5197 /* Update our picture of width and speed from chip */
5198 if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
5199 ppd->link_speed_active = QIB_IB_QDR;
5200 mult = 4;
5201 } else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
5202 ppd->link_speed_active = QIB_IB_DDR;
5203 mult = 2;
5204 } else {
5205 ppd->link_speed_active = QIB_IB_SDR;
5206 mult = 1;
5207 }
5208 if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
5209 ppd->link_width_active = IB_WIDTH_4X;
5210 mult *= 4;
5211 } else
5212 ppd->link_width_active = IB_WIDTH_1X;
5213 ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];
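	/*
	 * Illustrative note (assumes the usual meaning of these helpers):
	 * mult is the speed multiplier (1/2/4 for SDR/DDR/QDR) times the
	 * width multiplier (4 for a 4X link), so a QDR 4X link gives
	 * mult == 16, which mult_to_ib_rate() presumably maps to the
	 * 40 Gb/s IB rate used to look up the per-packet delay.
	 */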
5214
5215 if (!ibup) {
5216 u64 clr;
5217
5218 /* Link went down. */
5219 /* do IPG MAD again after linkdown, even if last time failed */
5220 ppd->cpspec->ipg_tries = 0;
5221 clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
5222 (SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
5223 SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
5224 if (clr)
5225 qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
5226 if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5227 QIBL_IB_AUTONEG_INPROG)))
5228 set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5229 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5230 struct qib_qsfp_data *qd =
5231 &ppd->cpspec->qsfp_data;
5232 /* unlock the Tx settings, speed may change */
5233 qib_write_kreg_port(ppd, krp_tx_deemph_override,
5234 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
5235 reset_tx_deemphasis_override));
5236 qib_cancel_sends(ppd);
5237 /* on link down, ensure sane pcs state */
5238 qib_7322_mini_pcs_reset(ppd);
5239 /* schedule the qsfp refresh which should turn the link
5240 off */
5241 if (ppd->dd->flags & QIB_HAS_QSFP) {
5242 qd->t_insert = get_jiffies_64();
5243 schedule_work(&qd->work);
5244 }
5245 spin_lock_irqsave(&ppd->sdma_lock, flags);
5246 if (__qib_sdma_running(ppd))
5247 __qib_sdma_process_event(ppd,
5248 qib_sdma_event_e70_go_idle);
5249 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
5250 }
5251 clr = read_7322_creg32_port(ppd, crp_iblinkdown);
5252 if (clr == ppd->cpspec->iblnkdownsnap)
5253 ppd->cpspec->iblnkdowndelta++;
5254 } else {
5255 if (qib_compat_ddr_negotiate &&
5256 !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5257 QIBL_IB_AUTONEG_INPROG)) &&
5258 ppd->link_speed_active == QIB_IB_SDR &&
5259 (ppd->link_speed_enabled & QIB_IB_DDR)
5260 && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
5261 /* we are SDR, and auto-negotiation enabled */
5262 ++ppd->cpspec->autoneg_tries;
5263 if (!ppd->cpspec->ibdeltainprog) {
5264 ppd->cpspec->ibdeltainprog = 1;
5265 ppd->cpspec->ibsymdelta +=
5266 read_7322_creg32_port(ppd,
5267 crp_ibsymbolerr) -
5268 ppd->cpspec->ibsymsnap;
5269 ppd->cpspec->iblnkerrdelta +=
5270 read_7322_creg32_port(ppd,
5271 crp_iblinkerrrecov) -
5272 ppd->cpspec->iblnkerrsnap;
5273 }
5274 try_7322_autoneg(ppd);
5275 ret = 1; /* no other IB status change processing */
5276 } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5277 ppd->link_speed_active == QIB_IB_SDR) {
5278 qib_autoneg_7322_send(ppd, 1);
5279 set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5280 qib_7322_mini_pcs_reset(ppd);
5281 udelay(2);
5282 ret = 1; /* no other IB status change processing */
5283 } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5284 (ppd->link_speed_active & QIB_IB_DDR)) {
5285 spin_lock_irqsave(&ppd->lflags_lock, flags);
5286 ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
5287 QIBL_IB_AUTONEG_FAILED);
5288 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5289 ppd->cpspec->autoneg_tries = 0;
5290 /* re-enable SDR, for next link down */
5291 set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5292 wake_up(&ppd->cpspec->autoneg_wait);
5293 symadj = 1;
5294 } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
5295 /*
5296 * Clear autoneg failure flag, and do setup
5297 * so we'll try next time link goes down and
5298 * back to INIT (possibly connected to a
5299 * different device).
5300 */
5301 spin_lock_irqsave(&ppd->lflags_lock, flags);
5302 ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
5303 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5304 ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
5305 symadj = 1;
5306 }
5307 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5308 symadj = 1;
5309 if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
5310 try_7322_ipg(ppd);
5311 if (!ppd->cpspec->recovery_init)
5312 setup_7322_link_recovery(ppd, 0);
5313 ppd->cpspec->qdr_dfe_time = jiffies +
5314 msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
5315 }
5316 ppd->cpspec->ibmalfusesnap = 0;
5317 ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
5318 crp_errlink);
5319 }
5320 if (symadj) {
5321 ppd->cpspec->iblnkdownsnap =
5322 read_7322_creg32_port(ppd, crp_iblinkdown);
5323 if (ppd->cpspec->ibdeltainprog) {
5324 ppd->cpspec->ibdeltainprog = 0;
5325 ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
5326 crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
5327 ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
5328 crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
5329 }
5330 } else if (!ibup && qib_compat_ddr_negotiate &&
5331 !ppd->cpspec->ibdeltainprog &&
5332 !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5333 ppd->cpspec->ibdeltainprog = 1;
5334 ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
5335 crp_ibsymbolerr);
5336 ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
5337 crp_iblinkerrrecov);
5338 }
5339
5340 if (!ret)
5341 qib_setup_7322_setextled(ppd, ibup);
5342 return ret;
5343}
5344
5345/*
5346 * Does read/modify/write to appropriate registers to
5347 * set output and direction bits selected by mask.
5348 * These are in their canonical positions (e.g. lsb of
5349 * dir will end up in D48 of extctrl on existing chips).
5350 * Returns contents of GP Inputs.
5351 */
5352static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
5353{
5354 u64 read_val, new_out;
5355 unsigned long flags;
5356
5357 if (mask) {
5358 /* some bits being written, lock access to GPIO */
5359 dir &= mask;
5360 out &= mask;
5361 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
5362 dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
5363 dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
5364 new_out = (dd->cspec->gpio_out & ~mask) | out;
5365
5366 qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
5367 qib_write_kreg(dd, kr_gpio_out, new_out);
5368 dd->cspec->gpio_out = new_out;
5369 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
5370 }
5371 /*
5372 * It is unlikely that a read at this time would get valid
5373 * data on a pin whose direction line was set in the same
5374 * call to this function. We include the read here because
5375 * that allows us to potentially combine a change on one pin with
5376 * a read on another, and because the old code did something like
5377 * this.
5378 */
5379 read_val = qib_read_kreg64(dd, kr_extstatus);
5380 return SYM_FIELD(read_val, EXTStatus, GPIOIn);
5381}
5382
5383/* Enable writes to config EEPROM, if possible. Returns previous state */
5384static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
5385{
5386 int prev_wen;
5387 u32 mask;
5388
5389 mask = 1 << QIB_EEPROM_WEN_NUM;
5390 prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
5391 gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);
5392
5393 return prev_wen & 1;
5394}
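/*
 * Note on the helper above, inferred from the gpio_7322_mod() calls rather
 * than from board documentation: the EEPROM write-enable pin appears to be
 * active-low, so enabling writes drives the pin to 0 and disabling drives
 * it to 1; the previous state is recovered by inverting the GPIO input bit.
 */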
5395
5396/*
5397 * Read fundamental info we need to use the chip. These are
5398 * the registers that describe chip capabilities, and are
5399 * saved in shadow registers.
5400 */
5401static void get_7322_chip_params(struct qib_devdata *dd)
5402{
5403 u64 val;
5404 u32 piobufs;
5405 int mtu;
5406
5407 dd->palign = qib_read_kreg32(dd, kr_pagealign);
5408
5409 dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
5410
5411 dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
5412 dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
5413 dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
5414 dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
5415 dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
5416
5417 val = qib_read_kreg64(dd, kr_sendpiobufcnt);
5418 dd->piobcnt2k = val & ~0U;
5419 dd->piobcnt4k = val >> 32;
5420 val = qib_read_kreg64(dd, kr_sendpiosize);
5421 dd->piosize2k = val & ~0U;
5422 dd->piosize4k = val >> 32;
5423
5424 mtu = ib_mtu_enum_to_int(qib_ibmtu);
5425 if (mtu == -1)
5426 mtu = QIB_DEFAULT_MTU;
5427 dd->pport[0].ibmtu = (u32)mtu;
5428 dd->pport[1].ibmtu = (u32)mtu;
5429
5430 /* these may be adjusted in init_chip_wc_pat() */
5431 dd->pio2kbase = (u32 __iomem *)
5432 ((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
5433 dd->pio4kbase = (u32 __iomem *)
5434 ((char __iomem *) dd->kregbase +
5435 (dd->piobufbase >> 32));
5436 /*
5437 * 4K buffers take 2 pages; we use roundup just to be
5438 * paranoid; we calculate it once here, rather than on
5439 * every buffer allocation.
5440 */
5441 dd->align4k = ALIGN(dd->piosize4k, dd->palign);
5442
5443 piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;
5444
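	/*
	 * Each send buffer takes 2 bits in the PIO-availability shadow
	 * (presumably a busy bit plus a generation bit), so 32 buffers fit
	 * in each 64-bit register; the expression below rounds piobufs up
	 * to a multiple of 32 and converts that to a register count.
	 */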
5445 dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
5446 (sizeof(u64) * BITS_PER_BYTE / 2);
5447}
5448
5449/*
5450 * The chip base addresses in cspec and cpspec have to be set
5451 * after possible init_chip_wc_pat(), rather than in
5452 * get_7322_chip_params(), so split out as separate function
5453 */
5454static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
5455{
5456 u32 cregbase;
5457 cregbase = qib_read_kreg32(dd, kr_counterregbase);
5458
5459 dd->cspec->cregbase = (u64 __iomem *)(cregbase +
5460 (char __iomem *)dd->kregbase);
5461
5462 dd->egrtidbase = (u64 __iomem *)
5463 ((char __iomem *) dd->kregbase + dd->rcvegrbase);
5464
5465 /* port registers are defined as relative to base of chip */
5466 dd->pport[0].cpspec->kpregbase =
5467 (u64 __iomem *)((char __iomem *)dd->kregbase);
5468 dd->pport[1].cpspec->kpregbase =
5469 (u64 __iomem *)(dd->palign +
5470 (char __iomem *)dd->kregbase);
5471 dd->pport[0].cpspec->cpregbase =
5472 (u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
5473 kr_counterregbase) + (char __iomem *)dd->kregbase);
5474 dd->pport[1].cpspec->cpregbase =
5475 (u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
5476 kr_counterregbase) + (char __iomem *)dd->kregbase);
5477}
5478
5479/*
5480 * This is a fairly special-purpose observer, so we only support
5481 * the port-specific parts of SendCtrl
5482 */
5483
5484#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) | \
5485 SYM_MASK(SendCtrl_0, SDmaEnable) | \
5486 SYM_MASK(SendCtrl_0, SDmaIntEnable) | \
5487 SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \
5488 SYM_MASK(SendCtrl_0, SDmaHalt) | \
5489 SYM_MASK(SendCtrl_0, IBVLArbiterEn) | \
5490 SYM_MASK(SendCtrl_0, ForceCreditUpToDate))
5491
5492static int sendctrl_hook(struct qib_devdata *dd,
5493 const struct diag_observer *op, u32 offs,
5494 u64 *data, u64 mask, int only_32)
5495{
5496 unsigned long flags;
5497 unsigned idx;
5498 unsigned pidx;
5499 struct qib_pportdata *ppd = NULL;
5500 u64 local_data, all_bits;
5501
5502 /*
5503 * The fixed correspondence between Physical ports and pports is
5504 * severed. We need to hunt for the ppd that corresponds
5505 * to the offset we got. And we have to do that without admitting
5506 * we know the stride, apparently.
5507 */
5508 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5509 u64 __iomem *psptr;
5510 u32 psoffs;
5511
5512 ppd = dd->pport + pidx;
5513 if (!ppd->cpspec->kpregbase)
5514 continue;
5515
5516 psptr = ppd->cpspec->kpregbase + krp_sendctrl;
5517 psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
5518 if (psoffs == offs)
5519 break;
5520 }
5521
5522 /* If pport is not being managed by driver, just avoid shadows. */
5523 if (pidx >= dd->num_pports)
5524 ppd = NULL;
5525
5526 /* In any case, "idx" is flat index in kreg space */
5527 idx = offs / sizeof(u64);
5528
5529 all_bits = ~0ULL;
5530 if (only_32)
5531 all_bits >>= 32;
5532
5533 spin_lock_irqsave(&dd->sendctrl_lock, flags);
5534 if (!ppd || (mask & all_bits) != all_bits) {
5535 /*
5536 * At least some mask bits are zero, so we need
5537 * to read. The judgement call is whether from
5538 * reg or shadow. First-cut: read reg, and complain
5539 * if any bits which should be shadowed are different
5540 * from their shadowed value.
5541 */
5542 if (only_32)
5543 local_data = (u64)qib_read_kreg32(dd, idx);
5544 else
5545 local_data = qib_read_kreg64(dd, idx);
5546 *data = (local_data & ~mask) | (*data & mask);
5547 }
5548 if (mask) {
5549 /*
5550 * At least some mask bits are one, so we need
5551 * to write, but only shadow some bits.
5552 */
5553 u64 sval, tval; /* Shadowed, transient */
5554
5555 /*
5556 * New shadow val is bits we don't want to touch,
5557 * ORed with bits we do, that are intended for shadow.
5558 */
5559 if (ppd) {
5560 sval = ppd->p_sendctrl & ~mask;
5561 sval |= *data & SENDCTRL_SHADOWED & mask;
5562 ppd->p_sendctrl = sval;
5563 } else
5564 sval = *data & SENDCTRL_SHADOWED & mask;
5565 tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
5566 qib_write_kreg(dd, idx, tval);
5567 qib_write_kreg(dd, kr_scratch, 0Ull);
5568 }
5569 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
5570 return only_32 ? 4 : 8;
5571}
5572
5573static const struct diag_observer sendctrl_0_observer = {
5574 sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
5575 KREG_IDX(SendCtrl_0) * sizeof(u64)
5576};
5577
5578static const struct diag_observer sendctrl_1_observer = {
5579 sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
5580 KREG_IDX(SendCtrl_1) * sizeof(u64)
5581};
5582
5583static ushort sdma_fetch_prio = 8;
5584module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
5585MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");
5586
5587/* Besides logging QSFP events, we set appropriate TxDDS values */
5588static void init_txdds_table(struct qib_pportdata *ppd, int override);
5589
5590static void qsfp_7322_event(struct work_struct *work)
5591{
5592 struct qib_qsfp_data *qd;
5593 struct qib_pportdata *ppd;
5594 u64 pwrup;
5595 unsigned long flags;
5596 int ret;
5597 u32 le2;
5598
5599 qd = container_of(work, struct qib_qsfp_data, work);
5600 ppd = qd->ppd;
5601 pwrup = qd->t_insert +
5602 msecs_to_jiffies(QSFP_PWR_LAG_MSEC - QSFP_MODPRS_LAG_MSEC);
5603
5604 /* Delay for 20 msecs to allow ModPrs resistor to setup */
5605 mdelay(QSFP_MODPRS_LAG_MSEC);
5606
5607 if (!qib_qsfp_mod_present(ppd)) {
5608 ppd->cpspec->qsfp_data.modpresent = 0;
5609 /* Set the physical link to disabled */
5610 qib_set_ib_7322_lstate(ppd, 0,
5611 QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
5612 spin_lock_irqsave(&ppd->lflags_lock, flags);
5613 ppd->lflags &= ~QIBL_LINKV;
5614 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5615 } else {
5616 /*
5617 * Some QSFP's not only do not respond until the full power-up
5618 * time, but may behave badly if we try. So hold off responding
5619 * to insertion.
5620 */
5621 while (1) {
5622 u64 now = get_jiffies_64();
5623 if (time_after64(now, pwrup))
5624 break;
5625 msleep(20);
5626 }
5627
5628 ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
5629
5630 /*
5631 * Need to change LE2 back to defaults if we couldn't
5632 * read the cable type (to handle cable swaps), so do this
5633 * even on failure to read cable information. We don't
5634 * get here for QME, so IS_QME check not needed here.
5635 */
5636 if (!ret && !ppd->dd->cspec->r1) {
5637 if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
5638 le2 = LE2_QME;
5639 else if (qd->cache.atten[1] >= qib_long_atten &&
5640 QSFP_IS_CU(qd->cache.tech))
5641 le2 = LE2_5m;
5642 else
5643 le2 = LE2_DEFAULT;
5644 } else
5645 le2 = LE2_DEFAULT;
5646 ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
5647 /*
5648 * We always change parameters, since we can choose
5649 * values for cables without eeproms, and the cable may have
5650 * changed from a cable with full or partial eeprom content
5651 * to one with partial or no content.
5652 */
5653 init_txdds_table(ppd, 0);
5654 /* The physical link is being re-enabled only when the
5655 * previous state was DISABLED and the VALID bit is not
5656 * set. This should only happen when the cable has been
5657 * physically pulled. */
5658 if (!ppd->cpspec->qsfp_data.modpresent &&
5659 (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) {
5660 ppd->cpspec->qsfp_data.modpresent = 1;
5661 qib_set_ib_7322_lstate(ppd, 0,
5662 QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
5663 spin_lock_irqsave(&ppd->lflags_lock, flags);
5664 ppd->lflags |= QIBL_LINKV;
5665 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5666 }
5667 }
5668}
5669
5670/*
5671 * There is little we can do but complain to the user if QSFP
5672 * initialization fails.
5673 */
5674static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
5675{
5676 unsigned long flags;
5677 struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
5678 struct qib_devdata *dd = ppd->dd;
5679 u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;
5680
5681 mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
5682 qd->ppd = ppd;
5683 qib_qsfp_init(qd, qsfp_7322_event);
5684 spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
5685 dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
5686 dd->cspec->gpio_mask |= mod_prs_bit;
5687 qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
5688 qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
5689 spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
5690}
5691
5692/*
5693 * Called at device initialization time, and also if the txselect
5694 * module parameter is changed. This is used for cables that don't
5695 * have valid QSFP EEPROMs (not present, or attenuation is zero).
5696 * We initialize to the default, then if there is a specific
5697 * unit,port match, we use that (and set it immediately, for the
5698 * current speed, if the link is at INIT or better).
5699 * String format is "default# unit#,port#=# ... u,p=#"; separators must
5700 * be a SPACE character. A newline terminates. The u,p=# tuples may
5701 * optionally have "u,p=#,#", where the final # is the H1 value.
5702 * The last specific match is used (actually, all are used, but the last
5703 * one is the one that winds up set); if none at all, fall back on default.
5704 */
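/*
 * Hypothetical example of the format above (values invented purely for
 * illustration): "2 0,1=5 0,2=11,7\n" sets the default index to 2, then
 * overrides unit 0 port 1 to index 5, and unit 0 port 2 to index 11 with
 * an H1 value of 7.
 */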
5705static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
5706{
5707 char *nxt, *str;
5708 u32 pidx, unit, port, deflt, h1;
Ralph Campbellf9315512010-05-23 21:44:54 -07005709 unsigned long val;
5710 int any = 0, seth1;
5711 int txdds_size;
5712
5713 str = txselect_list;
5714
5715 /* default number is validated in setup_txselect() */
5716 deflt = simple_strtoul(str, &nxt, 0);
5717 for (pidx = 0; pidx < dd->num_pports; ++pidx)
5718 dd->pport[pidx].cpspec->no_eep = deflt;
5719
5720 txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
5721 if (IS_QME(dd) || IS_QMH(dd))
5722 txdds_size += TXDDS_MFG_SZ;
5723
5724 while (*nxt && nxt[1]) {
5725 str = ++nxt;
5726 unit = simple_strtoul(str, &nxt, 0);
5727 if (nxt == str || !*nxt || *nxt != ',') {
5728 while (*nxt && *nxt++ != ' ') /* skip to next, if any */
5729 ;
5730 continue;
5731 }
5732 str = ++nxt;
5733 port = simple_strtoul(str, &nxt, 0);
5734 if (nxt == str || *nxt != '=') {
5735 while (*nxt && *nxt++ != ' ') /* skip to next, if any */
5736 ;
5737 continue;
5738 }
5739 str = ++nxt;
5740 val = simple_strtoul(str, &nxt, 0);
5741 if (nxt == str) {
5742 while (*nxt && *nxt++ != ' ') /* skip to next, if any */
5743 ;
5744 continue;
5745 }
5746 if (val >= txdds_size)
5747 continue;
5748 seth1 = 0;
5749 h1 = 0; /* gcc thinks it might be used uninitted */
5750 if (*nxt == ',' && nxt[1]) {
5751 str = ++nxt;
5752 h1 = (u32)simple_strtoul(str, &nxt, 0);
5753 if (nxt == str)
5754 while (*nxt && *nxt++ != ' ') /* skip */
5755 ;
5756 else
5757 seth1 = 1;
5758 }
5759 for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
5760 ++pidx) {
5761 struct qib_pportdata *ppd = &dd->pport[pidx];
5762
5763 if (ppd->port != port || !ppd->link_speed_supported)
5764 continue;
5765 ppd->cpspec->no_eep = val;
5766 if (seth1)
5767 ppd->cpspec->h1_val = h1;
5768 /* now change the IBC and serdes, overriding generic */
5769 init_txdds_table(ppd, 1);
5770 /* Re-enable the physical state machine on mezz boards
5771 * now that the correct settings have been set.
5772 * QSFP boards are handled by the QSFP event handler */
5773 if (IS_QMH(dd) || IS_QME(dd))
5774 qib_set_ib_7322_lstate(ppd, 0,
5775 QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
5776 any++;
5777 }
5778 if (*nxt == '\n')
5779 break; /* done */
5780 }
5781 if (change && !any) {
5782 /* no specific setting, use the default.
5783 * Change the IBC and serdes, but since it's
5784 * general, don't override specific settings.
5785 */
5786 for (pidx = 0; pidx < dd->num_pports; ++pidx)
5787 if (dd->pport[pidx].link_speed_supported)
5788 init_txdds_table(&dd->pport[pidx], 0);
5789 }
5790}
5791
5792/* handle the txselect parameter changing */
5793static int setup_txselect(const char *str, struct kernel_param *kp)
5794{
5795 struct qib_devdata *dd;
5796 unsigned long val;
5797 char *n;
5798 if (strlen(str) >= MAX_ATTEN_LEN) {
5799 printk(KERN_INFO QIB_DRV_NAME " txselect_values string "
5800 "too long\n");
5801 return -ENOSPC;
5802 }
5803 val = simple_strtoul(str, &n, 0);
5804 if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
5805 TXDDS_MFG_SZ)) {
5806 printk(KERN_INFO QIB_DRV_NAME
5807 "txselect_values must start with a number < %d\n",
5808 TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
5809 return -EINVAL;
5810 }
5811 strcpy(txselect_list, str);
5812
5813 list_for_each_entry(dd, &qib_dev_list, list)
5814 if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
5815 set_no_qsfp_atten(dd, 1);
5816 return 0;
5817}
5818
5819/*
5820 * Write the final few registers that depend on some of the
5821 * init setup. Done late in init, just before bringing up
5822 * the serdes.
5823 */
5824static int qib_late_7322_initreg(struct qib_devdata *dd)
5825{
5826 int ret = 0, n;
5827 u64 val;
5828
5829 qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
5830 qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
5831 qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
5832 qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
5833 val = qib_read_kreg64(dd, kr_sendpioavailaddr);
5834 if (val != dd->pioavailregs_phys) {
5835 qib_dev_err(dd, "Catastrophic software error, "
5836 "SendPIOAvailAddr written as %lx, "
5837 "read back as %llx\n",
5838 (unsigned long) dd->pioavailregs_phys,
5839 (unsigned long long) val);
5840 ret = -EINVAL;
5841 }
5842
5843 n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
5844 qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
5845 /* driver sends get pkey, lid, etc. checking also, to catch bugs */
5846 qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);
5847
5848 qib_register_observer(dd, &sendctrl_0_observer);
5849 qib_register_observer(dd, &sendctrl_1_observer);
5850
5851 dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
5852 qib_write_kreg(dd, kr_control, dd->control);
5853 /*
5854 * Set SendDmaFetchPriority and init Tx params, including
5855 * QSFP handler on boards that have QSFP.
5856 * First set our default attenuation entry for cables that
5857 * don't have valid attenuation.
5858 */
5859 set_no_qsfp_atten(dd, 0);
5860 for (n = 0; n < dd->num_pports; ++n) {
5861 struct qib_pportdata *ppd = dd->pport + n;
5862
5863 qib_write_kreg_port(ppd, krp_senddmaprioritythld,
5864 sdma_fetch_prio & 0xf);
5865 /* Initialize qsfp if present on board. */
5866 if (dd->flags & QIB_HAS_QSFP)
5867 qib_init_7322_qsfp(ppd);
5868 }
5869 dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
5870 qib_write_kreg(dd, kr_control, dd->control);
5871
5872 return ret;
5873}
5874
5875/* per IB port errors. */
5876#define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
5877 MASK_ACROSS(8, 15))
5878#define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
5879#define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
5880 MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
5881 MASK_ACROSS(0, 11))
5882
5883/*
5884 * Write the initialization per-port registers that need to be done at
5885 * driver load and after reset completes (i.e., that aren't done as part
5886 * of other init procedures called from qib_init.c).
5887 * Some of these should be redundant on reset, but play safe.
5888 */
5889static void write_7322_init_portregs(struct qib_pportdata *ppd)
5890{
5891 u64 val;
5892 int i;
5893
5894 if (!ppd->link_speed_supported) {
5895 /* no buffer credits for this port */
5896 for (i = 1; i < 8; i++)
5897 qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
5898 qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
5899 qib_write_kreg(ppd->dd, kr_scratch, 0);
5900 return;
5901 }
5902
5903 /*
5904 * Set the number of supported virtual lanes in IBC,
5905 * for flow control packet handling on unsupported VLs
5906 */
5907 val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
5908 val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
5909 val |= (u64)(ppd->vls_supported - 1) <<
5910 SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
5911 qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
5912
5913 qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);
5914
5915 /* enable tx header checking */
5916 qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
5917 IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
5918 IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);
5919
5920 qib_write_kreg_port(ppd, krp_ncmodectrl,
5921 SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));
5922
5923 /*
5924 * Unconditionally clear the bufmask bits. If SDMA is
5925 * enabled, we'll set them appropriately later.
5926 */
5927 qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
5928 qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
5929 qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
5930 if (ppd->dd->cspec->r1)
5931 ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
5932}
5933
5934/*
5935 * Write the initialization per-device registers that need to be done at
5936 * driver load and after reset completes (i.e., that aren't done as part
5937 * of other init procedures called from qib_init.c). Also write per-port
5938 * registers that are affected by overall device config, such as QP mapping
5939 * Some of these should be redundant on reset, but play safe.
5940 */
5941static void write_7322_initregs(struct qib_devdata *dd)
5942{
5943 struct qib_pportdata *ppd;
5944 int i, pidx;
5945 u64 val;
5946
5947 /* Set Multicast QPs received by port 2 to map to context one. */
5948 qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);
5949
5950 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5951 unsigned n, regno;
5952 unsigned long flags;
5953
5954 if (dd->n_krcv_queues < 2 ||
5955 !dd->pport[pidx].link_speed_supported)
5956 continue;
5957
5958 ppd = &dd->pport[pidx];
5959
5960 /* be paranoid against later code motion, etc. */
5961 spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
5962 ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
5963 spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
5964
5965 /* Initialize QP to context mapping */
5966 regno = krp_rcvqpmaptable;
5967 val = 0;
5968 if (dd->num_pports > 1)
5969 n = dd->first_user_ctxt / dd->num_pports;
5970 else
5971 n = dd->first_user_ctxt - 1;
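		/*
		 * Packing sketch (follows from the loop below): each
		 * krp_rcvqpmaptable register holds six 5-bit context numbers,
		 * so the 32 QP-map entries span six registers, with the last
		 * register only partially filled.
		 */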
5972 for (i = 0; i < 32; ) {
5973 unsigned ctxt;
5974
5975 if (dd->num_pports > 1)
5976 ctxt = (i % n) * dd->num_pports + pidx;
5977 else if (i % n)
5978 ctxt = (i % n) + 1;
5979 else
5980 ctxt = ppd->hw_pidx;
5981 val |= ctxt << (5 * (i % 6));
5982 i++;
5983 if (i % 6 == 0) {
5984 qib_write_kreg_port(ppd, regno, val);
5985 val = 0;
5986 regno++;
5987 }
5988 }
5989 qib_write_kreg_port(ppd, regno, val);
5990 }
5991
5992 /*
5993 * Set up interrupt mitigation for kernel contexts, but
5994 * not user contexts (user contexts use interrupts when
5995 * stalled waiting for any packet, so want those interrupts
5996 * right away).
5997 */
5998 for (i = 0; i < dd->first_user_ctxt; i++) {
5999 dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
6000 qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
6001 }
6002
6003 /*
6004 * Initialize as (disabled) rcvflow tables. Application code
6005 * will setup each flow as it uses the flow.
6006 * Doesn't clear any of the error bits that might be set.
6007 */
6008 val = TIDFLOW_ERRBITS; /* these are W1C */
6009 for (i = 0; i < dd->cfgctxts; i++) {
6010 int flow;
6011 for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
6012 qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
6013 }
6014
6015 /*
6016 * Dual-port cards init to dual-port recovery, single-port cards to
6017 * the one port. Dual-port cards may later adjust to 1 port,
6018 * and then back to dual port if both ports are connected.
6019 */
6020 if (dd->num_pports)
6021 setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
6022}
6023
6024static int qib_init_7322_variables(struct qib_devdata *dd)
6025{
6026 struct qib_pportdata *ppd;
6027 unsigned features, pidx, sbufcnt;
6028 int ret, mtu;
6029 u32 sbufs, updthresh;
6030
6031 /* pport structs are contiguous, allocated after devdata */
6032 ppd = (struct qib_pportdata *)(dd + 1);
6033 dd->pport = ppd;
6034 ppd[0].dd = dd;
6035 ppd[1].dd = dd;
6036
6037 dd->cspec = (struct qib_chip_specific *)(ppd + 2);
6038
6039 ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
6040 ppd[1].cpspec = &ppd[0].cpspec[1];
6041 ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
6042 ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */
6043
6044 spin_lock_init(&dd->cspec->rcvmod_lock);
6045 spin_lock_init(&dd->cspec->gpio_lock);
6046
6047 /* we haven't yet set QIB_PRESENT, so use read directly */
6048 dd->revision = readq(&dd->kregbase[kr_revision]);
6049
6050 if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
6051 qib_dev_err(dd, "Revision register read failure, "
6052 "giving up initialization\n");
6053 ret = -ENODEV;
6054 goto bail;
6055 }
6056 dd->flags |= QIB_PRESENT; /* now register routines work */
6057
6058 dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
6059 dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
6060 dd->cspec->r1 = dd->minrev == 1;
6061
6062 get_7322_chip_params(dd);
6063 features = qib_7322_boardname(dd);
6064
6065 /* now that piobcnt2k and 4k set, we can allocate these */
6066 sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
6067 NUM_VL15_BUFS + BITS_PER_LONG - 1;
6068 sbufcnt /= BITS_PER_LONG;
6069 dd->cspec->sendchkenable = kmalloc(sbufcnt *
6070 sizeof(*dd->cspec->sendchkenable), GFP_KERNEL);
6071 dd->cspec->sendgrhchk = kmalloc(sbufcnt *
6072 sizeof(*dd->cspec->sendgrhchk), GFP_KERNEL);
6073 dd->cspec->sendibchk = kmalloc(sbufcnt *
6074 sizeof(*dd->cspec->sendibchk), GFP_KERNEL);
6075 if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
6076 !dd->cspec->sendibchk) {
6077 qib_dev_err(dd, "Failed allocation for hdrchk bitmaps\n");
6078 ret = -ENOMEM;
6079 goto bail;
6080 }
6081
6082 ppd = dd->pport;
6083
6084 /*
6085 * GPIO bits for TWSI data and clock,
6086 * used for serial EEPROM.
6087 */
6088 dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
6089 dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
6090 dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
6091
6092 dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
6093 QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
6094 QIB_HAS_THRESH_UPDATE |
6095 (sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
6096 dd->flags |= qib_special_trigger ?
6097 QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
6098
6099 /*
6100 * Setup initial values. These may change when PAT is enabled, but
6101 * we need these to do initial chip register accesses.
6102 */
6103 qib_7322_set_baseaddrs(dd);
6104
6105 mtu = ib_mtu_enum_to_int(qib_ibmtu);
6106 if (mtu == -1)
6107 mtu = QIB_DEFAULT_MTU;
6108
6109 dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
6110 /* all hwerrors become interrupts, unless special purposed */
6111 dd->cspec->hwerrmask = ~0ULL;
6112 /* link_recovery setup causes these errors, so ignore them,
6113 * other than clearing them when they occur */
6114 dd->cspec->hwerrmask &=
6115 ~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
6116 SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
6117 HWE_MASK(LATriggered));
6118
6119 for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
6120 struct qib_chippport_specific *cp = ppd->cpspec;
6121 ppd->link_speed_supported = features & PORT_SPD_CAP;
6122 features >>= PORT_SPD_CAP_SHIFT;
6123 if (!ppd->link_speed_supported) {
6124 /* single port mode (7340, or configured) */
6125 dd->skip_kctxt_mask |= 1 << pidx;
6126 if (pidx == 0) {
6127 /* Make sure port is disabled. */
6128 qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6129 qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6130 ppd[0] = ppd[1];
6131 dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6132 IBSerdesPClkNotDetectMask_0)
6133 | SYM_MASK(HwErrMask,
6134 SDmaMemReadErrMask_0));
6135 dd->cspec->int_enable_mask &= ~(
6136 SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
6137 SYM_MASK(IntMask, SDmaIdleIntMask_0) |
6138 SYM_MASK(IntMask, SDmaProgressIntMask_0) |
6139 SYM_MASK(IntMask, SDmaIntMask_0) |
6140 SYM_MASK(IntMask, ErrIntMask_0) |
6141 SYM_MASK(IntMask, SendDoneIntMask_0));
6142 } else {
6143 /* Make sure port is disabled. */
6144 qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6145 qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6146 dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6147 IBSerdesPClkNotDetectMask_1)
6148 | SYM_MASK(HwErrMask,
6149 SDmaMemReadErrMask_1));
6150 dd->cspec->int_enable_mask &= ~(
6151 SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
6152 SYM_MASK(IntMask, SDmaIdleIntMask_1) |
6153 SYM_MASK(IntMask, SDmaProgressIntMask_1) |
6154 SYM_MASK(IntMask, SDmaIntMask_1) |
6155 SYM_MASK(IntMask, ErrIntMask_1) |
6156 SYM_MASK(IntMask, SendDoneIntMask_1));
6157 }
6158 continue;
6159 }
6160
6161 dd->num_pports++;
6162 qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
6163
6164 ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
6165 ppd->link_width_enabled = IB_WIDTH_4X;
6166 ppd->link_speed_enabled = ppd->link_speed_supported;
6167 /*
6168 * Set the initial values to reasonable default, will be set
6169 * for real when link is up.
6170 */
6171 ppd->link_width_active = IB_WIDTH_4X;
6172 ppd->link_speed_active = QIB_IB_SDR;
6173 ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
6174 switch (qib_num_cfg_vls) {
6175 case 1:
6176 ppd->vls_supported = IB_VL_VL0;
6177 break;
6178 case 2:
6179 ppd->vls_supported = IB_VL_VL0_1;
6180 break;
6181 default:
6182 qib_devinfo(dd->pcidev,
6183 "Invalid num_vls %u, using 4 VLs\n",
6184 qib_num_cfg_vls);
6185 qib_num_cfg_vls = 4;
6186 /* fall through */
6187 case 4:
6188 ppd->vls_supported = IB_VL_VL0_3;
6189 break;
6190 case 8:
6191 if (mtu <= 2048)
6192 ppd->vls_supported = IB_VL_VL0_7;
6193 else {
6194 qib_devinfo(dd->pcidev,
6195 "Invalid num_vls %u for MTU %d "
6196 ", using 4 VLs\n",
6197 qib_num_cfg_vls, mtu);
6198 ppd->vls_supported = IB_VL_VL0_3;
6199 qib_num_cfg_vls = 4;
6200 }
6201 break;
6202 }
6203 ppd->vls_operational = ppd->vls_supported;
6204
6205 init_waitqueue_head(&cp->autoneg_wait);
6206 INIT_DELAYED_WORK(&cp->autoneg_work,
6207 autoneg_7322_work);
6208 if (ppd->dd->cspec->r1)
6209 INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);
6210
6211 /*
6212 * For Mez and similar cards, no qsfp info, so do
6213 * the "cable info" setup here. Can be overridden
6214 * in adapter-specific routines.
6215 */
6216 if (!(dd->flags & QIB_HAS_QSFP)) {
6217 if (!IS_QMH(dd) && !IS_QME(dd))
6218 qib_devinfo(dd->pcidev, "IB%u:%u: "
6219 "Unknown mezzanine card type\n",
6220 dd->unit, ppd->port);
6221 cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
6222 /*
6223 * Choose center value as default tx serdes setting
6224 * until changed through module parameter.
6225 */
6226 ppd->cpspec->no_eep = IS_QMH(dd) ?
6227 TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4;
6228 } else
6229 cp->h1_val = H1_FORCE_VAL;
6230
6231 /* Avoid writes to chip for mini_init */
6232 if (!qib_mini_init)
6233 write_7322_init_portregs(ppd);
6234
6235 init_timer(&cp->chase_timer);
6236 cp->chase_timer.function = reenable_chase;
6237 cp->chase_timer.data = (unsigned long)ppd;
6238
6239 ppd++;
6240 }
6241
6242 dd->rcvhdrentsize = qib_rcvhdrentsize ?
6243 qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
6244 dd->rcvhdrsize = qib_rcvhdrsize ?
6245 qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
6246 dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
6247
6248 /* we always allocate at least 2048 bytes for eager buffers */
6249 dd->rcvegrbufsize = max(mtu, 2048);
6250 BUG_ON(!is_power_of_2(dd->rcvegrbufsize));
6251 dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
6252
6253 qib_7322_tidtemplate(dd);
6254
6255 /*
6256 * We can request a receive interrupt for 1 or
6257 * more packets from current offset.
6258 */
6259 dd->rhdrhead_intr_off =
6260 (u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;
6261
6262 /* setup the stats timer; the add_timer is done at end of init */
6263 init_timer(&dd->stats_timer);
6264 dd->stats_timer.function = qib_get_7322_faststats;
6265 dd->stats_timer.data = (unsigned long) dd;
6266
6267 dd->ureg_align = 0x10000; /* 64KB alignment */
6268
6269 dd->piosize2kmax_dwords = dd->piosize2k >> 2;
6270
6271 qib_7322_config_ctxts(dd);
6272 qib_set_ctxtcnt(dd);
6273
6274 if (qib_wc_pat) {
6275 resource_size_t vl15off;
6276 /*
6277 * We do not set WC on the VL15 buffers to avoid
6278 * a rare problem with unaligned writes from
6279 * interrupt-flushed store buffers, so we need
6280 * to map those separately here. We can't solve
6281 * this for the rarely used mtrr case.
6282 */
6283 ret = init_chip_wc_pat(dd, 0);
6284 if (ret)
6285 goto bail;
6286
6287 /* vl15 buffers start just after the 4k buffers */
6288 vl15off = dd->physaddr + (dd->piobufbase >> 32) +
6289 dd->piobcnt4k * dd->align4k;
6290 dd->piovl15base = ioremap_nocache(vl15off,
6291 NUM_VL15_BUFS * dd->align4k);
6292 if (!dd->piovl15base)
6293 goto bail;
6294 }
6295 qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
6296
6297 ret = 0;
6298 if (qib_mini_init)
6299 goto bail;
6300 if (!dd->num_pports) {
6301 qib_dev_err(dd, "No ports enabled, giving up initialization\n");
6302 goto bail; /* no error, so can still figure out why err */
6303 }
6304
6305 write_7322_initregs(dd);
6306 ret = qib_create_ctxts(dd);
6307 init_7322_cntrnames(dd);
6308
6309 updthresh = 8U; /* update threshold */
6310
6311 /* use all of 4KB buffers for the kernel SDMA, zero if !SDMA.
6312 * reserve the update threshold amount for other kernel use, such
6313 * as sending SMI, MAD, and ACKs, or 3, whichever is greater,
6314 * unless we aren't enabling SDMA, in which case we want to use
6315 * all the 4k bufs for the kernel.
6316 * if this was less than the update threshold, we could wait
6317 * a long time for an update. Coded this way because we
6318 * sometimes change the update threshold for various reasons,
6319 * and we want this to remain robust.
6320 */
6321 if (dd->flags & QIB_HAS_SEND_DMA) {
6322 dd->cspec->sdmabufcnt = dd->piobcnt4k;
6323 sbufs = updthresh > 3 ? updthresh : 3;
6324 } else {
6325 dd->cspec->sdmabufcnt = 0;
6326 sbufs = dd->piobcnt4k;
6327 }
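	/*
	 * Partition what is left for PIO: the top of the 2k+4k buffer
	 * space (minus the SDMA buffers) is the last buffer usable for
	 * PIO, 'sbufs' just below that are kept for kernel special sends
	 * (SMI, MAD, ACKs), and the remainder is split evenly among the
	 * user contexts as 'pbufsctxt'.
	 */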
6328 dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
6329 dd->cspec->sdmabufcnt;
6330 dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
6331 dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
6332 dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
6333 dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;
6334
6335 /*
6336 * If we have 16 user contexts, we will have 7 sbufs
6337 * per context, so reduce the update threshold to match. We
6338 * want to update before we actually run out, at low pbufs/ctxt
6339 * so give ourselves some margin.
6340 */
6341 if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
6342 updthresh = dd->pbufsctxt - 2;
6343 dd->cspec->updthresh_dflt = updthresh;
6344 dd->cspec->updthresh = updthresh;
6345
6346 /* before full enable, no interrupts, no locking needed */
6347 dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
6348 << SYM_LSB(SendCtrl, AvailUpdThld)) |
6349 SYM_MASK(SendCtrl, SendBufAvailPad64Byte);
6350
6351 dd->psxmitwait_supported = 1;
6352 dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
6353bail:
6354 if (!dd->ctxtcnt)
6355 dd->ctxtcnt = 1; /* for other initialization code */
6356
6357 return ret;
6358}
6359
6360static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
6361 u32 *pbufnum)
6362{
6363 u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
6364 struct qib_devdata *dd = ppd->dd;
6365
6366 /* last is same for 2k and 4k, because we use 4k if all 2k busy */
6367 if (pbc & PBC_7322_VL15_SEND) {
6368 first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
6369 last = first;
6370 } else {
6371 if ((plen + 1) > dd->piosize2kmax_dwords)
6372 first = dd->piobcnt2k;
6373 else
6374 first = 0;
6375 last = dd->cspec->lastbuf_for_pio;
6376 }
6377 return qib_getsendbuf_range(dd, pbufnum, first, last);
6378}
6379
6380static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
6381 u32 start)
6382{
6383 qib_write_kreg_port(ppd, krp_psinterval, intv);
6384 qib_write_kreg_port(ppd, krp_psstart, start);
6385}
6386
6387/*
6388 * Must be called with sdma_lock held, or before init finished.
6389 */
6390static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
6391{
6392 qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
6393}
6394
6395static struct sdma_set_state_action sdma_7322_action_table[] = {
6396 [qib_sdma_state_s00_hw_down] = {
6397 .go_s99_running_tofalse = 1,
6398 .op_enable = 0,
6399 .op_intenable = 0,
6400 .op_halt = 0,
6401 .op_drain = 0,
6402 },
6403 [qib_sdma_state_s10_hw_start_up_wait] = {
6404 .op_enable = 0,
6405 .op_intenable = 1,
6406 .op_halt = 1,
6407 .op_drain = 0,
6408 },
6409 [qib_sdma_state_s20_idle] = {
6410 .op_enable = 1,
6411 .op_intenable = 1,
6412 .op_halt = 1,
6413 .op_drain = 0,
6414 },
6415 [qib_sdma_state_s30_sw_clean_up_wait] = {
6416 .op_enable = 0,
6417 .op_intenable = 1,
6418 .op_halt = 1,
6419 .op_drain = 0,
6420 },
6421 [qib_sdma_state_s40_hw_clean_up_wait] = {
6422 .op_enable = 1,
6423 .op_intenable = 1,
6424 .op_halt = 1,
6425 .op_drain = 0,
6426 },
6427 [qib_sdma_state_s50_hw_halt_wait] = {
6428 .op_enable = 1,
6429 .op_intenable = 1,
6430 .op_halt = 1,
6431 .op_drain = 1,
6432 },
6433 [qib_sdma_state_s99_running] = {
6434 .op_enable = 1,
6435 .op_intenable = 1,
6436 .op_halt = 0,
6437 .op_drain = 0,
6438 .go_s99_running_totrue = 1,
6439 },
6440};
6441
6442static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
6443{
6444 ppd->sdma_state.set_state_action = sdma_7322_action_table;
6445}
6446
6447static int init_sdma_7322_regs(struct qib_pportdata *ppd)
6448{
6449 struct qib_devdata *dd = ppd->dd;
6450 unsigned lastbuf, erstbuf;
6451 u64 senddmabufmask[3] = { 0 };
6452 int n, ret = 0;
6453
6454 qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
6455 qib_sdma_7322_setlengen(ppd);
6456 qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
6457 qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
6458 qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
6459 qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);
6460
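	/*
	 * Carve this port's SDMA buffers out of the top of the 2k+4k
	 * PIO buffer space: with two ports, port 1 takes the lower half
	 * of that region and port 2 the upper half; a single port takes
	 * the whole region.
	 */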
6461 if (dd->num_pports)
6462 n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
6463 else
6464 n = dd->cspec->sdmabufcnt; /* failsafe for init */
6465 erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
6466 ((dd->num_pports == 1 || ppd->port == 2) ? n :
6467 dd->cspec->sdmabufcnt);
6468 lastbuf = erstbuf + n;
6469
6470 ppd->sdma_state.first_sendbuf = erstbuf;
6471 ppd->sdma_state.last_sendbuf = lastbuf;
6472 for (; erstbuf < lastbuf; ++erstbuf) {
6473 unsigned word = erstbuf / BITS_PER_LONG;
6474 unsigned bit = erstbuf & (BITS_PER_LONG - 1);
6475
6476 BUG_ON(word >= 3);
6477 senddmabufmask[word] |= 1ULL << bit;
6478 }
6479 qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
6480 qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
6481 qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
6482 return ret;
6483}
6484
6485/* sdma_lock must be held */
6486static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
6487{
6488 struct qib_devdata *dd = ppd->dd;
6489 int sane;
6490 int use_dmahead;
6491 u16 swhead;
6492 u16 swtail;
6493 u16 cnt;
6494 u16 hwhead;
6495
6496 use_dmahead = __qib_sdma_running(ppd) &&
6497 (dd->flags & QIB_HAS_SDMA_TIMEOUT);
6498retry:
6499 hwhead = use_dmahead ?
6500 (u16) le64_to_cpu(*ppd->sdma_head_dma) :
6501 (u16) qib_read_kreg_port(ppd, krp_senddmahead);
6502
6503 swhead = ppd->sdma_descq_head;
6504 swtail = ppd->sdma_descq_tail;
6505 cnt = ppd->sdma_descq_cnt;
6506
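	/*
	 * Sanity-check the head that the hardware (or the DMA'ed shadow)
	 * reported: it must lie within the currently posted region of the
	 * (possibly wrapped) descriptor ring, else fall back to the
	 * register and finally to "no progress".
	 */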
6507 if (swhead < swtail)
6508 /* not wrapped */
6509 sane = (hwhead >= swhead) & (hwhead <= swtail);
6510 else if (swhead > swtail)
6511 /* wrapped around */
6512 sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
6513 (hwhead <= swtail);
6514 else
6515 /* empty */
6516 sane = (hwhead == swhead);
6517
6518 if (unlikely(!sane)) {
6519 if (use_dmahead) {
6520 /* try one more time, directly from the register */
6521 use_dmahead = 0;
6522 goto retry;
6523 }
6524 /* proceed as if no progress */
6525 hwhead = swhead;
6526 }
6527
6528 return hwhead;
6529}
6530
6531static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
6532{
6533 u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);
6534
6535 return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
6536 (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
6537 !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
6538 !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
6539}
6540
6541/*
6542 * Compute the amount of delay before sending the next packet if the
6543 * port's send rate differs from the static rate set for the QP.
6544 * The delay affects the next packet and the amount of the delay is
 * based on the length of this packet.
6546 */
6547static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
6548 u8 srate, u8 vl)
6549{
6550 u8 snd_mult = ppd->delay_mult;
6551 u8 rcv_mult = ib_rate_to_delay[srate];
6552 u32 ret;
6553
6554 ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;
6555
6556 /* Indicate VL15, else set the VL in the control word */
6557 if (vl == 15)
6558 ret |= PBC_7322_VL15_SEND_CTRL;
6559 else
6560 ret |= vl << PBC_VL_NUM_LSB;
6561 ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
6562
6563 return ret;
6564}
6565
6566/*
6567 * Enable the per-port VL15 send buffers for use.
6568 * They follow the rest of the buffers, without a config parameter.
6569 * This was in initregs, but that is done before the shadow
6570 * is set up, and this has to be done after the shadow is
6571 * set up.
6572 */
6573static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
6574{
6575 unsigned vl15bufs;
6576
6577 vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
6578 qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
6579 TXCHK_CHG_TYPE_KERN, NULL);
6580}
6581
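/*
 * Eager buffer/TID layout: the kernel contexts (one per IB port) split
 * KCTXT0_EGRCNT between them; user contexts follow, each with the
 * chip-specific rcvegrcnt, so their TID base is simply an offset past
 * the kernel contexts' entries.
 */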
6582static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
6583{
6584 if (rcd->ctxt < NUM_IB_PORTS) {
6585 if (rcd->dd->num_pports > 1) {
6586 rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
6587 rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
6588 } else {
6589 rcd->rcvegrcnt = KCTXT0_EGRCNT;
6590 rcd->rcvegr_tid_base = 0;
6591 }
6592 } else {
6593 rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
6594 rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
6595 (rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
6596 }
6597}
6598
6599#define QTXSLEEPS 5000
6600static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
6601 u32 len, u32 which, struct qib_ctxtdata *rcd)
6602{
6603 int i;
6604 const int last = start + len - 1;
6605 const int lastr = last / BITS_PER_LONG;
6606 u32 sleeps = 0;
6607 int wait = rcd != NULL;
6608 unsigned long flags;
6609
6610 while (wait) {
6611 unsigned long shadow;
6612 int cstart, previ = -1;
6613
6614 /*
6615 * when flipping from kernel to user, we can't change
6616 * the checking type if the buffer is allocated to the
6617 * driver. It's OK the other direction, because it's
		 * from close, and we have just disarmed all the
6619 * buffers. All the kernel to kernel changes are also
6620 * OK.
6621 */
6622 for (cstart = start; cstart <= last; cstart++) {
6623 i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
6624 / BITS_PER_LONG;
6625 if (i != previ) {
6626 shadow = (unsigned long)
6627 le64_to_cpu(dd->pioavailregs_dma[i]);
6628 previ = i;
6629 }
6630 if (test_bit(((2 * cstart) +
6631 QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
6632 % BITS_PER_LONG, &shadow))
6633 break;
6634 }
6635
6636 if (cstart > last)
6637 break;
6638
6639 if (sleeps == QTXSLEEPS)
6640 break;
6641 /* make sure we see an updated copy next time around */
6642 sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
6643 sleeps++;
		msleep(20);
	}
6646
6647 switch (which) {
6648 case TXCHK_CHG_TYPE_DIS1:
6649 /*
6650 * disable checking on a range; used by diags; just
6651 * one buffer, but still written generically
6652 */
6653 for (i = start; i <= last; i++)
6654 clear_bit(i, dd->cspec->sendchkenable);
6655 break;
6656
6657 case TXCHK_CHG_TYPE_ENAB1:
6658 /*
6659 * (re)enable checking on a range; used by diags; just
6660 * one buffer, but still written generically; read
6661 * scratch to be sure buffer actually triggered, not
6662 * just flushed from processor.
6663 */
6664 qib_read_kreg32(dd, kr_scratch);
6665 for (i = start; i <= last; i++)
6666 set_bit(i, dd->cspec->sendchkenable);
6667 break;
6668
6669 case TXCHK_CHG_TYPE_KERN:
6670 /* usable by kernel */
6671 for (i = start; i <= last; i++) {
6672 set_bit(i, dd->cspec->sendibchk);
6673 clear_bit(i, dd->cspec->sendgrhchk);
6674 }
6675 spin_lock_irqsave(&dd->uctxt_lock, flags);
6676 /* see if we need to raise avail update threshold */
6677 for (i = dd->first_user_ctxt;
6678 dd->cspec->updthresh != dd->cspec->updthresh_dflt
6679 && i < dd->cfgctxts; i++)
6680 if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
6681 ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
6682 < dd->cspec->updthresh_dflt)
6683 break;
6684 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
6685 if (i == dd->cfgctxts) {
6686 spin_lock_irqsave(&dd->sendctrl_lock, flags);
6687 dd->cspec->updthresh = dd->cspec->updthresh_dflt;
6688 dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
6689 dd->sendctrl |= (dd->cspec->updthresh &
6690 SYM_RMASK(SendCtrl, AvailUpdThld)) <<
6691 SYM_LSB(SendCtrl, AvailUpdThld);
6692 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
6693 sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
6694 }
6695 break;
6696
6697 case TXCHK_CHG_TYPE_USER:
6698 /* for user process */
6699 for (i = start; i <= last; i++) {
6700 clear_bit(i, dd->cspec->sendibchk);
6701 set_bit(i, dd->cspec->sendgrhchk);
6702 }
6703 spin_lock_irqsave(&dd->sendctrl_lock, flags);
6704 if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
6705 / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
6706 dd->cspec->updthresh = (rcd->piocnt /
6707 rcd->subctxt_cnt) - 1;
6708 dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
6709 dd->sendctrl |= (dd->cspec->updthresh &
6710 SYM_RMASK(SendCtrl, AvailUpdThld))
6711 << SYM_LSB(SendCtrl, AvailUpdThld);
6712 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
6713 sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
6714 } else
6715 spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
6716 break;
6717
6718 default:
6719 break;
6720 }
6721
6722 for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
6723 qib_write_kreg(dd, kr_sendcheckmask + i,
6724 dd->cspec->sendchkenable[i]);
6725
6726 for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
6727 qib_write_kreg(dd, kr_sendgrhcheckmask + i,
6728 dd->cspec->sendgrhchk[i]);
6729 qib_write_kreg(dd, kr_sendibpktmask + i,
6730 dd->cspec->sendibchk[i]);
6731 }
6732
6733 /*
6734 * Be sure whatever we did was seen by the chip and acted upon,
6735 * before we return. Mostly important for which >= 2.
6736 */
6737 qib_read_kreg32(dd, kr_scratch);
6738}
6739
6740
6741/* useful for trigger analyzers, etc. */
6742static void writescratch(struct qib_devdata *dd, u32 val)
6743{
6744 qib_write_kreg(dd, kr_scratch, val);
6745}
6746
6747/* Dummy for now, use chip regs soon */
6748static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
6749{
6750 return -ENXIO;
6751}
6752
6753/**
6754 * qib_init_iba7322_funcs - set up the chip-specific function pointers
6755 * @dev: the pci_dev for qlogic_ib device
6756 * @ent: pci_device_id struct for this dev
6757 *
6758 * Also allocates, inits, and returns the devdata struct for this
6759 * device instance
6760 *
6761 * This is global, and is called directly at init to set up the
6762 * chip-specific function pointers for later use.
6763 */
6764struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
6765 const struct pci_device_id *ent)
6766{
6767 struct qib_devdata *dd;
6768 int ret, i;
6769 u32 tabsize, actual_cnt = 0;
6770
6771 dd = qib_alloc_devdata(pdev,
6772 NUM_IB_PORTS * sizeof(struct qib_pportdata) +
6773 sizeof(struct qib_chip_specific) +
6774 NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
6775 if (IS_ERR(dd))
6776 goto bail;
6777
6778 dd->f_bringup_serdes = qib_7322_bringup_serdes;
6779 dd->f_cleanup = qib_setup_7322_cleanup;
6780 dd->f_clear_tids = qib_7322_clear_tids;
6781 dd->f_free_irq = qib_7322_free_irq;
6782 dd->f_get_base_info = qib_7322_get_base_info;
6783 dd->f_get_msgheader = qib_7322_get_msgheader;
6784 dd->f_getsendbuf = qib_7322_getsendbuf;
6785 dd->f_gpio_mod = gpio_7322_mod;
6786 dd->f_eeprom_wen = qib_7322_eeprom_wen;
6787 dd->f_hdrqempty = qib_7322_hdrqempty;
6788 dd->f_ib_updown = qib_7322_ib_updown;
6789 dd->f_init_ctxt = qib_7322_init_ctxt;
6790 dd->f_initvl15_bufs = qib_7322_initvl15_bufs;
6791 dd->f_intr_fallback = qib_7322_intr_fallback;
6792 dd->f_late_initreg = qib_late_7322_initreg;
6793 dd->f_setpbc_control = qib_7322_setpbc_control;
6794 dd->f_portcntr = qib_portcntr_7322;
6795 dd->f_put_tid = qib_7322_put_tid;
6796 dd->f_quiet_serdes = qib_7322_mini_quiet_serdes;
6797 dd->f_rcvctrl = rcvctrl_7322_mod;
6798 dd->f_read_cntrs = qib_read_7322cntrs;
6799 dd->f_read_portcntrs = qib_read_7322portcntrs;
6800 dd->f_reset = qib_do_7322_reset;
6801 dd->f_init_sdma_regs = init_sdma_7322_regs;
6802 dd->f_sdma_busy = qib_sdma_7322_busy;
6803 dd->f_sdma_gethead = qib_sdma_7322_gethead;
6804 dd->f_sdma_sendctrl = qib_7322_sdma_sendctrl;
6805 dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
6806 dd->f_sdma_update_tail = qib_sdma_update_7322_tail;
6807 dd->f_sendctrl = sendctrl_7322_mod;
6808 dd->f_set_armlaunch = qib_set_7322_armlaunch;
6809 dd->f_set_cntr_sample = qib_set_cntr_7322_sample;
6810 dd->f_iblink_state = qib_7322_iblink_state;
6811 dd->f_ibphys_portstate = qib_7322_phys_portstate;
6812 dd->f_get_ib_cfg = qib_7322_get_ib_cfg;
6813 dd->f_set_ib_cfg = qib_7322_set_ib_cfg;
6814 dd->f_set_ib_loopback = qib_7322_set_loopback;
6815 dd->f_get_ib_table = qib_7322_get_ib_table;
6816 dd->f_set_ib_table = qib_7322_set_ib_table;
6817 dd->f_set_intr_state = qib_7322_set_intr_state;
6818 dd->f_setextled = qib_setup_7322_setextled;
6819 dd->f_txchk_change = qib_7322_txchk_change;
6820 dd->f_update_usrhead = qib_update_7322_usrhead;
6821 dd->f_wantpiobuf_intr = qib_wantpiobuf_7322_intr;
6822 dd->f_xgxs_reset = qib_7322_mini_pcs_reset;
6823 dd->f_sdma_hw_clean_up = qib_7322_sdma_hw_clean_up;
6824 dd->f_sdma_hw_start_up = qib_7322_sdma_hw_start_up;
6825 dd->f_sdma_init_early = qib_7322_sdma_init_early;
6826 dd->f_writescratch = writescratch;
6827 dd->f_tempsense_rd = qib_7322_tempsense_rd;
6828 /*
6829 * Do remaining PCIe setup and save PCIe values in dd.
6830 * Any error printing is already done by the init code.
6831 * On return, we have the chip mapped, but chip registers
6832 * are not set up until start of qib_init_7322_variables.
6833 */
6834 ret = qib_pcie_ddinit(dd, pdev, ent);
6835 if (ret < 0)
6836 goto bail_free;
6837
6838 /* initialize chip-specific variables */
6839 ret = qib_init_7322_variables(dd);
6840 if (ret)
6841 goto bail_cleanup;
6842
6843 if (qib_mini_init || !dd->num_pports)
6844 goto bail;
6845
6846 /*
6847 * Determine number of vectors we want; depends on port count
6848 * and number of configured kernel receive queues actually used.
6849 * Should also depend on whether sdma is enabled or not, but
6850 * that's such a rare testing case it's not worth worrying about.
6851 */
6852 tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
6853 for (i = 0; i < tabsize; i++)
6854 if ((i < ARRAY_SIZE(irq_table) &&
6855 irq_table[i].port <= dd->num_pports) ||
6856 (i >= ARRAY_SIZE(irq_table) &&
6857 dd->rcd[i - ARRAY_SIZE(irq_table)]))
6858 actual_cnt++;
	/* reduce by ctxt's < 2 */
6860 if (qib_krcvq01_no_msi)
6861 actual_cnt -= dd->num_pports;
6862
	tabsize = actual_cnt;
6864 dd->cspec->msix_entries = kmalloc(tabsize *
6865 sizeof(struct msix_entry), GFP_KERNEL);
6866 dd->cspec->msix_arg = kmalloc(tabsize *
6867 sizeof(void *), GFP_KERNEL);
6868 if (!dd->cspec->msix_entries || !dd->cspec->msix_arg) {
6869 qib_dev_err(dd, "No memory for MSIx table\n");
6870 tabsize = 0;
6871 }
6872 for (i = 0; i < tabsize; i++)
6873 dd->cspec->msix_entries[i].entry = i;
6874
6875 if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries))
6876 qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
6877 "continuing anyway\n");
6878 /* may be less than we wanted, if not enough available */
6879 dd->cspec->num_msix_entries = tabsize;
6880
6881 /* setup interrupt handler */
6882 qib_setup_7322_interrupt(dd, 1);
6883
6884 /* clear diagctrl register, in case diags were running and crashed */
6885 qib_write_kreg(dd, kr_hwdiagctrl, 0);
6886
	goto bail;
6888
6889bail_cleanup:
6890 qib_pcie_ddcleanup(dd);
6891bail_free:
6892 qib_free_devdata(dd);
6893 dd = ERR_PTR(ret);
6894bail:
6895 return dd;
6896}
6897
6898/*
 * Set the table entry at the specified index from the table specified.
6900 * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
6901 * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
6902 * 'idx' below addresses the correct entry, while its 4 LSBs select the
6903 * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table.
6904 */
6905#define DDS_ENT_AMP_LSB 14
6906#define DDS_ENT_MAIN_LSB 9
6907#define DDS_ENT_POST_LSB 5
6908#define DDS_ENT_PRE_XTRA_LSB 3
6909#define DDS_ENT_PRE_LSB 0
6910
6911/*
6912 * Set one entry in the TxDDS table for spec'd port
6913 * ridx picks one of the entries, while tp points
6914 * to the appropriate table entry.
6915 */
6916static void set_txdds(struct qib_pportdata *ppd, int ridx,
6917 const struct txdds_ent *tp)
6918{
6919 struct qib_devdata *dd = ppd->dd;
6920 u32 pack_ent;
6921 int regidx;
6922
6923 /* Get correct offset in chip-space, and in source table */
6924 regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
6925 /*
6926 * We do not use qib_write_kreg_port() because it was intended
6927 * only for registers in the lower "port specific" pages.
6928 * So do index calculation by hand.
6929 */
6930 if (ppd->hw_pidx)
6931 regidx += (dd->palign / sizeof(u64));
6932
6933 pack_ent = tp->amp << DDS_ENT_AMP_LSB;
6934 pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
6935 pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
6936 pack_ent |= tp->post << DDS_ENT_POST_LSB;
6937 qib_write_kreg(dd, regidx, pack_ent);
6938 /* Prevent back-to-back writes by hitting scratch */
6939 qib_write_kreg(ppd->dd, kr_scratch, 0);
6940}
6941
6942static const struct vendor_txdds_ent vendor_txdds[] = {
6943 { /* Amphenol 1m 30awg NoEq */
6944 { 0x41, 0x50, 0x48 }, "584470002 ",
6945 { 10, 0, 0, 5 }, { 10, 0, 0, 9 }, { 7, 1, 0, 13 },
6946 },
6947 { /* Amphenol 3m 28awg NoEq */
6948 { 0x41, 0x50, 0x48 }, "584470004 ",
6949 { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 7, 15 },
6950 },
6951 { /* Finisar 3m OM2 Optical */
6952 { 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
6953 { 0, 0, 0, 3 }, { 0, 0, 0, 4 }, { 0, 0, 0, 13 },
6954 },
6955 { /* Finisar 30m OM2 Optical */
6956 { 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
6957 { 0, 0, 0, 1 }, { 0, 0, 0, 5 }, { 0, 0, 0, 11 },
6958 },
6959 { /* Finisar Default OM2 Optical */
6960 { 0x00, 0x90, 0x65 }, NULL,
6961 { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 0, 0, 12 },
6962 },
6963 { /* Gore 1m 30awg NoEq */
6964 { 0x00, 0x21, 0x77 }, "QSN3300-1 ",
6965 { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 0, 15 },
6966 },
6967 { /* Gore 2m 30awg NoEq */
6968 { 0x00, 0x21, 0x77 }, "QSN3300-2 ",
6969 { 0, 0, 0, 8 }, { 0, 0, 0, 10 }, { 0, 1, 7, 15 },
6970 },
6971 { /* Gore 1m 28awg NoEq */
6972 { 0x00, 0x21, 0x77 }, "QSN3800-1 ",
6973 { 0, 0, 0, 6 }, { 0, 0, 0, 8 }, { 0, 1, 0, 15 },
6974 },
6975 { /* Gore 3m 28awg NoEq */
6976 { 0x00, 0x21, 0x77 }, "QSN3800-3 ",
6977 { 0, 0, 0, 9 }, { 0, 0, 0, 13 }, { 0, 1, 7, 15 },
6978 },
6979 { /* Gore 5m 24awg Eq */
6980 { 0x00, 0x21, 0x77 }, "QSN7000-5 ",
6981 { 0, 0, 0, 7 }, { 0, 0, 0, 9 }, { 0, 1, 3, 15 },
6982 },
6983 { /* Gore 7m 24awg Eq */
6984 { 0x00, 0x21, 0x77 }, "QSN7000-7 ",
6985 { 0, 0, 0, 9 }, { 0, 0, 0, 11 }, { 0, 2, 6, 15 },
6986 },
6987 { /* Gore 5m 26awg Eq */
6988 { 0x00, 0x21, 0x77 }, "QSN7600-5 ",
6989 { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 9, 13 },
6990 },
6991 { /* Gore 7m 26awg Eq */
6992 { 0x00, 0x21, 0x77 }, "QSN7600-7 ",
6993 { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 10, 1, 8, 15 },
6994 },
6995 { /* Intersil 12m 24awg Active */
6996 { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
6997 { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 3, 0, 9 },
6998 },
6999 { /* Intersil 10m 28awg Active */
7000 { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
7001 { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 2, 0, 2 },
7002 },
7003 { /* Intersil 7m 30awg Active */
7004 { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
7005 { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 1, 0, 3 },
7006 },
7007 { /* Intersil 5m 32awg Active */
7008 { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
7009 { 0, 0, 0, 6 }, { 0, 0, 0, 6 }, { 0, 2, 0, 8 },
7010 },
7011 { /* Intersil Default Active */
7012 { 0x00, 0x30, 0xB4 }, NULL,
7013 { 0, 0, 0, 6 }, { 0, 0, 0, 5 }, { 0, 2, 0, 5 },
7014 },
7015 { /* Luxtera 20m Active Optical */
7016 { 0x00, 0x25, 0x63 }, NULL,
7017 { 0, 0, 0, 5 }, { 0, 0, 0, 8 }, { 0, 2, 0, 12 },
7018 },
7019 { /* Molex 1M Cu loopback */
7020 { 0x00, 0x09, 0x3A }, "74763-0025 ",
7021 { 2, 2, 6, 15 }, { 2, 2, 6, 15 }, { 2, 2, 6, 15 },
7022 },
7023 { /* Molex 2m 28awg NoEq */
7024 { 0x00, 0x09, 0x3A }, "74757-2201 ",
7025 { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 1, 15 },
7026 },
7027};
7028
7029static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
7030 /* amp, pre, main, post */
7031 { 2, 2, 15, 6 }, /* Loopback */
7032 { 0, 0, 0, 1 }, /* 2 dB */
7033 { 0, 0, 0, 2 }, /* 3 dB */
7034 { 0, 0, 0, 3 }, /* 4 dB */
7035 { 0, 0, 0, 4 }, /* 5 dB */
7036 { 0, 0, 0, 5 }, /* 6 dB */
7037 { 0, 0, 0, 6 }, /* 7 dB */
7038 { 0, 0, 0, 7 }, /* 8 dB */
7039 { 0, 0, 0, 8 }, /* 9 dB */
7040 { 0, 0, 0, 9 }, /* 10 dB */
7041 { 0, 0, 0, 10 }, /* 11 dB */
7042 { 0, 0, 0, 11 }, /* 12 dB */
7043 { 0, 0, 0, 12 }, /* 13 dB */
7044 { 0, 0, 0, 13 }, /* 14 dB */
7045 { 0, 0, 0, 14 }, /* 15 dB */
7046 { 0, 0, 0, 15 }, /* 16 dB */
7047};
7048
7049static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
7050 /* amp, pre, main, post */
7051 { 2, 2, 15, 6 }, /* Loopback */
7052 { 0, 0, 0, 8 }, /* 2 dB */
7053 { 0, 0, 0, 8 }, /* 3 dB */
7054 { 0, 0, 0, 9 }, /* 4 dB */
7055 { 0, 0, 0, 9 }, /* 5 dB */
7056 { 0, 0, 0, 10 }, /* 6 dB */
7057 { 0, 0, 0, 10 }, /* 7 dB */
7058 { 0, 0, 0, 11 }, /* 8 dB */
7059 { 0, 0, 0, 11 }, /* 9 dB */
7060 { 0, 0, 0, 12 }, /* 10 dB */
7061 { 0, 0, 0, 12 }, /* 11 dB */
7062 { 0, 0, 0, 13 }, /* 12 dB */
7063 { 0, 0, 0, 13 }, /* 13 dB */
7064 { 0, 0, 0, 14 }, /* 14 dB */
7065 { 0, 0, 0, 14 }, /* 15 dB */
7066 { 0, 0, 0, 15 }, /* 16 dB */
7067};
7068
7069static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
7070 /* amp, pre, main, post */
7071 { 2, 2, 15, 6 }, /* Loopback */
	{ 0, 1, 0, 7 },	/* 2 dB (also QMH7342) */
	{ 0, 1, 0, 9 },	/* 3 dB (also QMH7342) */
	{ 0, 1, 0, 11 }, /* 4 dB */
7075 { 0, 1, 0, 13 }, /* 5 dB */
7076 { 0, 1, 0, 15 }, /* 6 dB */
7077 { 0, 1, 3, 15 }, /* 7 dB */
7078 { 0, 1, 7, 15 }, /* 8 dB */
7079 { 0, 1, 7, 15 }, /* 9 dB */
7080 { 0, 1, 8, 15 }, /* 10 dB */
7081 { 0, 1, 9, 15 }, /* 11 dB */
7082 { 0, 1, 10, 15 }, /* 12 dB */
7083 { 0, 2, 6, 15 }, /* 13 dB */
7084 { 0, 2, 7, 15 }, /* 14 dB */
7085 { 0, 2, 8, 15 }, /* 15 dB */
7086 { 0, 2, 9, 15 }, /* 16 dB */
7087};
7088
/*
7090 * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ.
7091 * These are mostly used for mez cards going through connectors
7092 * and backplane traces, but can be used to add other "unusual"
7093 * table values as well.
7094 */
7095static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
7096 /* amp, pre, main, post */
7097 { 0, 0, 0, 1 }, /* QMH7342 backplane settings */
7098 { 0, 0, 0, 1 }, /* QMH7342 backplane settings */
7099 { 0, 0, 0, 2 }, /* QMH7342 backplane settings */
7100 { 0, 0, 0, 2 }, /* QMH7342 backplane settings */
7101 { 0, 0, 0, 11 }, /* QME7342 backplane settings */
7102 { 0, 0, 0, 11 }, /* QME7342 backplane settings */
7103 { 0, 0, 0, 11 }, /* QME7342 backplane settings */
7104 { 0, 0, 0, 11 }, /* QME7342 backplane settings */
7105 { 0, 0, 0, 11 }, /* QME7342 backplane settings */
7106 { 0, 0, 0, 11 }, /* QME7342 backplane settings */
7107 { 0, 0, 0, 11 }, /* QME7342 backplane settings */
	{ 0, 0, 0, 3 },	/* QMH7342 backplane settings */
	{ 0, 0, 0, 4 },	/* QMH7342 backplane settings */
};
7111
7112static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
7113 /* amp, pre, main, post */
7114 { 0, 0, 0, 7 }, /* QMH7342 backplane settings */
7115 { 0, 0, 0, 7 }, /* QMH7342 backplane settings */
7116 { 0, 0, 0, 8 }, /* QMH7342 backplane settings */
7117 { 0, 0, 0, 8 }, /* QMH7342 backplane settings */
7118 { 0, 0, 0, 13 }, /* QME7342 backplane settings */
7119 { 0, 0, 0, 13 }, /* QME7342 backplane settings */
7120 { 0, 0, 0, 13 }, /* QME7342 backplane settings */
7121 { 0, 0, 0, 13 }, /* QME7342 backplane settings */
7122 { 0, 0, 0, 13 }, /* QME7342 backplane settings */
7123 { 0, 0, 0, 13 }, /* QME7342 backplane settings */
7124 { 0, 0, 0, 13 }, /* QME7342 backplane settings */
	{ 0, 0, 0, 9 },	/* QMH7342 backplane settings */
	{ 0, 0, 0, 10 }, /* QMH7342 backplane settings */
};
7128
7129static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
7130 /* amp, pre, main, post */
7131 { 0, 1, 0, 4 }, /* QMH7342 backplane settings */
7132 { 0, 1, 0, 5 }, /* QMH7342 backplane settings */
7133 { 0, 1, 0, 6 }, /* QMH7342 backplane settings */
7134 { 0, 1, 0, 8 }, /* QMH7342 backplane settings */
7135 { 0, 1, 12, 10 }, /* QME7342 backplane setting */
7136 { 0, 1, 12, 11 }, /* QME7342 backplane setting */
7137 { 0, 1, 12, 12 }, /* QME7342 backplane setting */
7138 { 0, 1, 12, 14 }, /* QME7342 backplane setting */
7139 { 0, 1, 12, 6 }, /* QME7342 backplane setting */
7140 { 0, 1, 12, 7 }, /* QME7342 backplane setting */
7141 { 0, 1, 12, 8 }, /* QME7342 backplane setting */
	{ 0, 1, 0, 10 }, /* QMH7342 backplane settings */
	{ 0, 1, 0, 12 }, /* QMH7342 backplane settings */
};
7145
static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
7147 /* amp, pre, main, post */
7148 { 0, 0, 0, 0 }, /* QME7342 mfg settings */
7149 { 0, 0, 0, 6 }, /* QME7342 P2 mfg settings */
7150};
7151
static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
7153 unsigned atten)
7154{
7155 /*
7156 * The attenuation table starts at 2dB for entry 1,
7157 * with entry 0 being the loopback entry.
7158 */
7159 if (atten <= 2)
7160 atten = 1;
7161 else if (atten > TXDDS_TABLE_SZ)
7162 atten = TXDDS_TABLE_SZ - 1;
7163 else
7164 atten--;
7165 return txdds + atten;
7166}
7167
7168/*
 * if override is set, the module parameter txselect has a value
 * for this specific port, so use it, rather than our normal mechanism.
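 * Otherwise the cable is looked up in the vendor table; active cables
 * fall back to the board-trace attenuation, cables that report their
 * attenuation index the SDR/DDR/QDR tables by that value, and finally
 * no_eep selects from the standard, "extra", or mfg tables.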
7171 */
7172static void find_best_ent(struct qib_pportdata *ppd,
7173 const struct txdds_ent **sdr_dds,
7174 const struct txdds_ent **ddr_dds,
7175 const struct txdds_ent **qdr_dds, int override)
7176{
7177 struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
7178 int idx;
7179
7180 /* Search table of known cables */
7181 for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
7182 const struct vendor_txdds_ent *v = vendor_txdds + idx;
7183
7184 if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
7185 (!v->partnum ||
7186 !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
7187 *sdr_dds = &v->sdr;
7188 *ddr_dds = &v->ddr;
7189 *qdr_dds = &v->qdr;
7190 return;
7191 }
7192 }
7193
	/* Active cables don't have attenuation so we only set SERDES
	 * settings to account for the attenuation of the board traces. */
	if (!override && QSFP_IS_ACTIVE(qd->tech)) {
7197 *sdr_dds = txdds_sdr + ppd->dd->board_atten;
7198 *ddr_dds = txdds_ddr + ppd->dd->board_atten;
7199 *qdr_dds = txdds_qdr + ppd->dd->board_atten;
7200 return;
7201 }
7202
7203 if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
7204 qd->atten[1])) {
7205 *sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
7206 *ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
7207 *qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
7208 return;
	} else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
		/*
		 * If we have no (or incomplete) data from the cable
		 * EEPROM, or no QSFP, or override is set, use the
		 * module parameter value to index into the attenuation
		 * table.
		 */
		idx = ppd->cpspec->no_eep;
7217 *sdr_dds = &txdds_sdr[idx];
7218 *ddr_dds = &txdds_ddr[idx];
7219 *qdr_dds = &txdds_qdr[idx];
7220 } else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
7221 /* similar to above, but index into the "extra" table. */
7222 idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
7223 *sdr_dds = &txdds_extra_sdr[idx];
7224 *ddr_dds = &txdds_extra_ddr[idx];
7225 *qdr_dds = &txdds_extra_qdr[idx];
	} else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
7227 ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
7228 TXDDS_MFG_SZ)) {
7229 idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
7230 printk(KERN_INFO QIB_DRV_NAME
7231 " IB%u:%u use idx %u into txdds_mfg\n",
7232 ppd->dd->unit, ppd->port, idx);
7233 *sdr_dds = &txdds_extra_mfg[idx];
7234 *ddr_dds = &txdds_extra_mfg[idx];
7235 *qdr_dds = &txdds_extra_mfg[idx];
	} else {
7237 /* this shouldn't happen, it's range checked */
7238 *sdr_dds = txdds_sdr + qib_long_atten;
7239 *ddr_dds = txdds_ddr + qib_long_atten;
7240 *qdr_dds = txdds_qdr + qib_long_atten;
	}
7242}
7243
7244static void init_txdds_table(struct qib_pportdata *ppd, int override)
7245{
7246 const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
7247 struct txdds_ent *dds;
7248 int idx;
7249 int single_ent = 0;
7250
	find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);
7252
7253 /* for mez cards or override, use the selected value for all entries */
7254 if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
		single_ent = 1;

7257 /* Fill in the first entry with the best entry found. */
7258 set_txdds(ppd, 0, sdr_dds);
7259 set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
7260 set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
	if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
7262 QIBL_LINKACTIVE)) {
7263 dds = (struct txdds_ent *)(ppd->link_speed_active ==
7264 QIB_IB_QDR ? qdr_dds :
7265 (ppd->link_speed_active ==
7266 QIB_IB_DDR ? ddr_dds : sdr_dds));
7267 write_tx_serdes_param(ppd, dds);
7268 }

	/* Fill in the remaining entries with the default table values. */
7271 for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
7272 set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
7273 set_txdds(ppd, idx + TXDDS_TABLE_SZ,
7274 single_ent ? ddr_dds : txdds_ddr + idx);
7275 set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
7276 single_ent ? qdr_dds : txdds_qdr + idx);
7277 }
7278}
7279
7280#define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
7281#define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
7282#define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
7283#define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
7284#define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
7285#define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
7286#define AHB_TRANS_TRIES 10
7287
7288/*
 * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan3,
 * 5=subsystem, which is why most calls use "(chan + (chan >> 1))"
 * for the channel argument.
7292 */
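/*
 * ahb_mod() is a read-modify-write helper for the SerDes AHB interface:
 * bits covered by 'mask' are replaced from 'data', all other bits are
 * preserved, and the resulting value (or 0xBAD0BAD on a timeout) is
 * returned, so a zero mask performs a plain read.
 */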
7293static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
7294 u32 data, u32 mask)
7295{
7296 u32 rd_data, wr_data, sz_mask;
7297 u64 trans, acc, prev_acc;
7298 u32 ret = 0xBAD0BAD;
7299 int tries;
7300
7301 prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
7302 /* From this point on, make sure we return access */
7303 acc = (quad << 1) | 1;
7304 qib_write_kreg(dd, KR_AHB_ACC, acc);
7305
7306 for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7307 trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7308 if (trans & AHB_TRANS_RDY)
7309 break;
7310 }
7311 if (tries >= AHB_TRANS_TRIES) {
7312 qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
7313 goto bail;
7314 }
7315
7316 /* If mask is not all 1s, we need to read, but different SerDes
7317 * entities have different sizes
7318 */
7319 sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1;
7320 wr_data = data & mask & sz_mask;
7321 if ((~mask & sz_mask) != 0) {
7322 trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7323 qib_write_kreg(dd, KR_AHB_TRANS, trans);
7324
7325 for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7326 trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7327 if (trans & AHB_TRANS_RDY)
7328 break;
7329 }
7330 if (tries >= AHB_TRANS_TRIES) {
7331 qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
7332 AHB_TRANS_TRIES);
7333 goto bail;
7334 }
7335 /* Re-read in case host split reads and read data first */
7336 trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7337 rd_data = (uint32_t)(trans >> AHB_DATA_LSB);
7338 wr_data |= (rd_data & ~mask & sz_mask);
7339 }
7340
7341 /* If mask is not zero, we need to write. */
7342 if (mask & sz_mask) {
7343 trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7344 trans |= ((uint64_t)wr_data << AHB_DATA_LSB);
7345 trans |= AHB_WR;
7346 qib_write_kreg(dd, KR_AHB_TRANS, trans);
7347
7348 for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7349 trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7350 if (trans & AHB_TRANS_RDY)
7351 break;
7352 }
7353 if (tries >= AHB_TRANS_TRIES) {
7354 qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
7355 AHB_TRANS_TRIES);
7356 goto bail;
7357 }
7358 }
7359 ret = wr_data;
7360bail:
7361 qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
7362 return ret;
7363}
7364
7365static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
7366 unsigned mask)
7367{
7368 struct qib_devdata *dd = ppd->dd;
7369 int chan;
7370 u32 rbc;
7371
7372 for (chan = 0; chan < SERDES_CHANS; ++chan) {
7373 ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
7374 data, mask);
7375 rbc = ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7376 addr, 0, 0);
7377 }
7378}
7379
static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
7381{
7382 u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
	u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);
7384
7385 if (enable && !state) {
7386 printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS on\n",
7387 ppd->dd->unit, ppd->port);
		data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
	} else if (!enable && state) {
		printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS off\n",
			ppd->dd->unit, ppd->port);
		data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
	}
	qib_write_kreg_port(ppd, krp_serdesctrl, data);
7395}
7396
static int serdes_7322_init(struct qib_pportdata *ppd)
7398{
	int ret = 0;
7400 if (ppd->dd->cspec->r1)
7401 ret = serdes_7322_init_old(ppd);
7402 else
7403 ret = serdes_7322_init_new(ppd);
7404 return ret;
7405}
7406
7407static int serdes_7322_init_old(struct qib_pportdata *ppd)
7408{
	u32 le_val;
7410
7411 /*
7412 * Initialize the Tx DDS tables. Also done every QSFP event,
7413 * for adapters with QSFP
7414 */
7415 init_txdds_table(ppd, 0);
7416
	/* ensure no tx overrides from earlier driver loads */
7418 qib_write_kreg_port(ppd, krp_tx_deemph_override,
7419 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7420 reset_tx_deemphasis_override));
7421
	/* Patch some SerDes defaults to "Better for IB" */
7423 /* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
7424 ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
7425
7426 /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
7427 ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
7428 /* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */
7429 ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));
7430
7431 /* May be overridden in qsfp_7322_event */
7432 le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
7433 ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
7434
7435 /* enable LE1 adaptation for all but QME, which is disabled */
7436 le_val = IS_QME(ppd->dd) ? 0 : 1;
7437 ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));
7438
7439 /* Clear cmode-override, may be set from older driver */
7440 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7441
7442 /* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */
7443 ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));
7444
7445 /* setup LoS params; these are subsystem, so chan == 5 */
7446 /* LoS filter threshold_count on, ch 0-3, set to 8 */
7447 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
7448 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
7449 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
7450 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
7451
7452 /* LoS filter threshold_count off, ch 0-3, set to 4 */
7453 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
7454 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
7455 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
7456 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
7457
7458 /* LoS filter select enabled */
7459 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
7460
7461 /* LoS target data: SDR=4, DDR=2, QDR=1 */
7462 ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
7463 ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
7464 ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
7465
	serdes_7322_los_enable(ppd, 1);

7468 /* rxbistena; set 0 to avoid effects of it switch later */
	/* rxbistena; set to 0 to avoid effects of it switching later */
7470
7471 /* Configure 4 DFE taps, and only they adapt */
7472 ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));
7473
7474 /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
7475 le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
7476 ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
7477
7478 /*
7479 * Set receive adaptation mode. SDR and DDR adaptation are
7480 * always on, and QDR is initially enabled; later disabled.
7481 */
7482 qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
7483 qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
7484 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
7485 ppd->dd->cspec->r1 ?
7486 QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
7487 ppd->cpspec->qdr_dfe_on = 1;
7488
	/* FLoop LOS gate: PPM filter enabled */
	ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
7491
7492 /* rx offset center enabled */
7493 ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);
7494
7495 if (!ppd->dd->cspec->r1) {
7496 ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
7497 ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
7498 }
7499
7500 /* Set the frequency loop bandwidth to 15 */
7501 ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));
7502
7503 return 0;
7504}
7505
static int serdes_7322_init_new(struct qib_pportdata *ppd)
7507{
7508 u64 tstart;
7509 u32 le_val, rxcaldone;
7510 int chan, chan_done = (1 << SERDES_CHANS) - 1;
7511
	/* Clear cmode-override, may be set from older driver */
7513 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7514
7515 /* ensure no tx overrides from earlier driver loads */
7516 qib_write_kreg_port(ppd, krp_tx_deemph_override,
7517 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7518 reset_tx_deemphasis_override));
7519
7520 /* START OF LSI SUGGESTED SERDES BRINGUP */
7521 /* Reset - Calibration Setup */
	/* Stop DFE adaptation */
7523 ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
7524 /* Disable LE1 */
7525 ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
7526 /* Disable autoadapt for LE1 */
7527 ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
7528 /* Disable LE2 */
7529 ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
7530 /* Disable VGA */
7531 ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
7532 /* Disable AFE Offset Cancel */
7533 ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
7534 /* Disable Timing Loop */
7535 ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
7536 /* Disable Frequency Loop */
7537 ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
7538 /* Disable Baseline Wander Correction */
7539 ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
7540 /* Disable RX Calibration */
7541 ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
7542 /* Disable RX Offset Calibration */
7543 ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
7544 /* Select BB CDR */
7545 ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
7546 /* CDR Step Size */
7547 ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
7548 /* Enable phase Calibration */
7549 ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
7550 /* DFE Bandwidth [2:14-12] */
7551 ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
7552 /* DFE Config (4 taps only) */
7553 ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
7554 /* Gain Loop Bandwidth */
7555 if (!ppd->dd->cspec->r1) {
7556 ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
7557 ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
7558 } else {
7559 ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
7560 }
7561 /* Baseline Wander Correction Gain [13:4-0] (leave as default) */
7562 /* Baseline Wander Correction Gain [3:7-5] (leave as default) */
7563 /* Data Rate Select [5:7-6] (leave as default) */
	/* RX Parallel Word Width [3:10-8] (leave as default) */

	/* RX RESET */
7567 /* Single- or Multi-channel reset */
7568 /* RX Analog reset */
7569 /* RX Digital reset */
7570 ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
7571 msleep(20);
7572 /* RX Analog reset */
7573 ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
7574 msleep(20);
7575 /* RX Digital reset */
7576 ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
7577 msleep(20);
7578
7579 /* setup LoS params; these are subsystem, so chan == 5 */
7580 /* LoS filter threshold_count on, ch 0-3, set to 8 */
7581 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
7582 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
7583 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
7584 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
7585
7586 /* LoS filter threshold_count off, ch 0-3, set to 4 */
7587 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
7588 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
7589 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
7590 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
7591
7592 /* LoS filter select enabled */
7593 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
7594
7595 /* LoS target data: SDR=4, DDR=2, QDR=1 */
7596 ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
7597 ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
7598 ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
7599
7600 /* Turn on LOS on initial SERDES init */
7601 serdes_7322_los_enable(ppd, 1);
7602 /* FLoop LOS gate: PPM filter enabled */
7603 ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
7604
7605 /* RX LATCH CALIBRATION */
7606 /* Enable Eyefinder Phase Calibration latch */
7607 ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
7608 /* Enable RX Offset Calibration latch */
7609 ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
7610 msleep(20);
7611 /* Start Calibration */
7612 ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
7613 tstart = get_jiffies_64();
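	/* Poll each channel's calibration-done bit for up to 500 msec */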
7614 while (chan_done &&
	       !time_after64(get_jiffies_64(),
			     tstart + msecs_to_jiffies(500))) {
		msleep(20);
7618 for (chan = 0; chan < SERDES_CHANS; ++chan) {
7619 rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
7620 (chan + (chan >> 1)),
7621 25, 0, 0);
7622 if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
7623 (~chan_done & (1 << chan)) == 0)
7624 chan_done &= ~(1 << chan);
7625 }
7626 }
7627 if (chan_done) {
7628 printk(KERN_INFO QIB_DRV_NAME
7629 " Serdes %d calibration not done after .5 sec: 0x%x\n",
7630 IBSD(ppd->hw_pidx), chan_done);
7631 } else {
7632 for (chan = 0; chan < SERDES_CHANS; ++chan) {
7633 rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
7634 (chan + (chan >> 1)),
7635 25, 0, 0);
7636 if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
7637 printk(KERN_INFO QIB_DRV_NAME
7638 " Serdes %d chan %d calibration "
7639 "failed\n", IBSD(ppd->hw_pidx), chan);
7640 }
7641 }
7642
7643 /* Turn off Calibration */
7644 ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
7645 msleep(20);
7646
7647 /* BRING RX UP */
7648 /* Set LE2 value (May be overridden in qsfp_7322_event) */
7649 le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
7650 ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
7651 /* Set LE2 Loop bandwidth */
7652 ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
7653 /* Enable LE2 */
7654 ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
7655 msleep(20);
7656 /* Enable H0 only */
7657 ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
7658 /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
7659 le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
7660 ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
7661 /* Enable VGA */
7662 ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
7663 msleep(20);
7664 /* Set Frequency Loop Bandwidth */
7665 ibsd_wr_allchans(ppd, 2, (7 << 5), BMASK(8, 5));
7666 /* Enable Frequency Loop */
7667 ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
7668 /* Set Timing Loop Bandwidth */
7669 ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
7670 /* Enable Timing Loop */
7671 ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
7672 msleep(50);
7673 /* Enable DFE
7674 * Set receive adaptation mode. SDR and DDR adaptation are
7675 * always on, and QDR is initially enabled; later disabled.
7676 */
7677 qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
7678 qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
7679 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
7680 ppd->dd->cspec->r1 ?
7681 QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
7682 ppd->cpspec->qdr_dfe_on = 1;
7683 /* Disable LE1 */
7684 ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
7685 /* Disable auto adapt for LE1 */
7686 ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
7687 msleep(20);
7688 /* Enable AFE Offset Cancel */
7689 ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
7690 /* Enable Baseline Wander Correction */
7691 ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
7692 /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
7693 ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
7694 /* VGA output common mode */
7695 ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
7696
	/*
7698 * Initialize the Tx DDS tables. Also done every QSFP event,
7699 * for adapters with QSFP
7700 */
7701 init_txdds_table(ppd, 0);
7702
	return 0;
7704}
7705
/* start adjust QMH serdes parameters */
7707
7708static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
7709{
7710 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7711 9, code << 9, 0x3f << 9);
7712}
7713
7714static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
7715 int enable, u32 tapenable)
7716{
7717 if (enable)
7718 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7719 1, 3 << 10, 0x1f << 10);
7720 else
7721 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7722 1, 0, 0x1f << 10);
7723}
7724
7725/* Set clock to 1, 0, 1, 0 */
7726static void clock_man(struct qib_pportdata *ppd, int chan)
7727{
7728 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7729 4, 0x4000, 0x4000);
7730 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7731 4, 0, 0x4000);
7732 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7733 4, 0x4000, 0x4000);
7734 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
7735 4, 0, 0x4000);
7736}
7737
7738/*
7739 * write the current Tx serdes pre,post,main,amp settings into the serdes.
7740 * The caller must pass the settings appropriate for the current speed,
7741 * or not care if they are correct for the current speed.
7742 */
7743static void write_tx_serdes_param(struct qib_pportdata *ppd,
7744 struct txdds_ent *txdds)
7745{
7746 u64 deemph;
7747
7748 deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
7749 /* field names for amp, main, post, pre, respectively */
7750 deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
7751 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
7752 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
7753 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));

	deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7756 tx_override_deemphasis_select);
7757 deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7758 txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7759 txampcntl_d2a);
7760 deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7761 txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7762 txc0_ena);
7763 deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7764 txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7765 txcp1_ena);
7766 deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7767 txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
		txcn1_ena);
7769 qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
7770}
7771
7772/*
 * Set the parameters for mez cards on link bounce, so they are
7774 * always exactly what was requested. Similar logic to init_txdds
7775 * but does just the serdes.
 */
7777static void adj_tx_serdes(struct qib_pportdata *ppd)
7778{
	const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
7780 struct txdds_ent *dds;

	find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
7783 dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
7784 qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ?
7785 ddr_dds : sdr_dds));
7786 write_tx_serdes_param(ppd, dds);
}
7788
7789/* set QDR forced value for H1, if needed */
7790static void force_h1(struct qib_pportdata *ppd)
7791{
7792 int chan;
7793
7794 ppd->cpspec->qdr_reforce = 0;
7795 if (!ppd->dd->cspec->r1)
7796 return;
7797
7798 for (chan = 0; chan < SERDES_CHANS; chan++) {
7799 set_man_mode_h1(ppd, chan, 1, 0);
7800 set_man_code(ppd, chan, ppd->cpspec->h1_val);
7801 clock_man(ppd, chan);
7802 set_man_mode_h1(ppd, chan, 0, 0);
7803 }
7804}
7805
#define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
7807#define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)
7808
7809#define R_OPCODE_LSB 3
7810#define R_OP_NOP 0
7811#define R_OP_SHIFT 2
7812#define R_OP_UPDATE 3
7813#define R_TDI_LSB 2
7814#define R_TDO_LSB 1
7815#define R_RDY 1
7816
7817static int qib_r_grab(struct qib_devdata *dd)
7818{
7819 u64 val;
7820 val = SJA_EN;
7821 qib_write_kreg(dd, kr_r_access, val);
7822 qib_read_kreg32(dd, kr_scratch);
7823 return 0;
7824}
7825
/* qib_r_wait_for_rdy() not only waits for the ready bit, it
 * returns the current state of R_TDO
 */
static int qib_r_wait_for_rdy(struct qib_devdata *dd)
{
	u64 val;
	int timeout;
	for (timeout = 0; timeout < 100; ++timeout) {
		val = qib_read_kreg32(dd, kr_r_access);
		if (val & R_RDY)
			return (val >> R_TDO_LSB) & 1;
	}
	return -1;
}

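/*
 * Shift @len bits through the scan chain selected by @bisten. For each
 * bit position, the TDO value returned by the previous ready-wait is
 * stored into @outp (if non-NULL, LSB of each byte first) and the next
 * bit from @inp (if non-NULL) is presented on TDI. The interface is
 * returned to the NOP opcode when done. Returns the number of bits
 * shifted, or -1 on a ready timeout.
 */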
static int qib_r_shift(struct qib_devdata *dd, int bisten,
		       int len, u8 *inp, u8 *outp)
{
	u64 valbase, val;
	int ret, pos;

	valbase = SJA_EN | (bisten << BISTEN_LSB) |
		(R_OP_SHIFT << R_OPCODE_LSB);
	ret = qib_r_wait_for_rdy(dd);
	if (ret < 0)
		goto bail;
	for (pos = 0; pos < len; ++pos) {
		val = valbase;
		if (outp) {
			outp[pos >> 3] &= ~(1 << (pos & 7));
			outp[pos >> 3] |= (ret << (pos & 7));
		}
		if (inp) {
			int tdi = inp[pos >> 3] >> (pos & 7);
			val |= ((tdi & 1) << R_TDI_LSB);
		}
		qib_write_kreg(dd, kr_r_access, val);
		qib_read_kreg32(dd, kr_scratch);
		ret = qib_r_wait_for_rdy(dd);
		if (ret < 0)
			break;
	}
	/* Restore to NOP between operations. */
	val = SJA_EN | (bisten << BISTEN_LSB);
	qib_write_kreg(dd, kr_r_access, val);
	qib_read_kreg32(dd, kr_scratch);
	ret = qib_r_wait_for_rdy(dd);

	if (ret >= 0)
		ret = pos;
bail:
	return ret;
}

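/*
 * Issue the UPDATE opcode for the selected chain, presumably latching
 * whatever was previously shifted in. Returns the TDO state from the
 * ready wait, or -1 on timeout.
 */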
static int qib_r_update(struct qib_devdata *dd, int bisten)
{
	u64 val;
	int ret;

	val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
	ret = qib_r_wait_for_rdy(dd);
	if (ret >= 0) {
		qib_write_kreg(dd, kr_r_access, val);
		qib_read_kreg32(dd, kr_scratch);
	}
	return ret;
}

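/* Scan chain select codes and chain lengths (in bits) used below. */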
#define BISTEN_PORT_SEL 15
#define LEN_PORT_SEL 625
#define BISTEN_AT 17
#define LEN_AT 156
#define BISTEN_ETM 16
#define LEN_ETM 632

#define BIT2BYTE(x) (((x) + BITS_PER_BYTE - 1) / BITS_PER_BYTE)

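/*
 * The byte arrays below are bit images for the AT, ETM and port-select
 * scan chains, consumed LSB-first within each byte by qib_r_shift().
 * The values themselves are presumably chip-specific settings from
 * hardware characterization.
 */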
/* these are common for all IB port use cases. */
static u8 reset_at[BIT2BYTE(LEN_AT)] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
};
static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
	0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
	0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
	0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
	0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
};
static u8 at[BIT2BYTE(LEN_AT)] = {
	0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
};

/* used for IB1 or IB2, only one in use */
static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
	0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
	0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
};

/* used when both IB1 and IB2 are in use */
static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
	0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
	0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
	0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
	0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
	0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
};

/* used when only IB1 is in use */
static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
	0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
	0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
	0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
	0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
	0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/* used when only IB2 is in use */
static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
	0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
	0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
	0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
	0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
	0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
	0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
	0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
	0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
};

/* used when both IB1 and IB2 are in use */
static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
	0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
	0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
	0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
	0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
	0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/*
 * Do setup to properly handle IB link recovery; if @both is set, we
 * are initializing to cover both ports; otherwise we are initializing
 * to cover a single port card, or the port has reached INIT and we may
 * need to switch coverage types.
 */
static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
{
	u8 *portsel, *etm;
	struct qib_devdata *dd = ppd->dd;

	if (!ppd->dd->cspec->r1)
		return;
	if (!both) {
		dd->cspec->recovery_ports_initted++;
		ppd->cpspec->recovery_init = 1;
	}
	if (!both && dd->cspec->recovery_ports_initted == 1) {
		portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
		etm = atetm_1port;
	} else {
		portsel = portsel_2port;
		etm = atetm_2port;
	}

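	/*
	 * Reset the ETM and AT chains, program the port-select chain for
	 * the ports in use, then load the AT and ETM images; each shift
	 * is followed by an UPDATE to latch it. Any failure is reported
	 * below as a device error.
	 */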
	if (qib_r_grab(dd) < 0 ||
	    qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_ETM) < 0 ||
	    qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_AT) < 0 ||
	    qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
			portsel, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
	    qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_AT) < 0 ||
	    qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
	    qib_r_update(dd, BISTEN_ETM) < 0)
		qib_dev_err(dd, "Failed IB link recovery setup\n");
}

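/*
 * Check RXE status after an IB link recovery event. Only applies when
 * a single port was set up for recovery: briefly freeze the chip and
 * read the active freeze mask. A mask of zero apparently means the
 * part is wedged, so stay frozen, disable interrupts and report that a
 * power cycle is required; otherwise clear the serdes PClk-not-detect
 * error, unfreeze, and take the IBC out of reset.
 */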
static void check_7322_rxe_status(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;
	u64 fmask;

	if (dd->cspec->recovery_ports_initted != 1)
		return; /* rest doesn't apply to dualport */
	qib_write_kreg(dd, kr_control, dd->control |
		       SYM_MASK(Control, FreezeMode));
	(void)qib_read_kreg64(dd, kr_scratch);
	udelay(3); /* ibcreset asserted 400ns, be sure that's over */
	fmask = qib_read_kreg64(dd, kr_act_fmask);
	if (!fmask) {
		/*
		 * Require a power cycle before we'll work again; make sure
		 * we get no more interrupts, and don't turn off freeze.
		 */
		ppd->dd->cspec->stay_in_freeze = 1;
		qib_7322_set_intr_state(ppd->dd, 0);
		qib_write_kreg(dd, kr_fmask, 0ULL);
		qib_dev_err(dd, "HCA unusable until powercycled\n");
		return; /* eventually reset */
	}

	qib_write_kreg(ppd->dd, kr_hwerrclear,
		       SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));

	/* don't do the full clear_freeze(), not needed for this */
	qib_write_kreg(dd, kr_control, dd->control);
	qib_read_kreg32(dd, kr_scratch);
	/* take IBC out of reset */
	if (ppd->link_speed_supported) {
		ppd->cpspec->ibcctrl_a &=
			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
		qib_write_kreg_port(ppd, krp_ibcctrl_a,
				    ppd->cpspec->ibcctrl_a);
		qib_read_kreg32(dd, kr_scratch);
		if (ppd->lflags & QIBL_IB_LINK_DISABLED)
			qib_set_ib_7322_lstate(ppd, 0,
				QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
	}
}