/*
 * Marvell 88SE64xx/88SE94xx main function
 *
 * Copyright 2007 Red Hat, Inc.
 * Copyright 2008 Marvell. <kewei@marvell.com>
 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
 *
 * This file is licensed under GPLv2.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 */

#include "mv_sas.h"

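/*
 * Slot tag bookkeeping: each outstanding command occupies one slot in
 * mvi->slot_info[], tracked by a bit in the mvi->tags bitmap.
 * mvs_tag_alloc() hands out the first free tag, or -SAS_QUEUE_FULL once
 * all mvi->tags_num slots are in use.
 */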
static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
{
	if (task->lldd_task) {
		struct mvs_slot_info *slot;
		slot = task->lldd_task;
		*tag = slot->slot_tag;
		return 1;
	}
	return 0;
}

void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
{
	void *bitmap = &mvi->tags;
	clear_bit(tag, bitmap);
}

void mvs_tag_free(struct mvs_info *mvi, u32 tag)
{
	mvs_tag_clear(mvi, tag);
}

void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
{
	void *bitmap = &mvi->tags;
	set_bit(tag, bitmap);
}

inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
{
	unsigned int index, tag;
	void *bitmap = &mvi->tags;

	index = find_first_zero_bit(bitmap, mvi->tags_num);
	tag = index;
	if (tag >= mvi->tags_num)
		return -SAS_QUEUE_FULL;
	mvs_tag_set(mvi, tag);
	*tag_out = tag;
	return 0;
}

void mvs_tag_init(struct mvs_info *mvi)
{
	int i;
	for (i = 0; i < mvi->tags_num; ++i)
		mvs_tag_clear(mvi, i);
}

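/*
 * Debug helpers: hex-dump arbitrary buffers and, when _MV_DUMP is
 * enabled, the per-slot command/OAF/PRD/status regions and the
 * completion queue entry for a given tag.
 */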
void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
{
	u32 i;
	u32 run;
	u32 offset;

	offset = 0;
	while (size) {
		printk(KERN_DEBUG"%08X : ", baseaddr + offset);
		if (size >= 16)
			run = 16;
		else
			run = size;
		size -= run;
		for (i = 0; i < 16; i++) {
			if (i < run)
				printk(KERN_DEBUG"%02X ", (u32)data[i]);
			else
				printk(KERN_DEBUG"   ");
		}
		printk(KERN_DEBUG": ");
		for (i = 0; i < run; i++)
			printk(KERN_DEBUG"%c",
				isalnum(data[i]) ? data[i] : '.');
		printk(KERN_DEBUG"\n");
		data = &data[16];
		offset += run;
	}
	printk(KERN_DEBUG"\n");
}

#if (_MV_DUMP > 1)
static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
			    enum sas_protocol proto)
{
	u32 offset;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];

	offset = slot->cmd_size + MVS_OAF_SZ +
		 MVS_CHIP_DISP->prd_size() * slot->n_elem;
	dev_printk(KERN_DEBUG, mvi->dev, "+---->Status buffer[%d] :\n",
		   tag);
	mvs_hexdump(32, (u8 *) slot->response,
		    (u32) slot->buf_dma + offset);
}
#endif

static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
				enum sas_protocol proto)
{
#if (_MV_DUMP > 1)
	u32 sz, w_ptr;
	u64 addr;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];

	/* Delivery Queue */
	sz = MVS_CHIP_SLOT_SZ;
	w_ptr = slot->tx;
	addr = mvi->tx_dma;
	dev_printk(KERN_DEBUG, mvi->dev,
		"Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
	dev_printk(KERN_DEBUG, mvi->dev,
		"Delivery Queue Base Address=0x%llX (PA)"
		"(tx_dma=0x%llX), Entry=%04d\n",
		addr, (unsigned long long)mvi->tx_dma, w_ptr);
	mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
			(u32) mvi->tx_dma + sizeof(u32) * w_ptr);
	/* Command List */
	addr = mvi->slot_dma;
	dev_printk(KERN_DEBUG, mvi->dev,
		"Command List Base Address=0x%llX (PA)"
		"(slot_dma=0x%llX), Header=%03d\n",
		addr, (unsigned long long)slot->buf_dma, tag);
	dev_printk(KERN_DEBUG, mvi->dev, "Command Header[%03d]:\n", tag);
	/* mvs_cmd_hdr */
	mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
		(u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
	/* 1. command table area */
	dev_printk(KERN_DEBUG, mvi->dev, "+---->Command Table :\n");
	mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
	/* 2. open address frame area */
	dev_printk(KERN_DEBUG, mvi->dev, "+---->Open Address Frame :\n");
	mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
				(u32) slot->buf_dma + slot->cmd_size);
	/* 3. status buffer */
	mvs_hba_sb_dump(mvi, tag, proto);
	/* 4. PRD table */
	dev_printk(KERN_DEBUG, mvi->dev, "+---->PRD table :\n");
	mvs_hexdump(MVS_CHIP_DISP->prd_size() * slot->n_elem,
		(u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
		(u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
#endif
}

static void mvs_hba_cq_dump(struct mvs_info *mvi)
{
#if (_MV_DUMP > 2)
	u64 addr;
	void __iomem *regs = mvi->regs;
	u32 entry = mvi->rx_cons + 1;
	u32 rx_desc = le32_to_cpu(mvi->rx[entry]);

	/* Completion Queue */
	addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
	dev_printk(KERN_DEBUG, mvi->dev, "Completion Task = 0x%p\n",
		   mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
	dev_printk(KERN_DEBUG, mvi->dev,
		"Completion List Base Address=0x%llX (PA), "
		"CQ_Entry=%04d, CQ_WP=0x%08X\n",
		addr, entry - 1, mvi->rx[0]);
	mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
		    mvi->rx_dma + sizeof(u32) * entry);
#endif
}

void mvs_get_sas_addr(void *buf, u32 buflen)
{
	/* memcpy(buf, "\x50\x05\x04\x30\x11\xab\x64\x40", 8); */
}

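/*
 * Map a libsas domain_device back to the mvs_info instance that owns it:
 * walk sha->sas_port[] to find the device's port, locate the first phy of
 * that port in sha->sas_phy[], and use that phy index to pick the
 * controller from mvs_prv_info->mvi[].
 */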
struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
{
	unsigned long i = 0, j = 0, hi = 0;
	struct sas_ha_struct *sha = dev->port->ha;
	struct mvs_info *mvi = NULL;
	struct asd_sas_phy *phy;

	while (sha->sas_port[i]) {
		if (sha->sas_port[i] == dev->port) {
			phy = container_of(sha->sas_port[i]->phy_list.next,
				struct asd_sas_phy, port_phy_el);
			j = 0;
			while (sha->sas_phy[j]) {
				if (sha->sas_phy[j] == phy)
					break;
				j++;
			}
			break;
		}
		i++;
	}
	hi = j/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];

	return mvi;
}

/* FIXME */
int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
{
	unsigned long i = 0, j = 0, n = 0, num = 0;
	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;
	struct sas_ha_struct *sha = dev->port->ha;

	while (sha->sas_port[i]) {
		if (sha->sas_port[i] == dev->port) {
			struct asd_sas_phy *phy;
			list_for_each_entry(phy,
				&sha->sas_port[i]->phy_list, port_phy_el) {
				j = 0;
				while (sha->sas_phy[j]) {
					if (sha->sas_phy[j] == phy)
						break;
					j++;
				}
				phyno[n] = (j >= mvi->chip->n_phy) ?
					(j - mvi->chip->n_phy) : j;
				num++;
				n++;
			}
			break;
		}
		i++;
	}
	return num;
}

struct mvs_device *mvs_find_dev_by_reg_set(struct mvs_info *mvi,
						u8 reg_set)
{
	u32 dev_no;
	for (dev_no = 0; dev_no < MVS_MAX_DEVICES; dev_no++) {
		if (mvi->devices[dev_no].taskfileset == MVS_ID_NOT_MAPPED)
			continue;

		if (mvi->devices[dev_no].taskfileset == reg_set)
			return &mvi->devices[dev_no];
	}
	return NULL;
}

static inline void mvs_free_reg_set(struct mvs_info *mvi,
				struct mvs_device *dev)
{
	if (!dev) {
		mv_printk("device has been freed.\n");
		return;
	}
	if (dev->taskfileset == MVS_ID_NOT_MAPPED)
		return;
	MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset);
}

static inline u8 mvs_assign_reg_set(struct mvs_info *mvi,
				struct mvs_device *dev)
{
	if (dev->taskfileset != MVS_ID_NOT_MAPPED)
		return 0;
	return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset);
}

void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
{
	u32 no;
	for_each_phy(phy_mask, phy_mask, no) {
		if (!(phy_mask & 1))
			continue;
		MVS_CHIP_DISP->phy_reset(mvi, no, hard);
	}
}

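/*
 * libsas phy control entry point: resolve the owning controller from
 * sas_phy->ha and dispatch link-rate, reset, enable and disable requests
 * to the chip-specific MVS_CHIP_DISP operations.
 */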
/* FIXME: locking? */
int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
			void *funcdata)
{
	int rc = 0, phy_id = sas_phy->id;
	u32 tmp, i = 0, hi;
	struct sas_ha_struct *sha = sas_phy->ha;
	struct mvs_info *mvi = NULL;

	while (sha->sas_phy[i]) {
		if (sha->sas_phy[i] == sas_phy)
			break;
		i++;
	}
	hi = i/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];

	switch (func) {
	case PHY_FUNC_SET_LINK_RATE:
		MVS_CHIP_DISP->phy_set_link_rate(mvi, phy_id, funcdata);
		break;

	case PHY_FUNC_HARD_RESET:
		tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id);
		if (tmp & PHY_RST_HARD)
			break;
		MVS_CHIP_DISP->phy_reset(mvi, phy_id, 1);
		break;

	case PHY_FUNC_LINK_RESET:
		MVS_CHIP_DISP->phy_enable(mvi, phy_id);
		MVS_CHIP_DISP->phy_reset(mvi, phy_id, 0);
		break;

	case PHY_FUNC_DISABLE:
		MVS_CHIP_DISP->phy_disable(mvi, phy_id);
		break;
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		rc = -EOPNOTSUPP;
	}
	msleep(200);
	return rc;
}

void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
				u32 off_lo, u32 off_hi, u64 sas_addr)
{
	u32 lo = (u32)sas_addr;
	u32 hi = (u32)(sas_addr>>32);

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_lo);
	MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, lo);
	MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_hi);
	MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi);
}

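/*
 * Report a received identification frame (or SATA signature FIS) for
 * phy i to libsas: refresh the sas_phy link-rate fields and raise the
 * PHYE_OOB_DONE and PORTE_BYTES_DMAED events.
 */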
static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;
	if (!phy->phy_attached)
		return;

	if (!(phy->att_dev_info & PORT_DEV_TRGT_MASK)
		&& phy->phy_type & PORT_TYPE_SAS) {
		return;
	}

	sas_ha = mvi->sas;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate = phy->minimum_linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate = phy->maximum_linkrate;
		sphy->maximum_linkrate_hw = MVS_CHIP_DISP->phy_max_link_rate();
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing */
	}
	mv_dprintk("phy %d bytes dmaed.\n", i + mvi->id * mvi->chip->n_phy);

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;

	mvi->sas->notify_port_event(sas_phy,
				   PORTE_BYTES_DMAED);
}

int mvs_slave_alloc(struct scsi_device *scsi_dev)
{
	struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
	if (dev_is_sata(dev)) {
		/* We don't need to rescan targets
		 * if the REPORT_LUNS request fails
		 */
		if (scsi_dev->lun > 0)
			return -ENXIO;
		scsi_dev->tagged_supported = 1;
	}

	return sas_slave_alloc(scsi_dev);
}

int mvs_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;
	if (dev_is_sata(dev)) {
		/* may set PIO mode */
	#if MV_DISABLE_NCQ
		struct ata_port *ap = dev->sata_dev.ap;
		struct ata_device *adev = ap->link.device;
		adev->flags |= ATA_DFLAG_NCQ_OFF;
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
	#endif
	}
	return 0;
}

void mvs_scan_start(struct Scsi_Host *shost)
{
	int i, j;
	unsigned short core_nr;
	struct mvs_info *mvi;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);

	core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;

	for (j = 0; j < core_nr; j++) {
		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
		for (i = 0; i < mvi->chip->n_phy; ++i)
			mvs_bytes_dmaed(mvi, i);
	}
}

int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	/* give the phy enabling interrupt event time to come in (1s
	 * is empirically about all it takes) */
	if (time < HZ)
		return 0;
	/* Wait for discovery to finish */
	scsi_flush_work(shost);
	return 1;
}

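/*
 * Task preparation. Each prep routine below carves the per-slot DMA
 * buffer into four regions: command table, open address frame (OAF),
 * PRD scatter/gather table and status buffer, then fills the command
 * header and a delivery-queue entry for the slot.
 */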
static int mvs_task_prep_smp(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	int elem, rc, i;
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct domain_device *dev = task->dev;
	struct asd_sas_port *sas_port = dev->port;
	struct scatterlist *sg_req, *sg_resp;
	u32 req_len, resp_len, tag = tei->tag;
	void *buf_tmp;
	u8 *buf_oaf;
	dma_addr_t buf_tmp_dma;
	void *buf_prd;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];
	u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
#if _MV_DUMP
	u8 *buf_cmd;
	void *from;
#endif
	/*
	 * DMA-map SMP request, response buffers
	 */
	sg_req = &task->smp_task.smp_req;
	elem = dma_map_sg(mvi->dev, sg_req, 1, PCI_DMA_TODEVICE);
	if (!elem)
		return -ENOMEM;
	req_len = sg_dma_len(sg_req);

	sg_resp = &task->smp_task.smp_resp;
	elem = dma_map_sg(mvi->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
	if (!elem) {
		rc = -ENOMEM;
		goto err_out;
	}
	resp_len = SB_RFB_MAX;

	/* must be in dwords */
	if ((req_len & 0x3) || (resp_len & 0x3)) {
		rc = -EINVAL;
		goto err_out_2;
	}

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */
	buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

#if _MV_DUMP
	buf_cmd = buf_tmp;
	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
	buf_tmp += req_len;
	buf_tmp_dma += req_len;
	slot->cmd_size = req_len;
#else
	hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
#endif

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table *********************************** */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
	if (mvi->flags & MVF_FLAG_SOC)
		hdr->reserved[0] = 0;

	/*
	 * Fill in TX ring and command slot header
	 */
	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
					TXQ_MODE_I | tag |
					(sas_port->phy_mask << TXQ_PHY_SHIFT));

	hdr->flags |= flags;
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = 0;

	/* generate open address frame hdr (first 12 bytes) */
	/* initiator, SMP, ftype 1h */
	buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01;
	buf_oaf[1] = dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = 0xFFFF;		/* SAS SPEC */
	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);

#if _MV_DUMP
	/* copy cmd table */
	from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
	memcpy(buf_cmd, from + sg_req->offset, req_len);
	kunmap_atomic(from, KM_IRQ0);
#endif
	return 0;

err_out_2:
	dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1,
		     PCI_DMA_FROMDEVICE);
err_out:
	dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1,
		     PCI_DMA_TODEVICE);
	return rc;
}

static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;

	if (qc) {
		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
			qc->tf.command == ATA_CMD_FPDMA_READ) {
			*tag = qc->tag;
			return 1;
		}
	}

	return 0;
}

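/*
 * Build a SATA/STP command: reserve a register set for the device,
 * encode the NCQ tag when one is in use, and copy the host-to-device
 * FIS (plus the ATAPI CDB, if any) into the command table.
 */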
static int mvs_task_prep_ata(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	struct sas_task *task = tei->task;
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct asd_sas_port *sas_port = dev->port;
	struct mvs_slot_info *slot;
	void *buf_prd;
	u32 tag = tei->tag, hdr_tag;
	u32 flags, del_q;
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf;
	dma_addr_t buf_tmp_dma;
	u32 i, req_len, resp_len;
	const u32 max_resp_len = SB_RFB_MAX;

	if (mvs_assign_reg_set(mvi, mvi_dev) == MVS_ID_NOT_MAPPED) {
		mv_dprintk("Not enough register sets for dev %d.\n",
			mvi_dev->device_id);
		return -EBUSY;
	}
	slot = &mvi->slot_info[tag];
	slot->tx = mvi->tx_prod;
	del_q = TXQ_MODE_I | tag |
		(TXQ_CMD_STP << TXQ_CMD_SHIFT) |
		(sas_port->phy_mask << TXQ_PHY_SHIFT) |
		(mvi_dev->taskfileset << TXQ_SRS_SHIFT);
	mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);

	if (task->data_dir == DMA_FROM_DEVICE)
		flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT);
	else
		flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);

	if (task->ata_task.use_ncq)
		flags |= MCH_FPDMA;
	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
		if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
			flags |= MCH_ATAPI;
	}

	/* FIXME: fill in port multiplier number */

	hdr->flags = cpu_to_le32(flags);

	/* FIXME: the low order 5 bits hold the TAG if NCQ is enabled */
	if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag))
		task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
	else
		hdr_tag = tag;

	hdr->tags = cpu_to_le32(hdr_tag);

	hdr->data_len = cpu_to_le32(task->total_xfer_len);

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_ATA_CMD_SZ;
	buf_tmp_dma += MVS_ATA_CMD_SZ;
#if _MV_DUMP
	slot->cmd_size = MVS_ATA_CMD_SZ;
#endif

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	/* used for STP.  unused for SATA? */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;

	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;
	i = MVS_CHIP_DISP->prd_size() * MVS_CHIP_DISP->prd_count();

	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	/* FIXME: probably unused, for SATA.  kept here just in case
	 * we get a STP/SATA error information record
	 */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
	if (mvi->flags & MVF_FLAG_SOC)
		hdr->reserved[0] = 0;

	req_len = sizeof(struct host_to_dev_fis);
	resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
		sizeof(struct mvs_err_info) - i;

	/* request, response lengths */
	resp_len = min(resp_len, max_resp_len);
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	if (likely(!task->ata_task.device_control_reg_update))
		task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
	/* fill in command FIS and ATAPI CDB */
	memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
		memcpy(buf_cmd + STP_ATAPI_CMD,
			task->ata_task.atapi_packet, 16);

	/* generate open address frame hdr (first 12 bytes) */
	/* initiator, STP, ftype 1h */
	buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1;
	buf_oaf[1] = dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);

	if (task->data_dir == DMA_FROM_DEVICE)
		MVS_CHIP_DISP->dma_fix(mvi, sas_port->phy_mask,
				TRASH_BUCKET_SIZE, tei->n_elem, buf_prd);

	return 0;
}

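/*
 * Build an SSP command or task-management frame: fill the SSP frame
 * header and IU in the command table; for TMFs, the tag of the task to
 * be managed is placed in bytes 12-13 of the IU.
 */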
static int mvs_task_prep_ssp(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei, int is_tmf,
			     struct mvs_tmf_task *tmf)
{
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct mvs_port *port = tei->port;
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct asd_sas_port *sas_port = dev->port;
	struct mvs_slot_info *slot;
	void *buf_prd;
	struct ssp_frame_hdr *ssp_hdr;
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf, fburst = 0;
	dma_addr_t buf_tmp_dma;
	u32 flags;
	u32 resp_len, req_len, i, tag = tei->tag;
	const u32 max_resp_len = SB_RFB_MAX;
	u32 phy_mask;

	slot = &mvi->slot_info[tag];

	phy_mask = ((port->wide_port_phymap) ? port->wide_port_phymap :
		sas_port->phy_mask) & TXQ_PHY_MASK;

	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
				(TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
				(phy_mask << TXQ_PHY_SHIFT));

	flags = MCH_RETRY;
	if (task->ssp_task.enable_first_burst) {
		flags |= MCH_FBURST;
		fburst = (1 << 7);
	}
	if (is_tmf)
		flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
	hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT));
	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = cpu_to_le32(task->total_xfer_len);

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_SSP_CMD_SZ;
	buf_tmp_dma += MVS_SSP_CMD_SZ;
#if _MV_DUMP
	slot->cmd_size = MVS_SSP_CMD_SZ;
#endif

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
	if (mvi->flags & MVF_FLAG_SOC)
		hdr->reserved[0] = 0;

	resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
	    sizeof(struct mvs_err_info) - i;
	resp_len = min(resp_len, max_resp_len);

	req_len = sizeof(struct ssp_frame_hdr) + 28;

	/* request, response lengths */
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	/* generate open address frame hdr (first 12 bytes) */
	/* initiator, SSP, ftype 1h */
	buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1;
	buf_oaf[1] = dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in SSP frame header (Command Table.SSP frame header) */
	ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;

	if (is_tmf)
		ssp_hdr->frame_type = SSP_TASK;
	else
		ssp_hdr->frame_type = SSP_COMMAND;

	memcpy(ssp_hdr->hashed_dest_addr, dev->hashed_sas_addr,
	       HASHED_SAS_ADDR_SIZE);
	memcpy(ssp_hdr->hashed_src_addr,
	       dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	ssp_hdr->tag = cpu_to_be16(tag);

	/* fill in IU for TASK and Command Frame */
	buf_cmd += sizeof(*ssp_hdr);
	memcpy(buf_cmd, &task->ssp_task.LUN, 8);

	if (ssp_hdr->frame_type != SSP_TASK) {
		buf_cmd[9] = fburst | task->ssp_task.task_attr |
				(task->ssp_task.task_prio << 3);
		memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);
	} else {
		buf_cmd[10] = tmf->tmf;
		switch (tmf->tmf) {
		case TMF_ABORT_TASK:
		case TMF_QUERY_TASK:
			buf_cmd[12] =
				(tmf->tag_of_task_to_be_managed >> 8) & 0xff;
			buf_cmd[13] =
				tmf->tag_of_task_to_be_managed & 0xff;
			break;
		default:
			break;
		}
	}
	/* fill in PRD (scatter/gather) table, if any */
	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
	return 0;
}

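/*
 * Common preparation path for all protocols: validate the port and
 * device state, map the scatterlist, allocate a slot tag and slot
 * buffer, call the protocol-specific prep routine and queue the slot
 * on its port; *pass counts commands ready for delivery.
 */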
#define DEV_IS_GONE(mvi_dev)	((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE)))
static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf,
				struct mvs_tmf_task *tmf, int *pass)
{
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct mvs_task_exec_info tei;
	struct mvs_slot_info *slot;
	u32 tag = 0xdeadbeef, n_elem = 0;
	int rc = 0;

	if (!dev->port) {
		struct task_status_struct *tsm = &task->task_status;

		tsm->resp = SAS_TASK_UNDELIVERED;
		tsm->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port, should
		 * not call task_done for sata
		 */
		if (dev->dev_type != SATA_DEV)
			task->task_done(task);
		return rc;
	}

	if (DEV_IS_GONE(mvi_dev)) {
		if (mvi_dev)
			mv_dprintk("device %d not ready.\n",
				mvi_dev->device_id);
		else
			mv_dprintk("device %016llx not ready.\n",
				SAS_ADDR(dev->sas_addr));

		rc = SAS_PHY_DOWN;
		return rc;
	}
	tei.port = dev->port->lldd_port;
	if (tei.port && !tei.port->port_attached && !tmf) {
		if (sas_protocol_ata(task->task_proto)) {
			struct task_status_struct *ts = &task->task_status;
			mv_dprintk("SATA/STP port %d does not attach"
					" device.\n", dev->port->id);
			ts->resp = SAS_TASK_COMPLETE;
			ts->stat = SAS_PHY_DOWN;

			task->task_done(task);

		} else {
			struct task_status_struct *ts = &task->task_status;
			mv_dprintk("SAS port %d does not attach"
				" device.\n", dev->port->id);
			ts->resp = SAS_TASK_UNDELIVERED;
			ts->stat = SAS_PHY_DOWN;
			task->task_done(task);
		}
		return rc;
	}

	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			n_elem = dma_map_sg(mvi->dev,
					    task->scatter,
					    task->num_scatter,
					    task->data_dir);
			if (!n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		}
	} else {
		n_elem = task->num_scatter;
	}

	rc = mvs_tag_alloc(mvi, &tag);
	if (rc)
		goto err_out;

	slot = &mvi->slot_info[tag];

	task->lldd_task = NULL;
	slot->n_elem = n_elem;
	slot->slot_tag = tag;

	slot->buf = pci_pool_alloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma);
	if (!slot->buf)
		goto err_out_tag;
	memset(slot->buf, 0, MVS_SLOT_BUF_SZ);

	tei.task = task;
	tei.hdr = &mvi->slot[tag];
	tei.tag = tag;
	tei.n_elem = n_elem;
	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		rc = mvs_task_prep_smp(mvi, &tei);
		break;
	case SAS_PROTOCOL_SSP:
		rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		rc = mvs_task_prep_ata(mvi, &tei);
		break;
	default:
		dev_printk(KERN_ERR, mvi->dev,
			"unknown sas_task proto: 0x%x\n",
			task->task_proto);
		rc = -EINVAL;
		break;
	}

	if (rc) {
		mv_dprintk("rc is %x\n", rc);
		goto err_out_slot_buf;
	}
	slot->task = task;
	slot->port = tei.port;
	task->lldd_task = slot;
	list_add_tail(&slot->entry, &tei.port->list);
	spin_lock(&task->task_state_lock);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock(&task->task_state_lock);

	mvs_hba_memory_dump(mvi, tag, task->task_proto);
	mvi_dev->running_req++;
	++(*pass);
	mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);

	return rc;

err_out_slot_buf:
	pci_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
err_out_tag:
	mvs_tag_free(mvi, tag);
err_out:

	dev_printk(KERN_ERR, mvi->dev, "mvsas prep failed[%d]!\n", rc);
	if (!sas_protocol_ata(task->task_proto))
		if (n_elem)
			dma_unmap_sg(mvi->dev, task->scatter, n_elem,
				     task->data_dir);
prep_out:
	return rc;
}

static struct mvs_task_list *mvs_task_alloc_list(int *num, gfp_t gfp_flags)
{
	struct mvs_task_list *first = NULL;

	for (; *num > 0; --*num) {
		struct mvs_task_list *mvs_list = kmem_cache_zalloc(mvs_task_list_cache, gfp_flags);

		if (!mvs_list)
			break;

		INIT_LIST_HEAD(&mvs_list->list);
		if (!first)
			first = mvs_list;
		else
			list_add_tail(&mvs_list->list, &first->list);
	}

	return first;
}

static inline void mvs_task_free_list(struct mvs_task_list *mvs_list)
{
	LIST_HEAD(list);
	struct list_head *pos, *a;
	struct mvs_task_list *mlist = NULL;

	__list_add(&list, mvs_list->list.prev, &mvs_list->list);

	list_for_each_safe(pos, a, &list) {
		list_del_init(pos);
		mlist = list_entry(pos, struct mvs_task_list, list);
		kmem_cache_free(mvs_task_list_cache, mlist);
	}
}

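/*
 * Execution paths: mvs_task_exec() prepares and starts delivery on a
 * single controller, while mvs_collector_task_exec() spreads a list of
 * tasks across the (up to two) controllers tracked in mvs_prv_info and
 * kicks each delivery queue that received work.
 */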
static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
				struct completion *completion, int is_tmf,
				struct mvs_tmf_task *tmf)
{
	struct domain_device *dev = task->dev;
	struct mvs_info *mvi = NULL;
	u32 rc = 0;
	u32 pass = 0;
	unsigned long flags = 0;

	mvi = ((struct mvs_device *)task->dev->lldd_dev)->mvi_info;

	if ((dev->dev_type == SATA_DEV) && (dev->sata_dev.ap != NULL))
		spin_unlock_irq(dev->sata_dev.ap->lock);

	spin_lock_irqsave(&mvi->lock, flags);
	rc = mvs_task_prep(task, mvi, is_tmf, tmf, &pass);
	if (rc)
		dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);

	if (likely(pass))
		MVS_CHIP_DISP->start_delivery(mvi, (mvi->tx_prod - 1) &
			(MVS_CHIP_SLOT_SZ - 1));
	spin_unlock_irqrestore(&mvi->lock, flags);

	if ((dev->dev_type == SATA_DEV) && (dev->sata_dev.ap != NULL))
		spin_lock_irq(dev->sata_dev.ap->lock);

	return rc;
}

static int mvs_collector_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
				struct completion *completion, int is_tmf,
				struct mvs_tmf_task *tmf)
{
	struct domain_device *dev = task->dev;
	struct mvs_prv_info *mpi = dev->port->ha->lldd_ha;
	struct mvs_info *mvi = NULL;
	struct sas_task *t = task;
	struct mvs_task_list *mvs_list = NULL, *a;
	LIST_HEAD(q);
	int pass[2] = {0};
	u32 rc = 0;
	u32 n = num;
	unsigned long flags = 0;

	mvs_list = mvs_task_alloc_list(&n, gfp_flags);
	if (n) {
		printk(KERN_ERR "%s: mvs alloc list failed.\n", __func__);
		rc = -ENOMEM;
		goto free_list;
	}

	__list_add(&q, mvs_list->list.prev, &mvs_list->list);

	list_for_each_entry(a, &q, list) {
		a->task = t;
		t = list_entry(t->list.next, struct sas_task, list);
	}

	list_for_each_entry(a, &q, list) {

		t = a->task;
		mvi = ((struct mvs_device *)t->dev->lldd_dev)->mvi_info;

		spin_lock_irqsave(&mvi->lock, flags);
		rc = mvs_task_prep(t, mvi, is_tmf, tmf, &pass[mvi->id]);
		if (rc)
			dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);
		spin_unlock_irqrestore(&mvi->lock, flags);
	}

	if (likely(pass[0]))
		MVS_CHIP_DISP->start_delivery(mpi->mvi[0],
			(mpi->mvi[0]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));

	if (likely(pass[1]))
		MVS_CHIP_DISP->start_delivery(mpi->mvi[1],
			(mpi->mvi[1]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));

	list_del_init(&q);

free_list:
	if (mvs_list)
		mvs_task_free_list(mvs_list);

	return rc;
}

int mvs_queue_command(struct sas_task *task, const int num,
			gfp_t gfp_flags)
{
	struct mvs_device *mvi_dev = task->dev->lldd_dev;
	struct sas_ha_struct *sas = mvi_dev->mvi_info->sas;

	if (sas->lldd_max_execute_num < 2)
		return mvs_task_exec(task, num, gfp_flags, NULL, 0, NULL);
	else
		return mvs_collector_task_exec(task, num, gfp_flags, NULL, 0, NULL);
}

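/*
 * Slot teardown: unmap any DMA mappings made at prep time, return the
 * slot buffer to the DMA pool, unlink the slot from its port and
 * release the tag.
 */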
static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
{
	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
	mvs_tag_clear(mvi, slot_idx);
}

static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
			  struct mvs_slot_info *slot, u32 slot_idx)
{
	if (!slot->task)
		return;
	if (!sas_protocol_ata(task->task_proto))
		if (slot->n_elem)
			dma_unmap_sg(mvi->dev, task->scatter,
				     slot->n_elem, task->data_dir);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1,
			     PCI_DMA_FROMDEVICE);
		dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1,
			     PCI_DMA_TODEVICE);
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SSP:
	default:
		/* do nothing */
		break;
	}

	if (slot->buf) {
		pci_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
		slot->buf = NULL;
	}
	list_del_init(&slot->entry);
	task->lldd_task = NULL;
	slot->task = NULL;
	slot->port = NULL;
	slot->slot_tag = 0xFFFFFFFF;
	mvs_slot_free(mvi, slot_idx);
}

static void mvs_update_wideport(struct mvs_info *mvi, int i)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct mvs_port *port = phy->port;
	int j, no;

	for_each_phy(port->wide_port_phymap, j, no) {
		if (j & 1) {
			MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
						PHYR_WIDE_PORT);
			MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
						port->wide_port_phymap);
		} else {
			MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
						PHYR_WIDE_PORT);
			MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
						0);
		}
	}
}

static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
{
	u32 tmp;
	struct mvs_phy *phy = &mvi->phy[i];
	struct mvs_port *port = phy->port;

	tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, i);
	if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
		if (!port)
			phy->phy_attached = 1;
		return tmp;
	}

	if (port) {
		if (phy->phy_type & PORT_TYPE_SAS) {
			port->wide_port_phymap &= ~(1U << i);
			if (!port->wide_port_phymap)
				port->port_attached = 0;
			mvs_update_wideport(mvi, i);
		} else if (phy->phy_type & PORT_TYPE_SATA)
			port->port_attached = 0;
		phy->port = NULL;
		phy->phy_attached = 0;
		phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
	}
	return 0;
}

static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
{
	u32 *s = (u32 *) buf;

	if (!s)
		return NULL;

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
	s[3] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
	s[2] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
	s[1] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
	s[0] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);

	/* Workaround: take some ATAPI devices for ATA */
	if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01))
		s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10);

	return s;
}

static u32 mvs_is_sig_fis_received(u32 irq_status)
{
	return irq_status & PHYEV_SIG_FIS;
}

static void mvs_sig_remove_timer(struct mvs_phy *phy)
{
	if (phy->timer.function)
		del_timer(&phy->timer);
	phy->timer.function = NULL;
}

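/*
 * Refresh cached phy state after a phy event: read the chip's phy
 * status and, for SATA phys, require a received signature FIS before
 * marking the phy attached; for SAS phys, take the attached device
 * info from the identify data provided by the chip layer.
 */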
Andy Yan20b09c22009-05-08 17:46:40 -04001290void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
Ke Wei8f261aa2008-02-23 21:15:27 +08001291{
1292 struct mvs_phy *phy = &mvi->phy[i];
Andy Yan20b09c22009-05-08 17:46:40 -04001293 struct sas_identify_frame *id;
Ke Wei8f261aa2008-02-23 21:15:27 +08001294
Andy Yan20b09c22009-05-08 17:46:40 -04001295 id = (struct sas_identify_frame *)phy->frame_rcvd;
Ke Wei8f261aa2008-02-23 21:15:27 +08001296
1297 if (get_st) {
Andy Yan20b09c22009-05-08 17:46:40 -04001298 phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, i);
Ke Wei8f261aa2008-02-23 21:15:27 +08001299 phy->phy_status = mvs_is_phy_ready(mvi, i);
1300 }
1301
1302 if (phy->phy_status) {
Andy Yan20b09c22009-05-08 17:46:40 -04001303 int oob_done = 0;
1304 struct asd_sas_phy *sas_phy = &mvi->phy[i].sas_phy;
Ke Wei8f261aa2008-02-23 21:15:27 +08001305
Andy Yan20b09c22009-05-08 17:46:40 -04001306 oob_done = MVS_CHIP_DISP->oob_done(mvi, i);
Ke Wei8f261aa2008-02-23 21:15:27 +08001307
Andy Yan20b09c22009-05-08 17:46:40 -04001308 MVS_CHIP_DISP->fix_phy_info(mvi, i, id);
1309 if (phy->phy_type & PORT_TYPE_SATA) {
1310 phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
1311 if (mvs_is_sig_fis_received(phy->irq_status)) {
Xiangliang Yu8882f082011-05-24 22:33:11 +08001312 mvs_sig_remove_timer(phy);
Andy Yan20b09c22009-05-08 17:46:40 -04001313 phy->phy_attached = 1;
1314 phy->att_dev_sas_addr =
1315 i + mvi->id * mvi->chip->n_phy;
1316 if (oob_done)
1317 sas_phy->oob_mode = SATA_OOB_MODE;
1318 phy->frame_rcvd_size =
1319 sizeof(struct dev_to_host_fis);
Andy Yanf9da3be2009-05-14 20:41:21 -04001320 mvs_get_d2h_reg(mvi, i, id);
Andy Yan20b09c22009-05-08 17:46:40 -04001321 } else {
1322 u32 tmp;
1323 dev_printk(KERN_DEBUG, mvi->dev,
1324 "Phy%d : No sig fis\n", i);
1325 tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, i);
1326 MVS_CHIP_DISP->write_port_irq_mask(mvi, i,
1327 tmp | PHYEV_SIG_FIS);
1328 phy->phy_attached = 0;
1329 phy->phy_type &= ~PORT_TYPE_SATA;
Andy Yan20b09c22009-05-08 17:46:40 -04001330 goto out_done;
1331 }
Srinivas9dc9fd92010-02-15 00:00:00 -06001332 } else if (phy->phy_type & PORT_TYPE_SAS
Andy Yan20b09c22009-05-08 17:46:40 -04001333 || phy->att_dev_info & PORT_SSP_INIT_MASK) {
1334 phy->phy_attached = 1;
Ke Wei8f261aa2008-02-23 21:15:27 +08001335 phy->identify.device_type =
Andy Yan20b09c22009-05-08 17:46:40 -04001336 phy->att_dev_info & PORT_DEV_TYPE_MASK;
Ke Wei8f261aa2008-02-23 21:15:27 +08001337
1338 if (phy->identify.device_type == SAS_END_DEV)
1339 phy->identify.target_port_protocols =
1340 SAS_PROTOCOL_SSP;
1341 else if (phy->identify.device_type != NO_DEVICE)
1342 phy->identify.target_port_protocols =
1343 SAS_PROTOCOL_SMP;
Andy Yan20b09c22009-05-08 17:46:40 -04001344 if (oob_done)
Ke Wei8f261aa2008-02-23 21:15:27 +08001345 sas_phy->oob_mode = SAS_OOB_MODE;
1346 phy->frame_rcvd_size =
1347 sizeof(struct sas_identify_frame);
Ke Wei8f261aa2008-02-23 21:15:27 +08001348 }
Andy Yan20b09c22009-05-08 17:46:40 -04001349 memcpy(sas_phy->attached_sas_addr,
1350 &phy->att_dev_sas_addr, SAS_ADDR_SIZE);
Ke Weie9ff91b2008-03-27 14:55:33 +08001351
Andy Yan20b09c22009-05-08 17:46:40 -04001352 if (MVS_CHIP_DISP->phy_work_around)
1353 MVS_CHIP_DISP->phy_work_around(mvi, i);
Ke Wei8f261aa2008-02-23 21:15:27 +08001354 }
Andy Yan20b09c22009-05-08 17:46:40 -04001355 mv_dprintk("port %d attached dev info is %x\n",
1356 i + mvi->id * mvi->chip->n_phy, phy->att_dev_info);
1357 mv_dprintk("port %d attached sas addr is %llx\n",
1358 i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr);
Ke Weie9ff91b2008-03-27 14:55:33 +08001359out_done:
Ke Wei8f261aa2008-02-23 21:15:27 +08001360 if (get_st)
Andy Yan20b09c22009-05-08 17:46:40 -04001361 MVS_CHIP_DISP->write_port_irq_stat(mvi, i, phy->irq_status);
Ke Wei8f261aa2008-02-23 21:15:27 +08001362}
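
/*
 * Illustrative sketch (not part of the driver): a directly attached SATA
 * disk sends no identify address frame, so mvs_update_phyinfo() above
 * synthesizes an attached SAS address from the controller-wide phy index,
 * i + mvi->id * mvi->chip->n_phy.  A minimal standalone model of that
 * calculation follows; all names are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

/* host_id: controller index; n_phy: phys per controller; phy_id: local phy */
static uint64_t model_fake_sata_addr(unsigned int host_id, unsigned int n_phy,
				     unsigned int phy_id)
{
	/* same arithmetic as phy->att_dev_sas_addr in mvs_update_phyinfo() */
	return (uint64_t)phy_id + (uint64_t)host_id * n_phy;
}

int main(void)
{
	/* second controller (id 1), 8 phys per chip, local phy 3 -> 11 */
	printf("fake attached SAS addr: %llu\n",
	       (unsigned long long)model_fake_sata_addr(1, 8, 3));
	return 0;
}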
1363
Andy Yan20b09c22009-05-08 17:46:40 -04001364static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
1365{
1366 struct sas_ha_struct *sas_ha = sas_phy->ha;
1367 struct mvs_info *mvi = NULL; int i = 0, hi;
1368 struct mvs_phy *phy = sas_phy->lldd_phy;
1369 struct asd_sas_port *sas_port = sas_phy->port;
1370 struct mvs_port *port;
1371 unsigned long flags = 0;
1372 if (!sas_port)
1373 return;
1374
1375 while (sas_ha->sas_phy[i]) {
1376 if (sas_ha->sas_phy[i] == sas_phy)
1377 break;
1378 i++;
1379 }
1380 hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy;
1381 mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi];
1382 if (sas_port->id >= mvi->chip->n_phy)
1383 port = &mvi->port[sas_port->id - mvi->chip->n_phy];
1384 else
1385 port = &mvi->port[sas_port->id];
1386 if (lock)
1387 spin_lock_irqsave(&mvi->lock, flags);
1388 port->port_attached = 1;
1389 phy->port = port;
Xiangliang Yu0b15fb12011-04-26 06:36:51 -07001390 sas_port->lldd_port = port;
Andy Yan20b09c22009-05-08 17:46:40 -04001391 if (phy->phy_type & PORT_TYPE_SAS) {
1392 port->wide_port_phymap = sas_port->phy_mask;
1393 mv_printk("set wide port phy map %x\n", sas_port->phy_mask);
1394 mvs_update_wideport(mvi, sas_phy->id);
1395 }
1396 if (lock)
1397 spin_unlock_irqrestore(&mvi->lock, flags);
1398}
1399
1400static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
1401{
Srinivas9dc9fd92010-02-15 00:00:00 -06001402 struct domain_device *dev;
1403 struct mvs_phy *phy = sas_phy->lldd_phy;
1404 struct mvs_info *mvi = phy->mvi;
1405 struct asd_sas_port *port = sas_phy->port;
1406 int phy_no = 0;
1407
1408 while (phy != &mvi->phy[phy_no]) {
1409 phy_no++;
1410 if (phy_no >= MVS_MAX_PHYS)
1411 return;
1412 }
1413 list_for_each_entry(dev, &port->dev_list, dev_list_node)
1414 mvs_do_release_task(phy->mvi, phy_no, NULL);
1415
Andy Yan20b09c22009-05-08 17:46:40 -04001416}
1417
1418
Jeff Garzikdd4969a2009-05-08 17:44:01 -04001419void mvs_port_formed(struct asd_sas_phy *sas_phy)
Ke Wei8f261aa2008-02-23 21:15:27 +08001420{
Andy Yan20b09c22009-05-08 17:46:40 -04001421 mvs_port_notify_formed(sas_phy, 1);
1422}
Ke Wei8f261aa2008-02-23 21:15:27 +08001423
Andy Yan20b09c22009-05-08 17:46:40 -04001424void mvs_port_deformed(struct asd_sas_phy *sas_phy)
1425{
1426 mvs_port_notify_deformed(sas_phy, 1);
1427}
1428
1429struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
1430{
1431 u32 dev;
1432 for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
1433 if (mvi->devices[dev].dev_type == NO_DEVICE) {
1434 mvi->devices[dev].device_id = dev;
1435 return &mvi->devices[dev];
1436 }
Ke Wei8f261aa2008-02-23 21:15:27 +08001437 }
Andy Yan20b09c22009-05-08 17:46:40 -04001438
1439 if (dev == MVS_MAX_DEVICES)
1440 mv_printk("maximum of %d devices supported, ignoring new device\n",
1441 MVS_MAX_DEVICES);
1442
1443 return NULL;
1444}
1445
1446void mvs_free_dev(struct mvs_device *mvi_dev)
1447{
1448 u32 id = mvi_dev->device_id;
1449 memset(mvi_dev, 0, sizeof(*mvi_dev));
1450 mvi_dev->device_id = id;
1451 mvi_dev->dev_type = NO_DEVICE;
1452 mvi_dev->dev_status = MVS_DEV_NORMAL;
1453 mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
1454}
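
/*
 * Illustrative sketch (not part of the driver): mvs_alloc_dev() and
 * mvs_free_dev() above implement a simple fixed-size slot allocator - a
 * linear scan for a free entry on allocation, and a reset-in-place on
 * free that preserves the slot's index.  A minimal userspace model of
 * that scheme; all names are hypothetical.
 */
#include <stdio.h>
#include <string.h>

#define MODEL_MAX_DEVICES 8
#define MODEL_NO_DEVICE   0	/* plays the role of NO_DEVICE */

struct model_dev {
	unsigned int device_id;
	int dev_type;		/* MODEL_NO_DEVICE means the slot is free */
};

static struct model_dev devices[MODEL_MAX_DEVICES];

static struct model_dev *model_alloc_dev(void)
{
	unsigned int i;

	for (i = 0; i < MODEL_MAX_DEVICES; i++) {
		if (devices[i].dev_type == MODEL_NO_DEVICE) {
			devices[i].device_id = i;	/* remember the slot index */
			return &devices[i];
		}
	}
	return NULL;	/* table full */
}

static void model_free_dev(struct model_dev *dev)
{
	unsigned int id = dev->device_id;

	memset(dev, 0, sizeof(*dev));	/* wipe the slot ... */
	dev->device_id = id;		/* ... but keep its index */
	dev->dev_type = MODEL_NO_DEVICE;
}

int main(void)
{
	struct model_dev *d = model_alloc_dev();

	if (d) {
		d->dev_type = 1;	/* caller marks the slot in use */
		printf("allocated slot %u\n", d->device_id);
		model_free_dev(d);
	}
	return 0;
}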
1455
1456int mvs_dev_found_notify(struct domain_device *dev, int lock)
1457{
1458 unsigned long flags = 0;
1459 int res = 0;
1460 struct mvs_info *mvi = NULL;
1461 struct domain_device *parent_dev = dev->parent;
1462 struct mvs_device *mvi_device;
1463
1464 mvi = mvs_find_dev_mvi(dev);
1465
1466 if (lock)
1467 spin_lock_irqsave(&mvi->lock, flags);
1468
1469 mvi_device = mvs_alloc_dev(mvi);
1470 if (!mvi_device) {
1471 res = -1;
1472 goto found_out;
1473 }
Andy Yanf9da3be2009-05-14 20:41:21 -04001474 dev->lldd_dev = mvi_device;
Srinivas9dc9fd92010-02-15 00:00:00 -06001475 mvi_device->dev_status = MVS_DEV_NORMAL;
Andy Yan20b09c22009-05-08 17:46:40 -04001476 mvi_device->dev_type = dev->dev_type;
Andy Yan9870d9a2009-05-11 22:19:25 +08001477 mvi_device->mvi_info = mvi;
Andy Yan20b09c22009-05-08 17:46:40 -04001478 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
1479 int phy_id;
1480 u8 phy_num = parent_dev->ex_dev.num_phys;
1481 struct ex_phy *phy;
1482 for (phy_id = 0; phy_id < phy_num; phy_id++) {
1483 phy = &parent_dev->ex_dev.ex_phy[phy_id];
1484 if (SAS_ADDR(phy->attached_sas_addr) ==
1485 SAS_ADDR(dev->sas_addr)) {
1486 mvi_device->attached_phy = phy_id;
1487 break;
1488 }
1489 }
1490
1491 if (phy_id == phy_num) {
1492 mv_printk("Error: no attached dev:%016llx "
1493 "at ex:%016llx.\n",
1494 SAS_ADDR(dev->sas_addr),
1495 SAS_ADDR(parent_dev->sas_addr));
1496 res = -1;
1497 }
1498 }
1499
1500found_out:
1501 if (lock)
1502 spin_unlock_irqrestore(&mvi->lock, flags);
1503 return res;
1504}
1505
1506int mvs_dev_found(struct domain_device *dev)
1507{
1508 return mvs_dev_found_notify(dev, 1);
1509}
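
/*
 * Illustrative sketch (not part of the driver): when the new device sits
 * behind an expander, mvs_dev_found_notify() above records which expander
 * phy it hangs off by comparing each phy's attached SAS address with the
 * device's address.  A standalone model of that lookup; all names are
 * hypothetical and addresses are plain 64-bit values.
 */
#include <stdint.h>
#include <stdio.h>

/* returns the matching phy index, or -1 if the device is not attached */
static int model_find_attached_phy(const uint64_t *attached_addr,
				   int num_phys, uint64_t dev_addr)
{
	int phy_id;

	for (phy_id = 0; phy_id < num_phys; phy_id++)
		if (attached_addr[phy_id] == dev_addr)
			return phy_id;
	return -1;
}

int main(void)
{
	uint64_t ex_phys[4] = {
		0x5000c50001234567ULL, 0, 0x5000c500089abcdeULL, 0
	};

	printf("device found on expander phy %d\n",
	       model_find_attached_phy(ex_phys, 4, 0x5000c500089abcdeULL));
	return 0;
}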
1510
Srinivas9dc9fd92010-02-15 00:00:00 -06001511void mvs_dev_gone_notify(struct domain_device *dev)
Andy Yan20b09c22009-05-08 17:46:40 -04001512{
1513 unsigned long flags = 0;
Andy Yanf9da3be2009-05-14 20:41:21 -04001514 struct mvs_device *mvi_dev = dev->lldd_dev;
Andy Yan9870d9a2009-05-11 22:19:25 +08001515 struct mvs_info *mvi = mvi_dev->mvi_info;
Andy Yan20b09c22009-05-08 17:46:40 -04001516
Srinivas9dc9fd92010-02-15 00:00:00 -06001517 spin_lock_irqsave(&mvi->lock, flags);
Andy Yan20b09c22009-05-08 17:46:40 -04001518
1519 if (mvi_dev) {
1520 mv_dprintk("found dev[%d:%x] is gone.\n",
1521 mvi_dev->device_id, mvi_dev->dev_type);
Srinivas9dc9fd92010-02-15 00:00:00 -06001522 mvs_release_task(mvi, dev);
Andy Yan20b09c22009-05-08 17:46:40 -04001523 mvs_free_reg_set(mvi, mvi_dev);
1524 mvs_free_dev(mvi_dev);
1525 } else {
1526 mv_dprintk("device has already gone.\n");
1527 }
1528 dev->lldd_dev = NULL;
1529
Srinivas9dc9fd92010-02-15 00:00:00 -06001530 spin_unlock_irqrestore(&mvi->lock, flags);
Andy Yan20b09c22009-05-08 17:46:40 -04001531}
1532
1533
1534void mvs_dev_gone(struct domain_device *dev)
1535{
Srinivas9dc9fd92010-02-15 00:00:00 -06001536 mvs_dev_gone_notify(dev);
Andy Yan20b09c22009-05-08 17:46:40 -04001537}
1538
1539static struct sas_task *mvs_alloc_task(void)
1540{
1541 struct sas_task *task = kzalloc(sizeof(struct sas_task), GFP_KERNEL);
1542
1543 if (task) {
1544 INIT_LIST_HEAD(&task->list);
1545 spin_lock_init(&task->task_state_lock);
1546 task->task_state_flags = SAS_TASK_STATE_PENDING;
1547 init_timer(&task->timer);
1548 init_completion(&task->completion);
1549 }
1550 return task;
1551}
1552
1553static void mvs_free_task(struct sas_task *task)
1554{
1555 if (task) {
1556 BUG_ON(!list_empty(&task->list));
1557 kfree(task);
1558 }
1559}
1560
1561static void mvs_task_done(struct sas_task *task)
1562{
1563 if (!del_timer(&task->timer))
1564 return;
1565 complete(&task->completion);
1566}
1567
1568static void mvs_tmf_timedout(unsigned long data)
1569{
1570 struct sas_task *task = (struct sas_task *)data;
1571
1572 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
1573 complete(&task->completion);
1574}
1575
1576/* XXX */
1577#define MVS_TASK_TIMEOUT 20
1578static int mvs_exec_internal_tmf_task(struct domain_device *dev,
1579 void *parameter, u32 para_len, struct mvs_tmf_task *tmf)
1580{
1581 int res, retry;
1582 struct sas_task *task = NULL;
1583
1584 for (retry = 0; retry < 3; retry++) {
1585 task = mvs_alloc_task();
1586 if (!task)
1587 return -ENOMEM;
1588
1589 task->dev = dev;
1590 task->task_proto = dev->tproto;
1591
1592 memcpy(&task->ssp_task, parameter, para_len);
1593 task->task_done = mvs_task_done;
1594
1595 task->timer.data = (unsigned long) task;
1596 task->timer.function = mvs_tmf_timedout;
1597 task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
1598 add_timer(&task->timer);
1599
Andy Yan0b84b702009-05-11 20:05:26 +08001600 res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 1, tmf);
Andy Yan20b09c22009-05-08 17:46:40 -04001601
1602 if (res) {
1603 del_timer(&task->timer);
1604 mv_printk("executing internal task failed: %d\n", res);
1605 goto ex_err;
1606 }
1607
1608 wait_for_completion(&task->completion);
1609 res = -TMF_RESP_FUNC_FAILED;
1610 /* Even if the TMF timed out, return directly. */
1611 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1612 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
1613 mv_printk("TMF task[%x] timeout.\n", tmf->tmf);
1614 goto ex_err;
1615 }
1616 }
1617
1618 if (task->task_status.resp == SAS_TASK_COMPLETE &&
James Bottomleydf64d3c2010-07-27 15:51:13 -05001619 task->task_status.stat == SAM_STAT_GOOD) {
Andy Yan20b09c22009-05-08 17:46:40 -04001620 res = TMF_RESP_FUNC_COMPLETE;
1621 break;
1622 }
1623
1624 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1625 task->task_status.stat == SAS_DATA_UNDERRUN) {
1626 /* no error, but return the number of bytes of
1627 * underrun */
1628 res = task->task_status.residual;
1629 break;
1630 }
1631
1632 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1633 task->task_status.stat == SAS_DATA_OVERRUN) {
1634 mv_dprintk("blocked task error.\n");
1635 res = -EMSGSIZE;
1636 break;
1637 } else {
1638 mv_dprintk(" task to dev %016llx response: 0x%x "
1639 "status 0x%x\n",
1640 SAS_ADDR(dev->sas_addr),
1641 task->task_status.resp,
1642 task->task_status.stat);
1643 mvs_free_task(task);
1644 task = NULL;
1645
1646 }
1647 }
1648ex_err:
1649 BUG_ON(retry == 3 && task != NULL);
1650 if (task != NULL)
1651 mvs_free_task(task);
1652 return res;
1653}
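
/*
 * Illustrative sketch (not part of the driver): the retry loop above maps
 * the finished task's (resp, stat) pair onto a TMF result - GOOD means
 * the TMF completed, DATA_UNDERRUN returns the residual byte count,
 * DATA_OVERRUN becomes -EMSGSIZE, and anything else is freed and retried.
 * A compact standalone model of that decision table; all names are
 * hypothetical.
 */
#include <errno.h>
#include <stdio.h>

enum model_resp { MODEL_TASK_COMPLETE, MODEL_TASK_UNDELIVERED };
enum model_stat { MODEL_GOOD, MODEL_UNDERRUN, MODEL_OVERRUN, MODEL_OTHER };

#define MODEL_TMF_COMPLETE	0
#define MODEL_TMF_RETRY		-1	/* caller frees the task and retries */

static int model_tmf_result(enum model_resp resp, enum model_stat stat,
			    int residual)
{
	if (resp != MODEL_TASK_COMPLETE)
		return MODEL_TMF_RETRY;

	switch (stat) {
	case MODEL_GOOD:
		return MODEL_TMF_COMPLETE;
	case MODEL_UNDERRUN:
		return residual;	/* no error: report untransferred bytes */
	case MODEL_OVERRUN:
		return -EMSGSIZE;
	default:
		return MODEL_TMF_RETRY;
	}
}

int main(void)
{
	printf("underrun result: %d\n",
	       model_tmf_result(MODEL_TASK_COMPLETE, MODEL_UNDERRUN, 512));
	return 0;
}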
1654
1655static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
1656 u8 *lun, struct mvs_tmf_task *tmf)
1657{
1658 struct sas_ssp_task ssp_task;
1659 DECLARE_COMPLETION_ONSTACK(completion);
1660 if (!(dev->tproto & SAS_PROTOCOL_SSP))
1661 return TMF_RESP_FUNC_ESUPP;
1662
1663 strncpy((u8 *)&ssp_task.LUN, lun, 8);
1664
1665 return mvs_exec_internal_tmf_task(dev, &ssp_task,
1666 sizeof(ssp_task), tmf);
1667}
1668
1669
1670/* The standard mandates a link reset for ATA (type 0)
 1671 and a hard reset for SSP (type 1), only for RECOVERY */
1672static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
1673{
1674 int rc;
1675 struct sas_phy *phy = sas_find_local_phy(dev);
1676 int reset_type = (dev->dev_type == SATA_DEV ||
1677 (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
1678 rc = sas_phy_reset(phy, reset_type);
1679 msleep(2000);
1680 return rc;
1681}
1682
1683/* mandatory SAM-3 */
1684int mvs_lu_reset(struct domain_device *dev, u8 *lun)
1685{
1686 unsigned long flags;
1687 int i, phyno[WIDE_PORT_MAX_PHY], num, rc = TMF_RESP_FUNC_FAILED;
1688 struct mvs_tmf_task tmf_task;
Andy Yanf9da3be2009-05-14 20:41:21 -04001689 struct mvs_device * mvi_dev = dev->lldd_dev;
Andy Yan9870d9a2009-05-11 22:19:25 +08001690 struct mvs_info *mvi = mvi_dev->mvi_info;
Andy Yan20b09c22009-05-08 17:46:40 -04001691
1692 tmf_task.tmf = TMF_LU_RESET;
1693 mvi_dev->dev_status = MVS_DEV_EH;
1694 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1695 if (rc == TMF_RESP_FUNC_COMPLETE) {
1696 num = mvs_find_dev_phyno(dev, phyno);
1697 spin_lock_irqsave(&mvi->lock, flags);
1698 for (i = 0; i < num; i++)
Srinivas9dc9fd92010-02-15 00:00:00 -06001699 mvs_do_release_task(mvi, phyno[i], dev);
Andy Yan20b09c22009-05-08 17:46:40 -04001700 spin_unlock_irqrestore(&mvi->lock, flags);
1701 }
1702 /* If failed, fall through to I_T nexus reset */
 1703 mv_printk("%s for device[%x]: rc = %d\n", __func__,
1704 mvi_dev->device_id, rc);
1705 return rc;
Jeff Garzikb5762942007-10-25 20:58:22 -04001706}
1707
Jeff Garzikdd4969a2009-05-08 17:44:01 -04001708int mvs_I_T_nexus_reset(struct domain_device *dev)
Ke Weie9ff91b2008-03-27 14:55:33 +08001709{
Andy Yan20b09c22009-05-08 17:46:40 -04001710 unsigned long flags;
Srinivas9dc9fd92010-02-15 00:00:00 -06001711 int rc = TMF_RESP_FUNC_FAILED;
1712 struct mvs_device * mvi_dev = (struct mvs_device *)dev->lldd_dev;
Andy Yan9870d9a2009-05-11 22:19:25 +08001713 struct mvs_info *mvi = mvi_dev->mvi_info;
Andy Yan20b09c22009-05-08 17:46:40 -04001714
1715 if (mvi_dev->dev_status != MVS_DEV_EH)
1716 return TMF_RESP_FUNC_COMPLETE;
1717 rc = mvs_debug_I_T_nexus_reset(dev);
1718 mv_printk("%s for device[%x]: rc = %d\n",
1719 __func__, mvi_dev->device_id, rc);
1720
1721 /* housekeeper */
Andy Yan20b09c22009-05-08 17:46:40 -04001722 spin_lock_irqsave(&mvi->lock, flags);
Srinivas9dc9fd92010-02-15 00:00:00 -06001723 mvs_release_task(mvi, dev);
Andy Yan20b09c22009-05-08 17:46:40 -04001724 spin_unlock_irqrestore(&mvi->lock, flags);
1725
1726 return rc;
1727}
1728/* optional SAM-3 */
1729int mvs_query_task(struct sas_task *task)
1730{
1731 u32 tag;
1732 struct scsi_lun lun;
1733 struct mvs_tmf_task tmf_task;
1734 int rc = TMF_RESP_FUNC_FAILED;
1735
1736 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1737 struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
1738 struct domain_device *dev = task->dev;
Andy Yan9870d9a2009-05-11 22:19:25 +08001739 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
1740 struct mvs_info *mvi = mvi_dev->mvi_info;
Andy Yan20b09c22009-05-08 17:46:40 -04001741
1742 int_to_scsilun(cmnd->device->lun, &lun);
1743 rc = mvs_find_tag(mvi, task, &tag);
1744 if (rc == 0) {
1745 rc = TMF_RESP_FUNC_FAILED;
1746 return rc;
1747 }
1748
1749 tmf_task.tmf = TMF_QUERY_TASK;
1750 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1751
1752 rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1753 switch (rc) {
1754 /* The task is still in Lun, release it then */
1755 case TMF_RESP_FUNC_SUCC:
1756 /* The task is not in Lun or failed, reset the phy */
1757 case TMF_RESP_FUNC_FAILED:
1758 case TMF_RESP_FUNC_COMPLETE:
1759 break;
Srinivas9dc9fd92010-02-15 00:00:00 -06001760 default:
1761 rc = TMF_RESP_FUNC_COMPLETE;
1762 break;
Andy Yan20b09c22009-05-08 17:46:40 -04001763 }
1764 }
1765 mv_printk("%s: rc = %d\n", __func__, rc);
1766 return rc;
1767}
1768
1769/* mandatory SAM-3, still need to free task/slot info */
1770int mvs_abort_task(struct sas_task *task)
1771{
1772 struct scsi_lun lun;
1773 struct mvs_tmf_task tmf_task;
1774 struct domain_device *dev = task->dev;
Andy Yan9870d9a2009-05-11 22:19:25 +08001775 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
Jiri Slaby24ae1632010-06-22 13:42:02 +02001776 struct mvs_info *mvi;
Andy Yan20b09c22009-05-08 17:46:40 -04001777 int rc = TMF_RESP_FUNC_FAILED;
1778 unsigned long flags;
1779 u32 tag;
Andy Yan9870d9a2009-05-11 22:19:25 +08001780
Srinivas9dc9fd92010-02-15 00:00:00 -06001781 if (!mvi_dev) {
1782 mv_printk("%s:%d TMF_RESP_FUNC_FAILED\n", __func__, __LINE__);
1783 return TMF_RESP_FUNC_FAILED;
1784 }
1785
Jiri Slaby24ae1632010-06-22 13:42:02 +02001786 mvi = mvi_dev->mvi_info;
1787
Andy Yan20b09c22009-05-08 17:46:40 -04001788 spin_lock_irqsave(&task->task_state_lock, flags);
1789 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1790 spin_unlock_irqrestore(&task->task_state_lock, flags);
1791 rc = TMF_RESP_FUNC_COMPLETE;
1792 goto out;
1793 }
1794 spin_unlock_irqrestore(&task->task_state_lock, flags);
Srinivas9dc9fd92010-02-15 00:00:00 -06001795 mvi_dev->dev_status = MVS_DEV_EH;
Andy Yan20b09c22009-05-08 17:46:40 -04001796 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1797 struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
1798
1799 int_to_scsilun(cmnd->device->lun, &lun);
1800 rc = mvs_find_tag(mvi, task, &tag);
1801 if (rc == 0) {
1802 mv_printk("No such tag in %s\n", __func__);
1803 rc = TMF_RESP_FUNC_FAILED;
1804 return rc;
1805 }
1806
1807 tmf_task.tmf = TMF_ABORT_TASK;
1808 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1809
1810 rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1811
1812 /* if successful, clear the task and forward the completion callback. */
1813 if (rc == TMF_RESP_FUNC_COMPLETE) {
1814 u32 slot_no;
1815 struct mvs_slot_info *slot;
Andy Yan20b09c22009-05-08 17:46:40 -04001816
1817 if (task->lldd_task) {
Andy Yanf9da3be2009-05-14 20:41:21 -04001818 slot = task->lldd_task;
Andy Yan20b09c22009-05-08 17:46:40 -04001819 slot_no = (u32) (slot - mvi->slot_info);
Srinivas9dc9fd92010-02-15 00:00:00 -06001820 spin_lock_irqsave(&mvi->lock, flags);
Andy Yan20b09c22009-05-08 17:46:40 -04001821 mvs_slot_complete(mvi, slot_no, 1);
Srinivas9dc9fd92010-02-15 00:00:00 -06001822 spin_unlock_irqrestore(&mvi->lock, flags);
Andy Yan20b09c22009-05-08 17:46:40 -04001823 }
1824 }
Srinivas9dc9fd92010-02-15 00:00:00 -06001825
Andy Yan20b09c22009-05-08 17:46:40 -04001826 } else if (task->task_proto & SAS_PROTOCOL_SATA ||
1827 task->task_proto & SAS_PROTOCOL_STP) {
1828 /* TODO: free the register set */
Srinivas9dc9fd92010-02-15 00:00:00 -06001829 if (SATA_DEV == dev->dev_type) {
1830 struct mvs_slot_info *slot = task->lldd_task;
1831 struct task_status_struct *tstat;
1832 u32 slot_idx = (u32)(slot - mvi->slot_info);
1833 tstat = &task->task_status;
1834 mv_dprintk("mvs_abort_task() mvi=%p task=%p "
 1835 "slot=%p slot_idx=0x%x\n",
1836 mvi, task, slot, slot_idx);
1837 tstat->stat = SAS_ABORTED_TASK;
1838 if (mvi_dev && mvi_dev->running_req)
1839 mvi_dev->running_req--;
1840 if (sas_protocol_ata(task->task_proto))
1841 mvs_free_reg_set(mvi, mvi_dev);
1842 mvs_slot_task_free(mvi, task, slot, slot_idx);
1843 return -1;
1844 }
Andy Yan20b09c22009-05-08 17:46:40 -04001845 } else {
1846 /* SMP */
1847
1848 }
1849out:
1850 if (rc != TMF_RESP_FUNC_COMPLETE)
1851 mv_printk("%s: rc = %d\n", __func__, rc);
1852 return rc;
1853}
1854
1855int mvs_abort_task_set(struct domain_device *dev, u8 *lun)
1856{
1857 int rc = TMF_RESP_FUNC_FAILED;
1858 struct mvs_tmf_task tmf_task;
1859
1860 tmf_task.tmf = TMF_ABORT_TASK_SET;
1861 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1862
1863 return rc;
1864}
1865
1866int mvs_clear_aca(struct domain_device *dev, u8 *lun)
1867{
1868 int rc = TMF_RESP_FUNC_FAILED;
1869 struct mvs_tmf_task tmf_task;
1870
1871 tmf_task.tmf = TMF_CLEAR_ACA;
1872 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1873
1874 return rc;
1875}
1876
1877int mvs_clear_task_set(struct domain_device *dev, u8 *lun)
1878{
1879 int rc = TMF_RESP_FUNC_FAILED;
1880 struct mvs_tmf_task tmf_task;
1881
1882 tmf_task.tmf = TMF_CLEAR_TASK_SET;
1883 rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1884
1885 return rc;
Ke Weie9ff91b2008-03-27 14:55:33 +08001886}
1887
Jeff Garzikdd4969a2009-05-08 17:44:01 -04001888static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
1889 u32 slot_idx, int err)
1890{
Andy Yanf9da3be2009-05-14 20:41:21 -04001891 struct mvs_device *mvi_dev = task->dev->lldd_dev;
Jeff Garzikdd4969a2009-05-08 17:44:01 -04001892 struct task_status_struct *tstat = &task->task_status;
1893 struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
James Bottomleydf64d3c2010-07-27 15:51:13 -05001894 int stat = SAM_STAT_GOOD;
Jeff Garzikdd4969a2009-05-08 17:44:01 -04001895
Andy Yan20b09c22009-05-08 17:46:40 -04001896
Jeff Garzikdd4969a2009-05-08 17:44:01 -04001897 resp->frame_len = sizeof(struct dev_to_host_fis);
1898 memcpy(&resp->ending_fis[0],
Andy Yan20b09c22009-05-08 17:46:40 -04001899 SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset),
Jeff Garzikdd4969a2009-05-08 17:44:01 -04001900 sizeof(struct dev_to_host_fis));
1901 tstat->buf_valid_size = sizeof(*resp);
Srinivas9dc9fd92010-02-15 00:00:00 -06001902 if (unlikely(err)) {
1903 if (unlikely(err & CMD_ISS_STPD))
1904 stat = SAS_OPEN_REJECT;
1905 else
1906 stat = SAS_PROTO_RESPONSE;
1907 }
1908
Jeff Garzikdd4969a2009-05-08 17:44:01 -04001909 return stat;
1910}
1911
1912static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
1913 u32 slot_idx)
1914{
1915 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
Andy Yan20b09c22009-05-08 17:46:40 -04001916 int stat;
Jeff Garzikdd4969a2009-05-08 17:44:01 -04001917 u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
Andy Yan20b09c22009-05-08 17:46:40 -04001918 u32 tfs = 0;
1919 enum mvs_port_type type = PORT_TYPE_SAS;
Jeff Garzikdd4969a2009-05-08 17:44:01 -04001920
Andy Yan20b09c22009-05-08 17:46:40 -04001921 if (err_dw0 & CMD_ISS_STPD)
1922 MVS_CHIP_DISP->issue_stop(mvi, type, tfs);
1923
1924 MVS_CHIP_DISP->command_active(mvi, slot_idx);
1925
James Bottomleydf64d3c2010-07-27 15:51:13 -05001926 stat = SAM_STAT_CHECK_CONDITION;
Jeff Garzikdd4969a2009-05-08 17:44:01 -04001927 switch (task->task_proto) {
1928 case SAS_PROTOCOL_SSP:
Andy Yan20b09c22009-05-08 17:46:40 -04001929 stat = SAS_ABORTED_TASK;
Jeff Garzikdd4969a2009-05-08 17:44:01 -04001930 break;
1931 case SAS_PROTOCOL_SMP:
James Bottomleydf64d3c2010-07-27 15:51:13 -05001932 stat = SAM_STAT_CHECK_CONDITION;
Jeff Garzikdd4969a2009-05-08 17:44:01 -04001933 break;
Andy Yan20b09c22009-05-08 17:46:40 -04001934
Jeff Garzikdd4969a2009-05-08 17:44:01 -04001935 case SAS_PROTOCOL_SATA:
1936 case SAS_PROTOCOL_STP:
1937 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
Andy Yan20b09c22009-05-08 17:46:40 -04001938 {
1939 if (err_dw0 == 0x80400002)
1940 mv_printk("found reserved error, why?\n");
1941
1942 task->ata_task.use_ncq = 0;
Srinivas9dc9fd92010-02-15 00:00:00 -06001943 mvs_sata_done(mvi, task, slot_idx, err_dw0);
Andy Yan20b09c22009-05-08 17:46:40 -04001944 }
Jeff Garzikdd4969a2009-05-08 17:44:01 -04001945 break;
1946 default:
1947 break;
1948 }
1949
Jeff Garzikdd4969a2009-05-08 17:44:01 -04001950 return stat;
1951}
1952
Andy Yan20b09c22009-05-08 17:46:40 -04001953int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
Jeff Garzikdd4969a2009-05-08 17:44:01 -04001954{
1955 u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
1956 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1957 struct sas_task *task = slot->task;
Andy Yan20b09c22009-05-08 17:46:40 -04001958 struct mvs_device *mvi_dev = NULL;
Jeff Garzikdd4969a2009-05-08 17:44:01 -04001959 struct task_status_struct *tstat;
Srinivas9dc9fd92010-02-15 00:00:00 -06001960 struct domain_device *dev;
1961 u32 aborted;
Andy Yan20b09c22009-05-08 17:46:40 -04001962
Jeff Garzikdd4969a2009-05-08 17:44:01 -04001963 void *to;
Andy Yan20b09c22009-05-08 17:46:40 -04001964 enum exec_status sts;
Jeff Garzikdd4969a2009-05-08 17:44:01 -04001965
Andy Yan20b09c22009-05-08 17:46:40 -04001966 if (mvi->exp_req)
1967 mvi->exp_req--;
Srinivas9dc9fd92010-02-15 00:00:00 -06001968 if (unlikely(!task || !task->lldd_task || !task->dev))
Jeff Garzikdd4969a2009-05-08 17:44:01 -04001969 return -1;
1970
Andy Yan20b09c22009-05-08 17:46:40 -04001971 tstat = &task->task_status;
Srinivas9dc9fd92010-02-15 00:00:00 -06001972 dev = task->dev;
1973 mvi_dev = dev->lldd_dev;
Andy Yan20b09c22009-05-08 17:46:40 -04001974
Jeff Garzikdd4969a2009-05-08 17:44:01 -04001975 mvs_hba_cq_dump(mvi);
1976
1977 spin_lock(&task->task_state_lock);
Andy Yan20b09c22009-05-08 17:46:40 -04001978 task->task_state_flags &=
1979 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
1980 task->task_state_flags |= SAS_TASK_STATE_DONE;
1981 /* race condition */
Jeff Garzikdd4969a2009-05-08 17:44:01 -04001982 aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
Jeff Garzikdd4969a2009-05-08 17:44:01 -04001983 spin_unlock(&task->task_state_lock);
1984
Jeff Garzikdd4969a2009-05-08 17:44:01 -04001985 memset(tstat, 0, sizeof(*tstat));
1986 tstat->resp = SAS_TASK_COMPLETE;
1987
Andy Yan20b09c22009-05-08 17:46:40 -04001988 if (unlikely(aborted)) {
1989 tstat->stat = SAS_ABORTED_TASK;
Srinivas9dc9fd92010-02-15 00:00:00 -06001990 if (mvi_dev && mvi_dev->running_req)
1991 mvi_dev->running_req--;
Andy Yan20b09c22009-05-08 17:46:40 -04001992 if (sas_protocol_ata(task->task_proto))
1993 mvs_free_reg_set(mvi, mvi_dev);
1994
1995 mvs_slot_task_free(mvi, task, slot, slot_idx);
1996 return -1;
1997 }
1998
Srinivas9dc9fd92010-02-15 00:00:00 -06001999 if (unlikely(!mvi_dev || flags)) {
2000 if (!mvi_dev)
2001 mv_dprintk("port has no device.\n");
Andy Yan20b09c22009-05-08 17:46:40 -04002002 tstat->stat = SAS_PHY_DOWN;
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002003 goto out;
2004 }
2005
2006 /* error info record present */
2007 if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
2008 tstat->stat = mvs_slot_err(mvi, task, slot_idx);
Srinivas9dc9fd92010-02-15 00:00:00 -06002009 tstat->resp = SAS_TASK_COMPLETE;
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002010 goto out;
2011 }
2012
2013 switch (task->task_proto) {
2014 case SAS_PROTOCOL_SSP:
2015 /* hw says status == 0, datapres == 0 */
2016 if (rx_desc & RXQ_GOOD) {
James Bottomleydf64d3c2010-07-27 15:51:13 -05002017 tstat->stat = SAM_STAT_GOOD;
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002018 tstat->resp = SAS_TASK_COMPLETE;
2019 }
2020 /* response frame present */
2021 else if (rx_desc & RXQ_RSP) {
Andy Yan20b09c22009-05-08 17:46:40 -04002022 struct ssp_response_iu *iu = slot->response +
2023 sizeof(struct mvs_err_info);
2024 sas_ssp_task_response(mvi->dev, task, iu);
2025 } else
James Bottomleydf64d3c2010-07-27 15:51:13 -05002026 tstat->stat = SAM_STAT_CHECK_CONDITION;
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002027 break;
2028
2029 case SAS_PROTOCOL_SMP: {
2030 struct scatterlist *sg_resp = &task->smp_task.smp_resp;
James Bottomleydf64d3c2010-07-27 15:51:13 -05002031 tstat->stat = SAM_STAT_GOOD;
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002032 to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
2033 memcpy(to + sg_resp->offset,
2034 slot->response + sizeof(struct mvs_err_info),
2035 sg_dma_len(sg_resp));
2036 kunmap_atomic(to, KM_IRQ0);
2037 break;
2038 }
2039
2040 case SAS_PROTOCOL_SATA:
2041 case SAS_PROTOCOL_STP:
2042 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
2043 tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
2044 break;
2045 }
2046
2047 default:
James Bottomleydf64d3c2010-07-27 15:51:13 -05002048 tstat->stat = SAM_STAT_CHECK_CONDITION;
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002049 break;
2050 }
Srinivas9dc9fd92010-02-15 00:00:00 -06002051 if (!slot->port->port_attached) {
2052 mv_dprintk("port %d has been removed.\n", slot->port->sas_port.id);
2053 tstat->stat = SAS_PHY_DOWN;
2054 }
2055
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002056
2057out:
Srinivas9dc9fd92010-02-15 00:00:00 -06002058 if (mvi_dev && mvi_dev->running_req) {
2059 mvi_dev->running_req--;
2060 if (sas_protocol_ata(task->task_proto) && !mvi_dev->running_req)
Andy Yan0f980a82009-05-11 21:49:52 +08002061 mvs_free_reg_set(mvi, mvi_dev);
2062 }
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002063 mvs_slot_task_free(mvi, task, slot, slot_idx);
Andy Yan20b09c22009-05-08 17:46:40 -04002064 sts = tstat->stat;
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002065
2066 spin_unlock(&mvi->lock);
Andy Yan20b09c22009-05-08 17:46:40 -04002067 if (task->task_done)
2068 task->task_done(task);
2069 else
2070 mv_dprintk("task has no task_done callback.\n");
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002071 spin_lock(&mvi->lock);
Andy Yan20b09c22009-05-08 17:46:40 -04002072
2073 return sts;
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002074}
2075
Srinivas9dc9fd92010-02-15 00:00:00 -06002076void mvs_do_release_task(struct mvs_info *mvi,
Andy Yan20b09c22009-05-08 17:46:40 -04002077 int phy_no, struct domain_device *dev)
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002078{
Srinivas9dc9fd92010-02-15 00:00:00 -06002079 u32 slot_idx;
Andy Yan20b09c22009-05-08 17:46:40 -04002080 struct mvs_phy *phy;
2081 struct mvs_port *port;
2082 struct mvs_slot_info *slot, *slot2;
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002083
Andy Yan20b09c22009-05-08 17:46:40 -04002084 phy = &mvi->phy[phy_no];
2085 port = phy->port;
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002086 if (!port)
2087 return;
Srinivas9dc9fd92010-02-15 00:00:00 -06002088 /* drain the completion queue in case the request already finished */
2089 mvs_int_rx(mvi, false);
2090
2091
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002092
Andy Yan20b09c22009-05-08 17:46:40 -04002093 list_for_each_entry_safe(slot, slot2, &port->list, entry) {
2094 struct sas_task *task;
2095 slot_idx = (u32) (slot - mvi->slot_info);
2096 task = slot->task;
2097
2098 if (dev && task->dev != dev)
2099 continue;
2100
2101 mv_printk("Release slot [%x] tag[%x], task [%p]:\n",
2102 slot_idx, slot->slot_tag, task);
Srinivas9dc9fd92010-02-15 00:00:00 -06002103 MVS_CHIP_DISP->command_active(mvi, slot_idx);
Andy Yan20b09c22009-05-08 17:46:40 -04002104
2105 mvs_slot_complete(mvi, slot_idx, 1);
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002106 }
2107}
2108
Srinivas9dc9fd92010-02-15 00:00:00 -06002109void mvs_release_task(struct mvs_info *mvi,
2110 struct domain_device *dev)
2111{
2112 int i, phyno[WIDE_PORT_MAX_PHY], num;
2113 /* housekeeper */
2114 num = mvs_find_dev_phyno(dev, phyno);
2115 for (i = 0; i < num; i++)
2116 mvs_do_release_task(mvi, phyno[i], dev);
2117}
2118
Andy Yan20b09c22009-05-08 17:46:40 -04002119static void mvs_phy_disconnected(struct mvs_phy *phy)
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002120{
Andy Yan20b09c22009-05-08 17:46:40 -04002121 phy->phy_attached = 0;
2122 phy->att_dev_info = 0;
2123 phy->att_dev_sas_addr = 0;
2124}
2125
2126static void mvs_work_queue(struct work_struct *work)
2127{
2128 struct delayed_work *dw = container_of(work, struct delayed_work, work);
2129 struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q);
2130 struct mvs_info *mvi = mwq->mvi;
2131 unsigned long flags;
2132
2133 spin_lock_irqsave(&mvi->lock, flags);
2134 if (mwq->handler & PHY_PLUG_EVENT) {
2135 u32 phy_no = (unsigned long) mwq->data;
2136 struct sas_ha_struct *sas_ha = mvi->sas;
2137 struct mvs_phy *phy = &mvi->phy[phy_no];
2138 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2139
2140 if (phy->phy_event & PHY_PLUG_OUT) {
2141 u32 tmp;
2142 struct sas_identify_frame *id;
2143 id = (struct sas_identify_frame *)phy->frame_rcvd;
2144 tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no);
2145 phy->phy_event &= ~PHY_PLUG_OUT;
2146 if (!(tmp & PHY_READY_MASK)) {
2147 sas_phy_disconnected(sas_phy);
2148 mvs_phy_disconnected(phy);
2149 sas_ha->notify_phy_event(sas_phy,
2150 PHYE_LOSS_OF_SIGNAL);
2151 mv_dprintk("phy%d Removed Device\n", phy_no);
2152 } else {
2153 MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
2154 mvs_update_phyinfo(mvi, phy_no, 1);
2155 mvs_bytes_dmaed(mvi, phy_no);
2156 mvs_port_notify_formed(sas_phy, 0);
2157 mv_dprintk("phy%d Attached Device\n", phy_no);
2158 }
2159 }
2160 }
2161 list_del(&mwq->entry);
2162 spin_unlock_irqrestore(&mvi->lock, flags);
2163 kfree(mwq);
2164}
2165
2166static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler)
2167{
2168 struct mvs_wq *mwq;
2169 int ret = 0;
2170
2171 mwq = kmalloc(sizeof(struct mvs_wq), GFP_ATOMIC);
2172 if (mwq) {
2173 mwq->mvi = mvi;
2174 mwq->data = data;
2175 mwq->handler = handler;
2176 MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_work_queue, mwq);
2177 list_add_tail(&mwq->entry, &mvi->wq_list);
2178 schedule_delayed_work(&mwq->work_q, HZ * 2);
2179 } else
2180 ret = -ENOMEM;
2181
2182 return ret;
2183}
2184
2185static void mvs_sig_time_out(unsigned long tphy)
2186{
2187 struct mvs_phy *phy = (struct mvs_phy *)tphy;
2188 struct mvs_info *mvi = phy->mvi;
2189 u8 phy_no;
2190
2191 for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) {
2192 if (&mvi->phy[phy_no] == phy) {
2193 mv_dprintk("signature FIS timed out, resetting phy %d\n",
2194 phy_no+mvi->id*mvi->chip->n_phy);
2195 MVS_CHIP_DISP->phy_reset(mvi, phy_no, 1);
2196 }
2197 }
2198}
2199
Andy Yan20b09c22009-05-08 17:46:40 -04002200void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
2201{
2202 u32 tmp;
2203 struct sas_ha_struct *sas_ha = mvi->sas;
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002204 struct mvs_phy *phy = &mvi->phy[phy_no];
2205 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2206
Andy Yan20b09c22009-05-08 17:46:40 -04002207 phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no);
2208 mv_dprintk("port %d ctrl sts=0x%X.\n", phy_no+mvi->id*mvi->chip->n_phy,
2209 MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no));
2210 mv_dprintk("Port %d irq sts = 0x%X\n", phy_no+mvi->id*mvi->chip->n_phy,
2211 phy->irq_status);
2212
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002213 /*
2214 * events is the port event now;
 2215 * we need to check the interrupt status that belongs to each port.
2216 */
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002217
Srinivas9dc9fd92010-02-15 00:00:00 -06002218 if (phy->irq_status & PHYEV_DCDR_ERR) {
Andy Yan20b09c22009-05-08 17:46:40 -04002219 mv_dprintk("port %d STP decoding error.\n",
Srinivas9dc9fd92010-02-15 00:00:00 -06002220 phy_no + mvi->id*mvi->chip->n_phy);
2221 }
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002222
Andy Yan20b09c22009-05-08 17:46:40 -04002223 if (phy->irq_status & PHYEV_POOF) {
2224 if (!(phy->phy_event & PHY_PLUG_OUT)) {
2225 int dev_sata = phy->phy_type & PORT_TYPE_SATA;
2226 int ready;
Srinivas9dc9fd92010-02-15 00:00:00 -06002227 mvs_do_release_task(mvi, phy_no, NULL);
Andy Yan20b09c22009-05-08 17:46:40 -04002228 phy->phy_event |= PHY_PLUG_OUT;
Srinivas9dc9fd92010-02-15 00:00:00 -06002229 MVS_CHIP_DISP->clear_srs_irq(mvi, 0, 1);
Andy Yan20b09c22009-05-08 17:46:40 -04002230 mvs_handle_event(mvi,
2231 (void *)(unsigned long)phy_no,
2232 PHY_PLUG_EVENT);
2233 ready = mvs_is_phy_ready(mvi, phy_no);
2234 if (!ready)
2235 mv_dprintk("phy%d Unplug Notice\n",
2236 phy_no +
2237 mvi->id * mvi->chip->n_phy);
2238 if (ready || dev_sata) {
2239 if (MVS_CHIP_DISP->stp_reset)
2240 MVS_CHIP_DISP->stp_reset(mvi,
2241 phy_no);
2242 else
2243 MVS_CHIP_DISP->phy_reset(mvi,
2244 phy_no, 0);
2245 return;
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002246 }
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002247 }
2248 }
Andy Yan20b09c22009-05-08 17:46:40 -04002249
2250 if (phy->irq_status & PHYEV_COMWAKE) {
2251 tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, phy_no);
2252 MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no,
2253 tmp | PHYEV_SIG_FIS);
2254 if (phy->timer.function == NULL) {
2255 phy->timer.data = (unsigned long)phy;
2256 phy->timer.function = mvs_sig_time_out;
2257 phy->timer.expires = jiffies + 10*HZ;
2258 add_timer(&phy->timer);
2259 }
2260 }
2261 if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
2262 phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
Andy Yan20b09c22009-05-08 17:46:40 -04002263 mv_dprintk("notify plug in on phy[%d]\n", phy_no);
2264 if (phy->phy_status) {
2265 mdelay(10);
2266 MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
2267 if (phy->phy_type & PORT_TYPE_SATA) {
2268 tmp = MVS_CHIP_DISP->read_port_irq_mask(
2269 mvi, phy_no);
2270 tmp &= ~PHYEV_SIG_FIS;
2271 MVS_CHIP_DISP->write_port_irq_mask(mvi,
2272 phy_no, tmp);
2273 }
2274 mvs_update_phyinfo(mvi, phy_no, 0);
Srinivas9dc9fd92010-02-15 00:00:00 -06002275 if (phy->phy_type & PORT_TYPE_SAS) {
2276 MVS_CHIP_DISP->phy_reset(mvi, phy_no, 2);
2277 mdelay(10);
2278 }
2279
Andy Yan20b09c22009-05-08 17:46:40 -04002280 mvs_bytes_dmaed(mvi, phy_no);
2281 /* whether driver is going to handle hot plug */
2282 if (phy->phy_event & PHY_PLUG_OUT) {
2283 mvs_port_notify_formed(sas_phy, 0);
2284 phy->phy_event &= ~PHY_PLUG_OUT;
2285 }
2286 } else {
2287 mv_dprintk("plug-in interrupt but phy%d is gone\n",
2288 phy_no + mvi->id*mvi->chip->n_phy);
2289 }
2290 } else if (phy->irq_status & PHYEV_BROAD_CH) {
2291 mv_dprintk("port %d broadcast change.\n",
2292 phy_no + mvi->id*mvi->chip->n_phy);
2293 /* exception for Samsung disk drives */
2294 mdelay(1000);
2295 sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
2296 }
2297 MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002298}
2299
Andy Yan20b09c22009-05-08 17:46:40 -04002300int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002301{
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002302 u32 rx_prod_idx, rx_desc;
2303 bool attn = false;
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002304
2305 /* the first dword in the RX ring is special: it contains
2306 * a mirror of the hardware's RX producer index, so that
2307 * we don't have to stall the CPU reading that register.
2308 * The actual RX ring is offset by one dword, due to this.
2309 */
2310 rx_prod_idx = mvi->rx_cons;
2311 mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
2312 if (mvi->rx_cons == 0xfff) /* h/w hasn't touched RX ring yet */
2313 return 0;
2314
2315 /* The completion queue entry may come late; read the register and try again.
 2316 * Note: if interrupt coalescing is enabled, the register must be read
 2317 * every time.  (A standalone model of this consumer loop follows the function.)
2318 */
Andy Yan20b09c22009-05-08 17:46:40 -04002319 if (unlikely(mvi->rx_cons == rx_prod_idx))
2320 mvi->rx_cons = MVS_CHIP_DISP->rx_update(mvi) & RX_RING_SZ_MASK;
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002321
2322 if (mvi->rx_cons == rx_prod_idx)
2323 return 0;
2324
2325 while (mvi->rx_cons != rx_prod_idx) {
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002326 /* increment our internal RX consumer pointer */
2327 rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002328 rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);
2329
2330 if (likely(rx_desc & RXQ_DONE))
2331 mvs_slot_complete(mvi, rx_desc, 0);
2332 if (rx_desc & RXQ_ATTN) {
2333 attn = true;
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002334 } else if (rx_desc & RXQ_ERR) {
2335 if (!(rx_desc & RXQ_DONE))
2336 mvs_slot_complete(mvi, rx_desc, 0);
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002337 } else if (rx_desc & RXQ_SLOT_RESET) {
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002338 mvs_slot_free(mvi, rx_desc);
2339 }
2340 }
2341
2342 if (attn && self_clear)
Andy Yan20b09c22009-05-08 17:46:40 -04002343 MVS_CHIP_DISP->int_full(mvi);
Jeff Garzikdd4969a2009-05-08 17:44:01 -04002344 return 0;
2345}
2346
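
/*
 * Illustrative sketch (not part of the driver): as the comment in
 * mvs_int_rx() explains, slot 0 of the RX ring mirrors the hardware's
 * producer index so the CPU can see how far the hardware has written
 * without reading a register; the descriptors themselves start at slot 1.
 * A minimal userspace model of that consumer loop; all names are
 * hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define MODEL_RX_RING_SZ 16	/* power of two, like MVS_RX_RING_SZ */

/* rx[0] mirrors the producer index; rx[1..MODEL_RX_RING_SZ] hold descriptors */
static uint32_t rx[MODEL_RX_RING_SZ + 1];

static void model_consume(uint32_t *cons)
{
	uint32_t prod = rx[0];	/* snapshot of the mirrored producer index */

	while (*cons != prod) {
		/* advance the consumer, wrapping at the ring size */
		*cons = (*cons + 1) & (MODEL_RX_RING_SZ - 1);
		printf("completing descriptor 0x%x at slot %u\n",
		       (unsigned int)rx[*cons + 1], (unsigned int)*cons);
	}
}

int main(void)
{
	uint32_t cons = 0;

	/* pretend the hardware wrote two descriptors and bumped the mirror */
	rx[1 + 1] = 0xd001;	/* descriptor for slot 1 */
	rx[2 + 1] = 0xd002;	/* descriptor for slot 2 */
	rx[0] = 2;

	model_consume(&cons);
	return 0;
}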