blob: 4525d3664ccffdb3b25eb07055b47b6e4ed0e994 [file] [log] [blame]
Taku Izumi8cdc3f62015-08-21 17:29:18 +09001/*
2 * FUJITSU Extended Socket Network Device driver
3 * Copyright (c) 2015 FUJITSU LIMITED
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, see <http://www.gnu.org/licenses/>.
16 *
17 * The full GNU General Public License is included in this distribution in
18 * the file called "COPYING".
19 *
20 */
21
22#include "fjes_hw.h"
23#include "fjes.h"
24
Taku Izumi785f28e2015-08-21 17:29:35 +090025static void fjes_hw_update_zone_task(struct work_struct *);
26
/* supported MTU list; each entry is built from an 8/16/32/64 KiB payload
 * via FJES_MTU_DEFINE(), and the list is terminated by a 0 sentinel
 */
const u32 fjes_support_mtu[] = {
	FJES_MTU_DEFINE(8 * 1024),
	FJES_MTU_DEFINE(16 * 1024),
	FJES_MTU_DEFINE(32 * 1024),
	FJES_MTU_DEFINE(64 * 1024),
	0
};
35
36u32 fjes_hw_rd32(struct fjes_hw *hw, u32 reg)
37{
38 u8 *base = hw->base;
39 u32 value = 0;
40
41 value = readl(&base[reg]);
42
43 return value;
44}
45
/* Reserve and map the device's MMIO window described by hw->hw_res.
 * Returns the mapped base address, or NULL if the region is already
 * claimed.  Paired with fjes_hw_iounmap().
 */
static u8 *fjes_hw_iomap(struct fjes_hw *hw)
{
	u8 *base;

	if (!request_mem_region(hw->hw_res.start, hw->hw_res.size,
				fjes_driver_name)) {
		pr_err("request_mem_region failed\n");
		return NULL;
	}

	/* NOTE(review): a NULL return from ioremap_nocache() leaves the
	 * mem region reserved — caller only sees NULL; verify intent.
	 */
	base = (u8 *)ioremap_nocache(hw->hw_res.start, hw->hw_res.size);

	return base;
}
60
/* Undo fjes_hw_iomap(): unmap the MMIO window and release the region. */
static void fjes_hw_iounmap(struct fjes_hw *hw)
{
	iounmap(hw->base);
	release_mem_region(hw->hw_res.start, hw->hw_res.size);
}
66
Taku Izumi8cdc3f62015-08-21 17:29:18 +090067int fjes_hw_reset(struct fjes_hw *hw)
68{
69 union REG_DCTL dctl;
70 int timeout;
71
72 dctl.reg = 0;
73 dctl.bits.reset = 1;
74 wr32(XSCT_DCTL, dctl.reg);
75
76 timeout = FJES_DEVICE_RESET_TIMEOUT * 1000;
77 dctl.reg = rd32(XSCT_DCTL);
78 while ((dctl.bits.reset == 1) && (timeout > 0)) {
79 msleep(1000);
80 dctl.reg = rd32(XSCT_DCTL);
81 timeout -= 1000;
82 }
83
84 return timeout > 0 ? 0 : -EIO;
85}
86
87static int fjes_hw_get_max_epid(struct fjes_hw *hw)
88{
89 union REG_MAX_EP info;
90
91 info.reg = rd32(XSCT_MAX_EP);
92
93 return info.bits.maxep;
94}
95
96static int fjes_hw_get_my_epid(struct fjes_hw *hw)
97{
98 union REG_OWNER_EPID info;
99
100 info.reg = rd32(XSCT_OWNER_EPID);
101
102 return info.bits.epid;
103}
104
105static int fjes_hw_alloc_shared_status_region(struct fjes_hw *hw)
106{
107 size_t size;
108
109 size = sizeof(struct fjes_device_shared_info) +
110 (sizeof(u8) * hw->max_epid);
111 hw->hw_info.share = kzalloc(size, GFP_KERNEL);
112 if (!hw->hw_info.share)
113 return -ENOMEM;
114
115 hw->hw_info.share->epnum = hw->max_epid;
116
117 return 0;
118}
119
/* Free the region allocated by fjes_hw_alloc_shared_status_region(). */
static void fjes_hw_free_shared_status_region(struct fjes_hw *hw)
{
	kfree(hw->hw_info.share);
	hw->hw_info.share = NULL;
}
125
Taku Izumi8cdc3f62015-08-21 17:29:18 +0900126static int fjes_hw_alloc_epbuf(struct epbuf_handler *epbh)
127{
128 void *mem;
129
130 mem = vzalloc(EP_BUFFER_SIZE);
131 if (!mem)
132 return -ENOMEM;
133
134 epbh->buffer = mem;
135 epbh->size = EP_BUFFER_SIZE;
136
137 epbh->info = (union ep_buffer_info *)mem;
138 epbh->ring = (u8 *)(mem + sizeof(union ep_buffer_info));
139
140 return 0;
141}
142
Taku Izumia18aaec2015-08-21 17:29:19 +0900143static void fjes_hw_free_epbuf(struct epbuf_handler *epbh)
144{
145 if (epbh->buffer)
146 vfree(epbh->buffer);
147
148 epbh->buffer = NULL;
149 epbh->size = 0;
150
151 epbh->info = NULL;
152 epbh->ring = NULL;
153}
154
/* (Re)initialize an endpoint buffer header for @mac_addr and @mtu.
 * The configured VLAN IDs are preserved across the reset; everything else
 * in the header is rebuilt from scratch.
 */
void fjes_hw_setup_epbuf(struct epbuf_handler *epbh, u8 *mac_addr, u32 mtu)
{
	union ep_buffer_info *info = epbh->info;
	u16 vlan_id[EP_BUFFER_SUPPORT_VLAN_MAX];
	int i;

	/* save VLAN table before wiping the header */
	for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++)
		vlan_id[i] = info->v1i.vlan_id[i];

	memset(info, 0, sizeof(union ep_buffer_info));

	info->v1i.version = 0; /* version 0 */

	for (i = 0; i < ETH_ALEN; i++)
		info->v1i.mac_addr[i] = mac_addr[i];

	/* empty ring: head at 0, next write slot at 1 */
	info->v1i.head = 0;
	info->v1i.tail = 1;

	info->v1i.info_size = sizeof(union ep_buffer_info);
	info->v1i.buffer_size = epbh->size - info->v1i.info_size;

	/* ring geometry derives from the frame size for this MTU */
	info->v1i.frame_max = FJES_MTU_TO_FRAME_SIZE(mtu);
	info->v1i.count_max =
	    EP_RING_NUM(info->v1i.buffer_size, info->v1i.frame_max);

	/* restore saved VLAN table */
	for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++)
		info->v1i.vlan_id[i] = vlan_id[i];
}
184
/* Program the device command registers with the request/response buffer
 * lengths and the physical addresses (split into low/high 32-bit halves)
 * of the request, response and shared status buffers from @param.
 */
void
fjes_hw_init_command_registers(struct fjes_hw *hw,
			       struct fjes_device_command_param *param)
{
	/* Request Buffer length */
	wr32(XSCT_REQBL, (__le32)(param->req_len));
	/* Response Buffer Length */
	wr32(XSCT_RESPBL, (__le32)(param->res_len));

	/* Request Buffer Address */
	wr32(XSCT_REQBAL,
	     (__le32)(param->req_start & GENMASK_ULL(31, 0)));
	wr32(XSCT_REQBAH,
	     (__le32)((param->req_start & GENMASK_ULL(63, 32)) >> 32));

	/* Response Buffer Address */
	wr32(XSCT_RESPBAL,
	     (__le32)(param->res_start & GENMASK_ULL(31, 0)));
	wr32(XSCT_RESPBAH,
	     (__le32)((param->res_start & GENMASK_ULL(63, 32)) >> 32));

	/* Share status address */
	wr32(XSCT_SHSTSAL,
	     (__le32)(param->share_start & GENMASK_ULL(31, 0)));
	wr32(XSCT_SHSTSAH,
	     (__le32)((param->share_start & GENMASK_ULL(63, 32)) >> 32));
}
212
213static int fjes_hw_setup(struct fjes_hw *hw)
214{
215 u8 mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
216 struct fjes_device_command_param param;
217 struct ep_share_mem_info *buf_pair;
218 size_t mem_size;
219 int result;
220 int epidx;
221 void *buf;
222
223 hw->hw_info.max_epid = &hw->max_epid;
224 hw->hw_info.my_epid = &hw->my_epid;
225
226 buf = kcalloc(hw->max_epid, sizeof(struct ep_share_mem_info),
227 GFP_KERNEL);
228 if (!buf)
229 return -ENOMEM;
230
231 hw->ep_shm_info = (struct ep_share_mem_info *)buf;
232
233 mem_size = FJES_DEV_REQ_BUF_SIZE(hw->max_epid);
234 hw->hw_info.req_buf = kzalloc(mem_size, GFP_KERNEL);
235 if (!(hw->hw_info.req_buf))
236 return -ENOMEM;
237
238 hw->hw_info.req_buf_size = mem_size;
239
240 mem_size = FJES_DEV_RES_BUF_SIZE(hw->max_epid);
241 hw->hw_info.res_buf = kzalloc(mem_size, GFP_KERNEL);
242 if (!(hw->hw_info.res_buf))
243 return -ENOMEM;
244
245 hw->hw_info.res_buf_size = mem_size;
246
247 result = fjes_hw_alloc_shared_status_region(hw);
248 if (result)
249 return result;
250
251 hw->hw_info.buffer_share_bit = 0;
252 hw->hw_info.buffer_unshare_reserve_bit = 0;
253
254 for (epidx = 0; epidx < hw->max_epid; epidx++) {
255 if (epidx != hw->my_epid) {
256 buf_pair = &hw->ep_shm_info[epidx];
257
258 result = fjes_hw_alloc_epbuf(&buf_pair->tx);
259 if (result)
260 return result;
261
262 result = fjes_hw_alloc_epbuf(&buf_pair->rx);
263 if (result)
264 return result;
265
266 fjes_hw_setup_epbuf(&buf_pair->tx, mac,
267 fjes_support_mtu[0]);
268 fjes_hw_setup_epbuf(&buf_pair->rx, mac,
269 fjes_support_mtu[0]);
270 }
271 }
272
273 memset(&param, 0, sizeof(param));
274
275 param.req_len = hw->hw_info.req_buf_size;
276 param.req_start = __pa(hw->hw_info.req_buf);
277 param.res_len = hw->hw_info.res_buf_size;
278 param.res_start = __pa(hw->hw_info.res_buf);
279
280 param.share_start = __pa(hw->hw_info.share->ep_status);
281
282 fjes_hw_init_command_registers(hw, &param);
283
284 return 0;
285}
286
/* Free everything allocated by fjes_hw_setup().  A NULL ep_shm_info is
 * treated as "setup never ran" and the whole teardown is skipped.
 */
static void fjes_hw_cleanup(struct fjes_hw *hw)
{
	int epidx;

	if (!hw->ep_shm_info)
		return;

	fjes_hw_free_shared_status_region(hw);

	kfree(hw->hw_info.req_buf);
	hw->hw_info.req_buf = NULL;

	kfree(hw->hw_info.res_buf);
	hw->hw_info.res_buf = NULL;

	/* per-partner endpoint buffers; our own slot was never allocated */
	for (epidx = 0; epidx < hw->max_epid ; epidx++) {
		if (epidx == hw->my_epid)
			continue;
		fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].tx);
		fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].rx);
	}

	kfree(hw->ep_shm_info);
	hw->ep_shm_info = NULL;
}
312
Taku Izumi8cdc3f62015-08-21 17:29:18 +0900313int fjes_hw_init(struct fjes_hw *hw)
314{
315 int ret;
316
317 hw->base = fjes_hw_iomap(hw);
318 if (!hw->base)
319 return -EIO;
320
321 ret = fjes_hw_reset(hw);
322 if (ret)
323 return ret;
324
325 fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);
326
Taku Izumi785f28e2015-08-21 17:29:35 +0900327 INIT_WORK(&hw->update_zone_task, fjes_hw_update_zone_task);
328
Taku Izumi8cdc3f62015-08-21 17:29:18 +0900329 mutex_init(&hw->hw_info.lock);
330
331 hw->max_epid = fjes_hw_get_max_epid(hw);
332 hw->my_epid = fjes_hw_get_my_epid(hw);
333
334 if ((hw->max_epid == 0) || (hw->my_epid >= hw->max_epid))
335 return -ENXIO;
336
337 ret = fjes_hw_setup(hw);
338
339 return ret;
340}
341
/* Tear down the hardware: reset the device, unmap MMIO, free shared
 * buffers, and cancel the zone-update worker.
 */
void fjes_hw_exit(struct fjes_hw *hw)
{
	int ret;

	if (hw->base) {
		ret = fjes_hw_reset(hw);
		if (ret)
			pr_err("%s: reset error", __func__);

		fjes_hw_iounmap(hw);
		hw->base = NULL;
	}

	fjes_hw_cleanup(hw);

	/* NOTE(review): the worker is cancelled AFTER cleanup frees the
	 * buffers it touches — if it can still be queued at this point
	 * that is a use-after-free window; confirm ordering with callers.
	 */
	cancel_work_sync(&hw->update_zone_task);
}
359
/* Issue device command @type through the CR register and wait for
 * completion in CS, polling once per second for up to
 * FJES_COMMAND_REQ_TIMEOUT seconds.  If the device flags an immediate
 * error in CR, the error-info field is mapped to a status code instead.
 */
static enum fjes_dev_command_response_e
fjes_hw_issue_request_command(struct fjes_hw *hw,
			      enum fjes_dev_command_request_type type)
{
	enum fjes_dev_command_response_e ret = FJES_CMD_STATUS_UNKNOWN;
	union REG_CR cr;
	union REG_CS cs;
	int timeout;

	cr.reg = 0;
	cr.bits.req_start = 1;
	cr.bits.req_code = type;
	wr32(XSCT_CR, cr.reg);
	/* read back: device reports immediate errors in CR */
	cr.reg = rd32(XSCT_CR);

	if (cr.bits.error == 0) {
		timeout = FJES_COMMAND_REQ_TIMEOUT * 1000;
		cs.reg = rd32(XSCT_CS);

		while ((cs.bits.complete != 1) && timeout > 0) {
			msleep(1000);
			cs.reg = rd32(XSCT_CS);
			timeout -= 1000;
		}

		if (cs.bits.complete == 1)
			ret = FJES_CMD_STATUS_NORMAL;
		else if (timeout <= 0)
			ret = FJES_CMD_STATUS_TIMEOUT;

	} else {
		switch (cr.bits.err_info) {
		case FJES_CMD_REQ_ERR_INFO_PARAM:
			ret = FJES_CMD_STATUS_ERROR_PARAM;
			break;
		case FJES_CMD_REQ_ERR_INFO_STATUS:
			ret = FJES_CMD_STATUS_ERROR_STATUS;
			break;
		default:
			ret = FJES_CMD_STATUS_UNKNOWN;
			break;
		}
	}

	return ret;
}
406
407int fjes_hw_request_info(struct fjes_hw *hw)
408{
409 union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
410 union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
411 enum fjes_dev_command_response_e ret;
412 int result;
413
414 memset(req_buf, 0, hw->hw_info.req_buf_size);
415 memset(res_buf, 0, hw->hw_info.res_buf_size);
416
417 req_buf->info.length = FJES_DEV_COMMAND_INFO_REQ_LEN;
418
419 res_buf->info.length = 0;
420 res_buf->info.code = 0;
421
422 ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_INFO);
423
424 result = 0;
425
426 if (FJES_DEV_COMMAND_INFO_RES_LEN((*hw->hw_info.max_epid)) !=
427 res_buf->info.length) {
428 result = -ENOMSG;
429 } else if (ret == FJES_CMD_STATUS_NORMAL) {
430 switch (res_buf->info.code) {
431 case FJES_CMD_REQ_RES_CODE_NORMAL:
432 result = 0;
433 break;
434 default:
435 result = -EPERM;
436 break;
437 }
438 } else {
439 switch (ret) {
440 case FJES_CMD_STATUS_UNKNOWN:
441 result = -EPERM;
442 break;
443 case FJES_CMD_STATUS_TIMEOUT:
444 result = -EBUSY;
445 break;
446 case FJES_CMD_STATUS_ERROR_PARAM:
447 result = -EPERM;
448 break;
449 case FJES_CMD_STATUS_ERROR_STATUS:
450 result = -EPERM;
451 break;
452 default:
453 result = -EPERM;
454 break;
455 }
456 }
457
458 return result;
459}
460
/* Share this node's TX/RX endpoint buffers with partner @dest_epid via the
 * SHARE_BUFFER command.  The request carries each buffer's size followed
 * by the physical address of every EP_BUFFER_INFO_SIZE-sized chunk.  The
 * command is retried while the partner reports BUSY, with a per-EPID
 * staggered backoff, until FJES_COMMAND_REQ_BUFF_TIMEOUT expires.
 * Returns 0 on success (marking the partner in buffer_share_bit),
 * -ENOMSG on malformed response, -EBUSY on busy/timeout, -EPERM otherwise.
 */
int fjes_hw_register_buff_addr(struct fjes_hw *hw, int dest_epid,
			       struct ep_share_mem_info *buf_pair)
{
	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
	enum fjes_dev_command_response_e ret;
	int page_count;
	int timeout;
	int i, idx;
	void *addr;
	int result;

	/* already shared with this partner — nothing to do */
	if (test_bit(dest_epid, &hw->hw_info.buffer_share_bit))
		return 0;

	memset(req_buf, 0, hw->hw_info.req_buf_size);
	memset(res_buf, 0, hw->hw_info.res_buf_size);

	req_buf->share_buffer.length = FJES_DEV_COMMAND_SHARE_BUFFER_REQ_LEN(
						buf_pair->tx.size,
						buf_pair->rx.size);
	req_buf->share_buffer.epid = dest_epid;

	idx = 0;
	/* TX buffer: size, then the physical address of each chunk */
	req_buf->share_buffer.buffer[idx++] = buf_pair->tx.size;
	page_count = buf_pair->tx.size / EP_BUFFER_INFO_SIZE;
	for (i = 0; i < page_count; i++) {
		addr = ((u8 *)(buf_pair->tx.buffer)) +
				(i * EP_BUFFER_INFO_SIZE);
		req_buf->share_buffer.buffer[idx++] =
			(__le64)(page_to_phys(vmalloc_to_page(addr)) +
					offset_in_page(addr));
	}

	/* RX buffer: same layout */
	req_buf->share_buffer.buffer[idx++] = buf_pair->rx.size;
	page_count = buf_pair->rx.size / EP_BUFFER_INFO_SIZE;
	for (i = 0; i < page_count; i++) {
		addr = ((u8 *)(buf_pair->rx.buffer)) +
				(i * EP_BUFFER_INFO_SIZE);
		req_buf->share_buffer.buffer[idx++] =
			(__le64)(page_to_phys(vmalloc_to_page(addr)) +
					offset_in_page(addr));
	}

	res_buf->share_buffer.length = 0;
	res_buf->share_buffer.code = 0;

	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_SHARE_BUFFER);

	/* retry while the partner is BUSY; the delay is staggered by our
	 * EPID so peers do not retry in lock-step
	 */
	timeout = FJES_COMMAND_REQ_BUFF_TIMEOUT * 1000;
	while ((ret == FJES_CMD_STATUS_NORMAL) &&
	       (res_buf->share_buffer.length ==
		FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN) &&
	       (res_buf->share_buffer.code == FJES_CMD_REQ_RES_CODE_BUSY) &&
	       (timeout > 0)) {
		msleep(200 + hw->my_epid * 20);
		timeout -= (200 + hw->my_epid * 20);

		res_buf->share_buffer.length = 0;
		res_buf->share_buffer.code = 0;

		ret = fjes_hw_issue_request_command(
				hw, FJES_CMD_REQ_SHARE_BUFFER);
	}

	result = 0;

	if (res_buf->share_buffer.length !=
	    FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN)
		result = -ENOMSG;
	else if (ret == FJES_CMD_STATUS_NORMAL) {
		switch (res_buf->share_buffer.code) {
		case FJES_CMD_REQ_RES_CODE_NORMAL:
			result = 0;
			set_bit(dest_epid, &hw->hw_info.buffer_share_bit);
			break;
		case FJES_CMD_REQ_RES_CODE_BUSY:
			result = -EBUSY;
			break;
		default:
			result = -EPERM;
			break;
		}
	} else {
		switch (ret) {
		case FJES_CMD_STATUS_UNKNOWN:
			result = -EPERM;
			break;
		case FJES_CMD_STATUS_TIMEOUT:
			result = -EBUSY;
			break;
		case FJES_CMD_STATUS_ERROR_PARAM:
		case FJES_CMD_STATUS_ERROR_STATUS:
		default:
			result = -EPERM;
			break;
		}
	}

	return result;
}
562
/* Revoke the buffer sharing with partner @dest_epid via the
 * UNSHARE_BUFFER command, retrying with a per-EPID staggered backoff
 * while the partner reports BUSY.  Mirrors
 * fjes_hw_register_buff_addr(): on success the partner's bit is cleared
 * from buffer_share_bit.
 * Returns 0 on success (or if not shared), -ENOMSG on malformed
 * response, -EBUSY on busy/timeout, -EPERM otherwise.
 */
int fjes_hw_unregister_buff_addr(struct fjes_hw *hw, int dest_epid)
{
	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
	struct fjes_device_shared_info *share = hw->hw_info.share;
	enum fjes_dev_command_response_e ret;
	int timeout;
	int result;

	if (!hw->base)
		return -EPERM;

	if (!req_buf || !res_buf || !share)
		return -EPERM;

	/* not currently shared with this partner — nothing to undo */
	if (!test_bit(dest_epid, &hw->hw_info.buffer_share_bit))
		return 0;

	memset(req_buf, 0, hw->hw_info.req_buf_size);
	memset(res_buf, 0, hw->hw_info.res_buf_size);

	req_buf->unshare_buffer.length =
			FJES_DEV_COMMAND_UNSHARE_BUFFER_REQ_LEN;
	req_buf->unshare_buffer.epid = dest_epid;

	res_buf->unshare_buffer.length = 0;
	res_buf->unshare_buffer.code = 0;

	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_UNSHARE_BUFFER);

	/* retry while the partner is BUSY, staggered by our EPID */
	timeout = FJES_COMMAND_REQ_BUFF_TIMEOUT * 1000;
	while ((ret == FJES_CMD_STATUS_NORMAL) &&
	       (res_buf->unshare_buffer.length ==
		FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN) &&
	       (res_buf->unshare_buffer.code ==
		FJES_CMD_REQ_RES_CODE_BUSY) &&
	       (timeout > 0)) {
		msleep(200 + hw->my_epid * 20);
		timeout -= (200 + hw->my_epid * 20);

		res_buf->unshare_buffer.length = 0;
		res_buf->unshare_buffer.code = 0;

		ret =
		fjes_hw_issue_request_command(hw, FJES_CMD_REQ_UNSHARE_BUFFER);
	}

	result = 0;

	if (res_buf->unshare_buffer.length !=
	    FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN) {
		result = -ENOMSG;
	} else if (ret == FJES_CMD_STATUS_NORMAL) {
		switch (res_buf->unshare_buffer.code) {
		case FJES_CMD_REQ_RES_CODE_NORMAL:
			result = 0;
			clear_bit(dest_epid, &hw->hw_info.buffer_share_bit);
			break;
		case FJES_CMD_REQ_RES_CODE_BUSY:
			result = -EBUSY;
			break;
		default:
			result = -EPERM;
			break;
		}
	} else {
		switch (ret) {
		case FJES_CMD_STATUS_UNKNOWN:
			result = -EPERM;
			break;
		case FJES_CMD_STATUS_TIMEOUT:
			result = -EBUSY;
			break;
		case FJES_CMD_STATUS_ERROR_PARAM:
		case FJES_CMD_STATUS_ERROR_STATUS:
		default:
			result = -EPERM;
			break;
		}
	}

	return result;
}
646
Taku Izumie5d486d2015-08-21 17:29:23 +0900647int fjes_hw_raise_interrupt(struct fjes_hw *hw, int dest_epid,
648 enum REG_ICTL_MASK mask)
649{
650 u32 ig = mask | dest_epid;
651
652 wr32(XSCT_IG, cpu_to_le32(ig));
653
654 return 0;
655}
656
657u32 fjes_hw_capture_interrupt_status(struct fjes_hw *hw)
658{
659 u32 cur_is;
660
661 cur_is = rd32(XSCT_IS);
662
663 return cur_is;
664}
665
/* Mask (@mask == true, via IMS) or unmask (via IMC) the interrupt
 * sources selected by @intr_mask.
 */
void fjes_hw_set_irqmask(struct fjes_hw *hw,
			 enum REG_ICTL_MASK intr_mask, bool mask)
{
	if (mask)
		wr32(XSCT_IMS, intr_mask);
	else
		wr32(XSCT_IMC, intr_mask);
}
Taku Izumie5d486d2015-08-21 17:29:23 +0900674
675bool fjes_hw_epid_is_same_zone(struct fjes_hw *hw, int epid)
676{
677 if (epid >= hw->max_epid)
678 return false;
679
680 if ((hw->ep_shm_info[epid].es_status !=
681 FJES_ZONING_STATUS_ENABLE) ||
682 (hw->ep_shm_info[hw->my_epid].zone ==
683 FJES_ZONING_ZONE_TYPE_NONE))
684 return false;
685 else
686 return (hw->ep_shm_info[epid].zone ==
687 hw->ep_shm_info[hw->my_epid].zone);
688}
689
690int fjes_hw_epid_is_shared(struct fjes_device_shared_info *share,
691 int dest_epid)
692{
693 int value = false;
694
695 if (dest_epid < share->epnum)
696 value = share->ep_status[dest_epid];
697
698 return value;
699}
700
/* True when a TX/RX stop has been requested toward @src_epid. */
static bool fjes_hw_epid_is_stop_requested(struct fjes_hw *hw, int src_epid)
{
	return test_bit(src_epid, &hw->txrx_stop_req_bit);
}
705
/* True when the partner has acknowledged the stop request (its DONE flag
 * is set in our TX buffer's rx_status toward @src_epid).
 */
static bool fjes_hw_epid_is_stop_process_done(struct fjes_hw *hw, int src_epid)
{
	return (hw->ep_shm_info[src_epid].tx.info->v1i.rx_status &
			FJES_RX_STOP_REQ_DONE);
}
711
712enum ep_partner_status
713fjes_hw_get_partner_ep_status(struct fjes_hw *hw, int epid)
714{
715 enum ep_partner_status status;
716
717 if (fjes_hw_epid_is_shared(hw->hw_info.share, epid)) {
718 if (fjes_hw_epid_is_stop_requested(hw, epid)) {
719 status = EP_PARTNER_WAITING;
720 } else {
721 if (fjes_hw_epid_is_stop_process_done(hw, epid))
722 status = EP_PARTNER_COMPLETE;
723 else
724 status = EP_PARTNER_SHARED;
725 }
726 } else {
727 status = EP_PARTNER_UNSHARE;
728 }
729
730 return status;
731}
732
/* Ask every partner to stop TX/RX: interrupt the ones that are actively
 * shared, and mark all of them (regardless of state) as stop-requested
 * and pending unshare.
 */
void fjes_hw_raise_epstop(struct fjes_hw *hw)
{
	enum ep_partner_status status;
	int epidx;

	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		status = fjes_hw_get_partner_ep_status(hw, epidx);
		switch (status) {
		case EP_PARTNER_SHARED:
			/* only actively shared partners need the IRQ */
			fjes_hw_raise_interrupt(hw, epidx,
						REG_ICTL_MASK_TXRX_STOP_REQ);
			break;
		default:
			break;
		}

		set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
		set_bit(epidx, &hw->txrx_stop_req_bit);

		hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
			FJES_RX_STOP_REQ_REQUEST;
	}
}
759
760int fjes_hw_wait_epstop(struct fjes_hw *hw)
761{
762 enum ep_partner_status status;
763 union ep_buffer_info *info;
764 int wait_time = 0;
765 int epidx;
766
767 while (hw->hw_info.buffer_unshare_reserve_bit &&
768 (wait_time < FJES_COMMAND_EPSTOP_WAIT_TIMEOUT * 1000)) {
769 for (epidx = 0; epidx < hw->max_epid; epidx++) {
770 if (epidx == hw->my_epid)
771 continue;
772 status = fjes_hw_epid_is_shared(hw->hw_info.share,
773 epidx);
774 info = hw->ep_shm_info[epidx].rx.info;
775 if ((!status ||
776 (info->v1i.rx_status &
777 FJES_RX_STOP_REQ_DONE)) &&
778 test_bit(epidx,
779 &hw->hw_info.buffer_unshare_reserve_bit)) {
780 clear_bit(epidx,
781 &hw->hw_info.buffer_unshare_reserve_bit);
782 }
783 }
784
785 msleep(100);
786 wait_time += 100;
787 }
788
789 for (epidx = 0; epidx < hw->max_epid; epidx++) {
790 if (epidx == hw->my_epid)
791 continue;
792 if (test_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit))
793 clear_bit(epidx,
794 &hw->hw_info.buffer_unshare_reserve_bit);
795 }
796
797 return (wait_time < FJES_COMMAND_EPSTOP_WAIT_TIMEOUT * 1000)
798 ? 0 : -EBUSY;
799}
Taku Izumi9acf51c2015-08-21 17:29:24 +0900800
801bool fjes_hw_check_epbuf_version(struct epbuf_handler *epbh, u32 version)
802{
803 union ep_buffer_info *info = epbh->info;
804
805 return (info->common.version == version);
806}
807
808bool fjes_hw_check_mtu(struct epbuf_handler *epbh, u32 mtu)
809{
810 union ep_buffer_info *info = epbh->info;
811
812 return (info->v1i.frame_max == FJES_MTU_TO_FRAME_SIZE(mtu));
813}
814
815bool fjes_hw_check_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
816{
817 union ep_buffer_info *info = epbh->info;
818 bool ret = false;
819 int i;
820
821 if (vlan_id == 0) {
822 ret = true;
823 } else {
824 for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
825 if (vlan_id == info->v1i.vlan_id[i]) {
826 ret = true;
827 break;
828 }
829 }
830 }
831 return ret;
832}
833
Taku Izumi3e3fedd2015-08-21 17:29:31 +0900834bool fjes_hw_set_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
835{
836 union ep_buffer_info *info = epbh->info;
837 int i;
838
839 for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
840 if (info->v1i.vlan_id[i] == 0) {
841 info->v1i.vlan_id[i] = vlan_id;
842 return true;
843 }
844 }
845 return false;
846}
847
848void fjes_hw_del_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
849{
850 union ep_buffer_info *info = epbh->info;
851 int i;
852
853 if (0 != vlan_id) {
854 for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
855 if (vlan_id == info->v1i.vlan_id[i])
856 info->v1i.vlan_id[i] = 0;
857 }
858 }
859}
860
Taku Izumi26585932015-08-21 17:29:27 +0900861bool fjes_hw_epbuf_rx_is_empty(struct epbuf_handler *epbh)
862{
863 union ep_buffer_info *info = epbh->info;
864
865 if (info->v1i.count_max == 0)
866 return true;
867
868 return EP_RING_EMPTY(info->v1i.head, info->v1i.tail,
869 info->v1i.count_max);
870}
871
/* Return a pointer to the payload of the frame at the RX ring head, and
 * store its size in @psize.  Caller must ensure the ring is not empty
 * (see fjes_hw_epbuf_rx_is_empty()) and advance the head afterwards via
 * fjes_hw_epbuf_rx_curpkt_drop().
 */
void *fjes_hw_epbuf_rx_curpkt_get_addr(struct epbuf_handler *epbh,
				       size_t *psize)
{
	union ep_buffer_info *info = epbh->info;
	struct esmem_frame *ring_frame;
	void *frame;

	/* slot = (head % count_max) * frame_max bytes into the ring */
	ring_frame = (struct esmem_frame *)&(epbh->ring[EP_RING_INDEX
						(info->v1i.head,
						 info->v1i.count_max) *
						info->v1i.frame_max]);

	*psize = (size_t)ring_frame->frame_size;

	frame = ring_frame->frame_data;

	return frame;
}
890
891void fjes_hw_epbuf_rx_curpkt_drop(struct epbuf_handler *epbh)
892{
893 union ep_buffer_info *info = epbh->info;
894
895 if (fjes_hw_epbuf_rx_is_empty(epbh))
896 return;
897
898 EP_RING_INDEX_INC(epbh->info->v1i.head, info->v1i.count_max);
899}
900
/* Copy @size bytes of @frame into the TX ring slot before the tail and
 * advance the tail.
 * Returns 0 on success, -ENOBUFS when the ring is full.
 */
int fjes_hw_epbuf_tx_pkt_send(struct epbuf_handler *epbh,
			      void *frame, size_t size)
{
	union ep_buffer_info *info = epbh->info;
	struct esmem_frame *ring_frame;

	if (EP_RING_FULL(info->v1i.head, info->v1i.tail, info->v1i.count_max))
		return -ENOBUFS;

	/* tail points one past the next write slot (see setup: tail = 1) */
	ring_frame = (struct esmem_frame *)&(epbh->ring[EP_RING_INDEX
						(info->v1i.tail - 1,
						 info->v1i.count_max) *
						info->v1i.frame_max]);

	ring_frame->frame_size = size;
	memcpy((void *)(ring_frame->frame_data), (void *)frame, size);

	EP_RING_INDEX_INC(epbh->info->v1i.tail, info->v1i.count_max);

	return 0;
}
Taku Izumi785f28e2015-08-21 17:29:35 +0900922
923static void fjes_hw_update_zone_task(struct work_struct *work)
924{
925 struct fjes_hw *hw = container_of(work,
926 struct fjes_hw, update_zone_task);
927
928 struct my_s {u8 es_status; u8 zone; } *info;
929 union fjes_device_command_res *res_buf;
930 enum ep_partner_status pstatus;
931
932 struct fjes_adapter *adapter;
933 struct net_device *netdev;
934
935 ulong unshare_bit = 0;
936 ulong share_bit = 0;
937 ulong irq_bit = 0;
938
939 int epidx;
940 int ret;
941
942 adapter = (struct fjes_adapter *)hw->back;
943 netdev = adapter->netdev;
944 res_buf = hw->hw_info.res_buf;
945 info = (struct my_s *)&res_buf->info.info;
946
947 mutex_lock(&hw->hw_info.lock);
948
949 ret = fjes_hw_request_info(hw);
950 switch (ret) {
951 case -ENOMSG:
952 case -EBUSY:
953 default:
954 if (!work_pending(&adapter->force_close_task)) {
955 adapter->force_reset = true;
956 schedule_work(&adapter->force_close_task);
957 }
958 break;
959
960 case 0:
961
962 for (epidx = 0; epidx < hw->max_epid; epidx++) {
963 if (epidx == hw->my_epid) {
964 hw->ep_shm_info[epidx].es_status =
965 info[epidx].es_status;
966 hw->ep_shm_info[epidx].zone =
967 info[epidx].zone;
968 continue;
969 }
970
971 pstatus = fjes_hw_get_partner_ep_status(hw, epidx);
972 switch (pstatus) {
973 case EP_PARTNER_UNSHARE:
974 default:
975 if ((info[epidx].zone !=
976 FJES_ZONING_ZONE_TYPE_NONE) &&
977 (info[epidx].es_status ==
978 FJES_ZONING_STATUS_ENABLE) &&
979 (info[epidx].zone ==
980 info[hw->my_epid].zone))
981 set_bit(epidx, &share_bit);
982 else
983 set_bit(epidx, &unshare_bit);
984 break;
985
986 case EP_PARTNER_COMPLETE:
987 case EP_PARTNER_WAITING:
988 if ((info[epidx].zone ==
989 FJES_ZONING_ZONE_TYPE_NONE) ||
990 (info[epidx].es_status !=
991 FJES_ZONING_STATUS_ENABLE) ||
992 (info[epidx].zone !=
993 info[hw->my_epid].zone)) {
994 set_bit(epidx,
995 &adapter->unshare_watch_bitmask);
996 set_bit(epidx,
997 &hw->hw_info.buffer_unshare_reserve_bit);
998 }
999 break;
1000
1001 case EP_PARTNER_SHARED:
1002 if ((info[epidx].zone ==
1003 FJES_ZONING_ZONE_TYPE_NONE) ||
1004 (info[epidx].es_status !=
1005 FJES_ZONING_STATUS_ENABLE) ||
1006 (info[epidx].zone !=
1007 info[hw->my_epid].zone))
1008 set_bit(epidx, &irq_bit);
1009 break;
1010 }
1011 }
1012
1013 hw->ep_shm_info[epidx].es_status = info[epidx].es_status;
1014 hw->ep_shm_info[epidx].zone = info[epidx].zone;
1015
1016 break;
1017 }
1018
1019 mutex_unlock(&hw->hw_info.lock);
1020
1021 for (epidx = 0; epidx < hw->max_epid; epidx++) {
1022 if (epidx == hw->my_epid)
1023 continue;
1024
1025 if (test_bit(epidx, &share_bit)) {
1026 fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
1027 netdev->dev_addr, netdev->mtu);
1028
1029 mutex_lock(&hw->hw_info.lock);
1030
1031 ret = fjes_hw_register_buff_addr(
1032 hw, epidx, &hw->ep_shm_info[epidx]);
1033
1034 switch (ret) {
1035 case 0:
1036 break;
1037 case -ENOMSG:
1038 case -EBUSY:
1039 default:
1040 if (!work_pending(&adapter->force_close_task)) {
1041 adapter->force_reset = true;
1042 schedule_work(
1043 &adapter->force_close_task);
1044 }
1045 break;
1046 }
1047 mutex_unlock(&hw->hw_info.lock);
1048 }
1049
1050 if (test_bit(epidx, &unshare_bit)) {
1051 mutex_lock(&hw->hw_info.lock);
1052
1053 ret = fjes_hw_unregister_buff_addr(hw, epidx);
1054
1055 switch (ret) {
1056 case 0:
1057 break;
1058 case -ENOMSG:
1059 case -EBUSY:
1060 default:
1061 if (!work_pending(&adapter->force_close_task)) {
1062 adapter->force_reset = true;
1063 schedule_work(
1064 &adapter->force_close_task);
1065 }
1066 break;
1067 }
1068
1069 mutex_unlock(&hw->hw_info.lock);
1070
1071 if (ret == 0)
1072 fjes_hw_setup_epbuf(
1073 &hw->ep_shm_info[epidx].tx,
1074 netdev->dev_addr, netdev->mtu);
1075 }
1076
1077 if (test_bit(epidx, &irq_bit)) {
1078 fjes_hw_raise_interrupt(hw, epidx,
1079 REG_ICTL_MASK_TXRX_STOP_REQ);
1080
1081 set_bit(epidx, &hw->txrx_stop_req_bit);
1082 hw->ep_shm_info[epidx].tx.
1083 info->v1i.rx_status |=
1084 FJES_RX_STOP_REQ_REQUEST;
1085 set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
1086 }
1087 }
1088
1089 if (irq_bit || adapter->unshare_watch_bitmask) {
1090 if (!work_pending(&adapter->unshare_watch_task))
1091 queue_work(adapter->control_wq,
1092 &adapter->unshare_watch_task);
1093 }
1094}