// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "goyaP.h"
#include "include/hw_ip/mmu/mmu_general.h"
#include "include/hw_ip/mmu/mmu_v1_0.h"
#include "include/goya/asic_reg/goya_masks.h"

#include <linux/pci.h>
#include <linux/genalloc.h>
#include <linux/firmware.h>
#include <linux/hwmon.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/io-64-nonatomic-hi-lo.h>

/*
 * GOYA security scheme:
 *
 * 1. Host is protected by:
 *        - Range registers (when MMU is enabled, DMA RR does NOT protect host)
 *        - MMU
 *
 * 2. DRAM is protected by:
 *        - Range registers (protect the first 512MB)
 *        - MMU (isolation between users)
 *
 * 3. Configuration is protected by:
 *        - Range registers
 *        - Protection bits
 *
 * When MMU is disabled:
 *
 * QMAN DMA: PQ, CQ, CP and DMA are secured.
 * PQ, CB and the data are on the host.
 *
 * QMAN TPC/MME:
 * PQ, CQ and CP are not secured.
 * PQ, CB and the data are on the SRAM/DRAM.
 *
 * Since QMAN DMA is secured, KMD parses the DMA CB:
 *     - KMD checks the DMA pointer
 *     - WREG and MSG_PROT are not allowed
 *     - MSG_LONG/SHORT are allowed
 *
 * A read/write transaction by the QMAN to a protected area will succeed if
 * and only if the QMAN's CP is secured and MSG_PROT is used.
 *
 *
 * When MMU is enabled:
 *
 * QMAN DMA: PQ, CQ and CP are secured.
 * MMU is set to bypass on the Secure props register of the QMAN.
 * The reasons we don't enable MMU for PQ, CQ and CP are:
 *     - PQ entry is in kernel address space and KMD doesn't map it.
 *     - CP writes to MSIX register and to kernel address space (completion
 *       queue).
 *
 * DMA is not secured, but because CP is secured, KMD still needs to parse
 * the CB. However, it doesn't need to check the DMA addresses.
 *
 * For QMAN DMA 0, DMA is also secured because only KMD uses this DMA and KMD
 * doesn't map memory in MMU.
 *
 * QMAN TPC/MME: PQ, CQ and CP aren't secured (no change from MMU disabled
 * mode).
 *
 * DMA RR does NOT protect the host because DMA is not secured.
 */

#define GOYA_MMU_REGS_NUM		61

#define GOYA_DMA_POOL_BLK_SIZE		0x100 /* 256 bytes */

#define GOYA_RESET_TIMEOUT_MSEC		500		/* 500ms */
#define GOYA_PLDM_RESET_TIMEOUT_MSEC	20000		/* 20s */
#define GOYA_RESET_WAIT_MSEC		1		/* 1ms */
#define GOYA_CPU_RESET_WAIT_MSEC	100		/* 100ms */
#define GOYA_PLDM_RESET_WAIT_MSEC	1000		/* 1s */
#define GOYA_CPU_TIMEOUT_USEC		10000000	/* 10s */
#define GOYA_TEST_QUEUE_WAIT_USEC	100000		/* 100ms */
#define GOYA_PLDM_MMU_TIMEOUT_USEC	(MMU_CONFIG_TIMEOUT_USEC * 100)
#define GOYA_PLDM_QMAN0_TIMEOUT_USEC	(HL_DEVICE_TIMEOUT_USEC * 30)

#define GOYA_QMAN0_FENCE_VAL		0xD169B243

#define GOYA_MAX_INITIATORS		20

#define GOYA_MAX_STRING_LEN		20

#define GOYA_CB_POOL_CB_CNT		512
#define GOYA_CB_POOL_CB_SIZE		0x20000 /* 128KB */

static const char goya_irq_name[GOYA_MSIX_ENTRIES][GOYA_MAX_STRING_LEN] = {
		"goya cq 0", "goya cq 1", "goya cq 2", "goya cq 3",
		"goya cq 4", "goya cpu eq"
};

static u16 goya_packet_sizes[MAX_PACKET_ID] = {
	[PACKET_WREG_32]	= sizeof(struct packet_wreg32),
	[PACKET_WREG_BULK]	= sizeof(struct packet_wreg_bulk),
	[PACKET_MSG_LONG]	= sizeof(struct packet_msg_long),
	[PACKET_MSG_SHORT]	= sizeof(struct packet_msg_short),
	[PACKET_CP_DMA]		= sizeof(struct packet_cp_dma),
	[PACKET_MSG_PROT]	= sizeof(struct packet_msg_prot),
	[PACKET_FENCE]		= sizeof(struct packet_fence),
	[PACKET_LIN_DMA]	= sizeof(struct packet_lin_dma),
	[PACKET_NOP]		= sizeof(struct packet_nop),
	[PACKET_STOP]		= sizeof(struct packet_stop)
};

static u64 goya_mmu_regs[GOYA_MMU_REGS_NUM] = {
	mmDMA_QM_0_GLBL_NON_SECURE_PROPS,
	mmDMA_QM_1_GLBL_NON_SECURE_PROPS,
	mmDMA_QM_2_GLBL_NON_SECURE_PROPS,
	mmDMA_QM_3_GLBL_NON_SECURE_PROPS,
	mmDMA_QM_4_GLBL_NON_SECURE_PROPS,
	mmTPC0_QM_GLBL_SECURE_PROPS,
	mmTPC0_QM_GLBL_NON_SECURE_PROPS,
	mmTPC0_CMDQ_GLBL_SECURE_PROPS,
	mmTPC0_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC0_CFG_ARUSER,
	mmTPC0_CFG_AWUSER,
	mmTPC1_QM_GLBL_SECURE_PROPS,
	mmTPC1_QM_GLBL_NON_SECURE_PROPS,
	mmTPC1_CMDQ_GLBL_SECURE_PROPS,
	mmTPC1_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC1_CFG_ARUSER,
	mmTPC1_CFG_AWUSER,
	mmTPC2_QM_GLBL_SECURE_PROPS,
	mmTPC2_QM_GLBL_NON_SECURE_PROPS,
	mmTPC2_CMDQ_GLBL_SECURE_PROPS,
	mmTPC2_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC2_CFG_ARUSER,
	mmTPC2_CFG_AWUSER,
	mmTPC3_QM_GLBL_SECURE_PROPS,
	mmTPC3_QM_GLBL_NON_SECURE_PROPS,
	mmTPC3_CMDQ_GLBL_SECURE_PROPS,
	mmTPC3_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC3_CFG_ARUSER,
	mmTPC3_CFG_AWUSER,
	mmTPC4_QM_GLBL_SECURE_PROPS,
	mmTPC4_QM_GLBL_NON_SECURE_PROPS,
	mmTPC4_CMDQ_GLBL_SECURE_PROPS,
	mmTPC4_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC4_CFG_ARUSER,
	mmTPC4_CFG_AWUSER,
	mmTPC5_QM_GLBL_SECURE_PROPS,
	mmTPC5_QM_GLBL_NON_SECURE_PROPS,
	mmTPC5_CMDQ_GLBL_SECURE_PROPS,
	mmTPC5_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC5_CFG_ARUSER,
	mmTPC5_CFG_AWUSER,
	mmTPC6_QM_GLBL_SECURE_PROPS,
	mmTPC6_QM_GLBL_NON_SECURE_PROPS,
	mmTPC6_CMDQ_GLBL_SECURE_PROPS,
	mmTPC6_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC6_CFG_ARUSER,
	mmTPC6_CFG_AWUSER,
	mmTPC7_QM_GLBL_SECURE_PROPS,
	mmTPC7_QM_GLBL_NON_SECURE_PROPS,
	mmTPC7_CMDQ_GLBL_SECURE_PROPS,
	mmTPC7_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC7_CFG_ARUSER,
	mmTPC7_CFG_AWUSER,
	mmMME_QM_GLBL_SECURE_PROPS,
	mmMME_QM_GLBL_NON_SECURE_PROPS,
	mmMME_CMDQ_GLBL_SECURE_PROPS,
	mmMME_CMDQ_GLBL_NON_SECURE_PROPS,
	mmMME_SBA_CONTROL_DATA,
	mmMME_SBB_CONTROL_DATA,
	mmMME_SBC_CONTROL_DATA,
	mmMME_WBC_CONTROL_DATA
};

#define GOYA_ASYC_EVENT_GROUP_NON_FATAL_SIZE	121

static u32 goya_non_fatal_events[GOYA_ASYC_EVENT_GROUP_NON_FATAL_SIZE] = {
	GOYA_ASYNC_EVENT_ID_PCIE_IF,
	GOYA_ASYNC_EVENT_ID_TPC0_ECC,
	GOYA_ASYNC_EVENT_ID_TPC1_ECC,
	GOYA_ASYNC_EVENT_ID_TPC2_ECC,
	GOYA_ASYNC_EVENT_ID_TPC3_ECC,
	GOYA_ASYNC_EVENT_ID_TPC4_ECC,
	GOYA_ASYNC_EVENT_ID_TPC5_ECC,
	GOYA_ASYNC_EVENT_ID_TPC6_ECC,
	GOYA_ASYNC_EVENT_ID_TPC7_ECC,
	GOYA_ASYNC_EVENT_ID_MME_ECC,
	GOYA_ASYNC_EVENT_ID_MME_ECC_EXT,
	GOYA_ASYNC_EVENT_ID_MMU_ECC,
	GOYA_ASYNC_EVENT_ID_DMA_MACRO,
	GOYA_ASYNC_EVENT_ID_DMA_ECC,
	GOYA_ASYNC_EVENT_ID_CPU_IF_ECC,
	GOYA_ASYNC_EVENT_ID_PSOC_MEM,
	GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT,
	GOYA_ASYNC_EVENT_ID_SRAM0,
	GOYA_ASYNC_EVENT_ID_SRAM1,
	GOYA_ASYNC_EVENT_ID_SRAM2,
	GOYA_ASYNC_EVENT_ID_SRAM3,
	GOYA_ASYNC_EVENT_ID_SRAM4,
	GOYA_ASYNC_EVENT_ID_SRAM5,
	GOYA_ASYNC_EVENT_ID_SRAM6,
	GOYA_ASYNC_EVENT_ID_SRAM7,
	GOYA_ASYNC_EVENT_ID_SRAM8,
	GOYA_ASYNC_EVENT_ID_SRAM9,
	GOYA_ASYNC_EVENT_ID_SRAM10,
	GOYA_ASYNC_EVENT_ID_SRAM11,
	GOYA_ASYNC_EVENT_ID_SRAM12,
	GOYA_ASYNC_EVENT_ID_SRAM13,
	GOYA_ASYNC_EVENT_ID_SRAM14,
	GOYA_ASYNC_EVENT_ID_SRAM15,
	GOYA_ASYNC_EVENT_ID_SRAM16,
	GOYA_ASYNC_EVENT_ID_SRAM17,
	GOYA_ASYNC_EVENT_ID_SRAM18,
	GOYA_ASYNC_EVENT_ID_SRAM19,
	GOYA_ASYNC_EVENT_ID_SRAM20,
	GOYA_ASYNC_EVENT_ID_SRAM21,
	GOYA_ASYNC_EVENT_ID_SRAM22,
	GOYA_ASYNC_EVENT_ID_SRAM23,
	GOYA_ASYNC_EVENT_ID_SRAM24,
	GOYA_ASYNC_EVENT_ID_SRAM25,
	GOYA_ASYNC_EVENT_ID_SRAM26,
	GOYA_ASYNC_EVENT_ID_SRAM27,
	GOYA_ASYNC_EVENT_ID_SRAM28,
	GOYA_ASYNC_EVENT_ID_SRAM29,
	GOYA_ASYNC_EVENT_ID_GIC500,
	GOYA_ASYNC_EVENT_ID_PLL0,
	GOYA_ASYNC_EVENT_ID_PLL1,
	GOYA_ASYNC_EVENT_ID_PLL3,
	GOYA_ASYNC_EVENT_ID_PLL4,
	GOYA_ASYNC_EVENT_ID_PLL5,
	GOYA_ASYNC_EVENT_ID_PLL6,
	GOYA_ASYNC_EVENT_ID_AXI_ECC,
	GOYA_ASYNC_EVENT_ID_L2_RAM_ECC,
	GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET,
	GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT,
	GOYA_ASYNC_EVENT_ID_PCIE_DEC,
	GOYA_ASYNC_EVENT_ID_TPC0_DEC,
	GOYA_ASYNC_EVENT_ID_TPC1_DEC,
	GOYA_ASYNC_EVENT_ID_TPC2_DEC,
	GOYA_ASYNC_EVENT_ID_TPC3_DEC,
	GOYA_ASYNC_EVENT_ID_TPC4_DEC,
	GOYA_ASYNC_EVENT_ID_TPC5_DEC,
	GOYA_ASYNC_EVENT_ID_TPC6_DEC,
	GOYA_ASYNC_EVENT_ID_TPC7_DEC,
	GOYA_ASYNC_EVENT_ID_MME_WACS,
	GOYA_ASYNC_EVENT_ID_MME_WACSD,
	GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER,
	GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC,
	GOYA_ASYNC_EVENT_ID_PSOC,
	GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC0_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC1_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC2_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC3_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC4_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC5_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC6_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC7_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC0_QM,
	GOYA_ASYNC_EVENT_ID_TPC1_QM,
	GOYA_ASYNC_EVENT_ID_TPC2_QM,
	GOYA_ASYNC_EVENT_ID_TPC3_QM,
	GOYA_ASYNC_EVENT_ID_TPC4_QM,
	GOYA_ASYNC_EVENT_ID_TPC5_QM,
	GOYA_ASYNC_EVENT_ID_TPC6_QM,
	GOYA_ASYNC_EVENT_ID_TPC7_QM,
	GOYA_ASYNC_EVENT_ID_MME_QM,
	GOYA_ASYNC_EVENT_ID_MME_CMDQ,
	GOYA_ASYNC_EVENT_ID_DMA0_QM,
	GOYA_ASYNC_EVENT_ID_DMA1_QM,
	GOYA_ASYNC_EVENT_ID_DMA2_QM,
	GOYA_ASYNC_EVENT_ID_DMA3_QM,
	GOYA_ASYNC_EVENT_ID_DMA4_QM,
	GOYA_ASYNC_EVENT_ID_DMA0_CH,
	GOYA_ASYNC_EVENT_ID_DMA1_CH,
	GOYA_ASYNC_EVENT_ID_DMA2_CH,
	GOYA_ASYNC_EVENT_ID_DMA3_CH,
	GOYA_ASYNC_EVENT_ID_DMA4_CH,
	GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH0,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH1,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH2,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH3,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH4
};

static int goya_armcp_info_get(struct hl_device *hdev);
static void goya_mmu_prepare(struct hl_device *hdev, u32 asid);
static int goya_mmu_clear_pgt_range(struct hl_device *hdev);
static int goya_mmu_set_dram_default_page(struct hl_device *hdev);
static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
					u64 phys_addr);

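/*
 * goya_get_fixed_properties - Populate the fixed properties of the ASIC
 *
 * @hdev: pointer to hl_device structure
 *
 * Fill the asic_fixed_properties structure with the H/W queue layout and the
 * DRAM, SRAM, MMU and power/PLL constants of the Goya device
 *
 */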
static void goya_get_fixed_properties(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int i;

	for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
		prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
		prop->hw_queues_props[i].kmd_only = 0;
	}

	for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES ; i++) {
		prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
		prop->hw_queues_props[i].kmd_only = 1;
	}

	for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES +
			NUMBER_OF_INT_HW_QUEUES; i++) {
		prop->hw_queues_props[i].type = QUEUE_TYPE_INT;
		prop->hw_queues_props[i].kmd_only = 0;
	}

	for (; i < HL_MAX_QUEUES; i++)
		prop->hw_queues_props[i].type = QUEUE_TYPE_NA;

	prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;

	prop->dram_base_address = DRAM_PHYS_BASE;
	prop->dram_size = DRAM_PHYS_DEFAULT_SIZE;
	prop->dram_end_address = prop->dram_base_address + prop->dram_size;
	prop->dram_user_base_address = DRAM_BASE_ADDR_USER;

	prop->sram_base_address = SRAM_BASE_ADDR;
	prop->sram_size = SRAM_SIZE;
	prop->sram_end_address = prop->sram_base_address + prop->sram_size;
	prop->sram_user_base_address = prop->sram_base_address +
						SRAM_USER_BASE_OFFSET;

	prop->mmu_pgt_addr = MMU_PAGE_TABLES_ADDR;
	prop->mmu_dram_default_page_addr = MMU_DRAM_DEFAULT_PAGE_ADDR;
	if (hdev->pldm)
		prop->mmu_pgt_size = 0x800000; /* 8MB */
	else
		prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE;
	prop->mmu_pte_size = HL_PTE_SIZE;
	prop->mmu_hop_table_size = HOP_TABLE_SIZE;
	prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE;
	prop->dram_page_size = PAGE_SIZE_2MB;

	prop->host_phys_base_address = HOST_PHYS_BASE;
	prop->va_space_host_start_address = VA_HOST_SPACE_START;
	prop->va_space_host_end_address = VA_HOST_SPACE_END;
	prop->va_space_dram_start_address = VA_DDR_SPACE_START;
	prop->va_space_dram_end_address = VA_DDR_SPACE_END;
	prop->dram_size_for_default_page_mapping =
			prop->va_space_dram_end_address;
	prop->cfg_size = CFG_SIZE;
	prop->max_asid = MAX_ASID;
	prop->num_of_events = GOYA_ASYNC_EVENT_ID_SIZE;
	prop->cb_pool_cb_cnt = GOYA_CB_POOL_CB_CNT;
	prop->cb_pool_cb_size = GOYA_CB_POOL_CB_SIZE;
	prop->max_power_default = MAX_POWER_DEFAULT;
	prop->tpc_enabled_mask = TPC_ENABLED_MASK;

	prop->high_pll = PLL_HIGH_DEFAULT;
}

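/*
 * goya_send_pci_access_msg - Send PCI access control message to the device CPU
 *
 * @hdev: pointer to hl_device structure
 * @opcode: packet opcode, e.g. ARMCP_PACKET_ENABLE_PCI_ACCESS
 *
 * Returns 0 on success
 *
 */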
int goya_send_pci_access_msg(struct hl_device *hdev, u32 opcode)
{
	struct armcp_packet pkt;

	memset(&pkt, 0, sizeof(pkt));

	pkt.ctl = opcode << ARMCP_PKT_CTL_OPCODE_SHIFT;

	return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt,
			sizeof(pkt), HL_DEVICE_TIMEOUT_USEC, NULL);
}

/*
 * goya_pci_bars_map - Map PCI BARS of Goya device
 *
 * @hdev: pointer to hl_device structure
 *
 * Request PCI regions and map them to kernel virtual addresses.
 * Returns 0 on success
 *
 */
static int goya_pci_bars_map(struct hl_device *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int rc;

	rc = pci_request_regions(pdev, HL_NAME);
	if (rc) {
		dev_err(hdev->dev, "Cannot obtain PCI resources\n");
		return rc;
	}

	hdev->pcie_bar[SRAM_CFG_BAR_ID] =
			pci_ioremap_bar(pdev, SRAM_CFG_BAR_ID);
	if (!hdev->pcie_bar[SRAM_CFG_BAR_ID]) {
		dev_err(hdev->dev, "pci_ioremap_bar failed for CFG\n");
		rc = -ENODEV;
		goto err_release_regions;
	}

	hdev->pcie_bar[MSIX_BAR_ID] = pci_ioremap_bar(pdev, MSIX_BAR_ID);
	if (!hdev->pcie_bar[MSIX_BAR_ID]) {
		dev_err(hdev->dev, "pci_ioremap_bar failed for MSIX\n");
		rc = -ENODEV;
		goto err_unmap_sram_cfg;
	}

	hdev->pcie_bar[DDR_BAR_ID] = pci_ioremap_wc_bar(pdev, DDR_BAR_ID);
	if (!hdev->pcie_bar[DDR_BAR_ID]) {
		dev_err(hdev->dev, "pci_ioremap_bar failed for DDR\n");
		rc = -ENODEV;
		goto err_unmap_msix;
	}

	hdev->rmmio = hdev->pcie_bar[SRAM_CFG_BAR_ID] +
			(CFG_BASE - SRAM_BASE_ADDR);

	return 0;

err_unmap_msix:
	iounmap(hdev->pcie_bar[MSIX_BAR_ID]);
err_unmap_sram_cfg:
	iounmap(hdev->pcie_bar[SRAM_CFG_BAR_ID]);
err_release_regions:
	pci_release_regions(pdev);

	return rc;
}

/*
 * goya_pci_bars_unmap - Unmap PCI BARS of Goya device
 *
 * @hdev: pointer to hl_device structure
 *
 * Release all PCI BARS and unmap their virtual addresses
 *
 */
static void goya_pci_bars_unmap(struct hl_device *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	iounmap(hdev->pcie_bar[DDR_BAR_ID]);
	iounmap(hdev->pcie_bar[MSIX_BAR_ID]);
	iounmap(hdev->pcie_bar[SRAM_CFG_BAR_ID]);
	pci_release_regions(pdev);
}

/*
 * goya_elbi_write - Write through the ELBI interface
 *
 * @hdev: pointer to hl_device structure
 *
 * Return 0 on success, -EIO on failure
 *
 */
static int goya_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
{
	struct pci_dev *pdev = hdev->pdev;
	ktime_t timeout;
	u32 val;

	/* Clear previous status */
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, 0);

	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_ADDR, (u32) addr);
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data);
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_CTRL,
				PCI_CONFIG_ELBI_CTRL_WRITE);

	timeout = ktime_add_ms(ktime_get(), 10);
	for (;;) {
		pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, &val);
		if (val & PCI_CONFIG_ELBI_STS_MASK)
			break;
		if (ktime_compare(ktime_get(), timeout) > 0) {
			pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS,
						&val);
			break;
		}
		usleep_range(300, 500);
	}

	if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE)
		return 0;

	if (val & PCI_CONFIG_ELBI_STS_ERR) {
		dev_err(hdev->dev, "Error writing to ELBI\n");
		return -EIO;
	}

	if (!(val & PCI_CONFIG_ELBI_STS_MASK)) {
		dev_err(hdev->dev, "ELBI write didn't finish in time\n");
		return -EIO;
	}

	dev_err(hdev->dev, "ELBI write has undefined bits in status\n");
	return -EIO;
}

/*
 * goya_iatu_write - Write 32 bits to an iATU (DBI) register
 *
 * @hdev: pointer to hl_device structure
 * @addr: offset of the register inside the DBI window
 * @data: value to write
 *
 * Move the DBI window to the iATU region and write the register through the
 * ELBI interface. Return 0 on success, -EIO on failure
 *
 */
static int goya_iatu_write(struct hl_device *hdev, u32 addr, u32 data)
{
	u32 dbi_offset;
	int rc;

	dbi_offset = addr & 0xFFF;

	rc = goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0x00300000);
	rc |= goya_elbi_write(hdev, mmPCIE_DBI_BASE + dbi_offset, data);

	if (rc)
		return -EIO;

	return 0;
}

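/*
 * goya_reset_link_through_bridge - Reset the PCI link via the parent bridge
 *
 * @hdev: pointer to hl_device structure
 *
 * Toggle the secondary bus reset bit in the bridge control register of the
 * parent port to force a reset of the link to the device
 *
 */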
static void goya_reset_link_through_bridge(struct hl_device *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct pci_dev *parent_port;
	u16 val;

	parent_port = pdev->bus->self;
	pci_read_config_word(parent_port, PCI_BRIDGE_CONTROL, &val);
	val |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
	ssleep(1);

	val &= ~(PCI_BRIDGE_CTL_BUS_RESET);
	pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
	ssleep(3);
}

/*
 * goya_set_ddr_bar_base - set DDR bar to map specific device address
 *
 * @hdev: pointer to hl_device structure
 * @addr: address in DDR. Must be aligned to DDR bar size
 *
 * This function configures the iATU so that the DDR bar will start at the
 * specified addr.
 *
 */
static int goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr)
{
	struct goya_device *goya = hdev->asic_specific;
	int rc;

	if ((goya) && (goya->ddr_bar_cur_addr == addr))
		return 0;

	/* Inbound Region 1 - Bar 4 - Point to DDR */
	rc = goya_iatu_write(hdev, 0x314, lower_32_bits(addr));
	rc |= goya_iatu_write(hdev, 0x318, upper_32_bits(addr));
	rc |= goya_iatu_write(hdev, 0x300, 0);
	/* Enable + Bar match + match enable + Bar 4 */
	rc |= goya_iatu_write(hdev, 0x304, 0xC0080400);

	/* Return the DBI window to the default location */
	rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0);
	rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI_32, 0);

	if (rc) {
		dev_err(hdev->dev, "failed to map DDR bar to 0x%08llx\n", addr);
		return -EIO;
	}

	if (goya)
		goya->ddr_bar_cur_addr = addr;

	return 0;
}

/*
 * goya_init_iatu - Initialize the iATU unit inside the PCI controller
 *
 * @hdev: pointer to hl_device structure
 *
 * This is needed in case the firmware doesn't initialize the iATU
 *
 */
static int goya_init_iatu(struct hl_device *hdev)
{
	int rc;

	/* Inbound Region 0 - Bar 0 - Point to SRAM_BASE_ADDR */
	rc = goya_iatu_write(hdev, 0x114, lower_32_bits(SRAM_BASE_ADDR));
	rc |= goya_iatu_write(hdev, 0x118, upper_32_bits(SRAM_BASE_ADDR));
	rc |= goya_iatu_write(hdev, 0x100, 0);
	/* Enable + Bar match + match enable */
	rc |= goya_iatu_write(hdev, 0x104, 0xC0080000);

	/* Inbound Region 1 - Bar 4 - Point to DDR */
	rc |= goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);

	/* Outbound Region 0 - Point to Host */
	rc |= goya_iatu_write(hdev, 0x008, lower_32_bits(HOST_PHYS_BASE));
	rc |= goya_iatu_write(hdev, 0x00C, upper_32_bits(HOST_PHYS_BASE));
	rc |= goya_iatu_write(hdev, 0x010,
			lower_32_bits(HOST_PHYS_BASE + HOST_PHYS_SIZE - 1));
	rc |= goya_iatu_write(hdev, 0x014, 0);
	rc |= goya_iatu_write(hdev, 0x018, 0);
	rc |= goya_iatu_write(hdev, 0x020,
			upper_32_bits(HOST_PHYS_BASE + HOST_PHYS_SIZE - 1));
	/* Increase region size */
	rc |= goya_iatu_write(hdev, 0x000, 0x00002000);
	/* Enable */
	rc |= goya_iatu_write(hdev, 0x004, 0x80000000);

	/* Return the DBI window to the default location */
	rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0);
	rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI_32, 0);

	if (rc)
		return -EIO;

	return 0;
}

/*
 * goya_early_init - GOYA early initialization code
 *
 * @hdev: pointer to hl_device structure
 *
 * Verify PCI bars
 * Set DMA masks
 * PCI controller initialization
 * Map PCI bars
 *
 */
static int goya_early_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct pci_dev *pdev = hdev->pdev;
	u32 val;
	int rc;

	goya_get_fixed_properties(hdev);

	/* Check BAR sizes */
	if (pci_resource_len(pdev, SRAM_CFG_BAR_ID) != CFG_BAR_SIZE) {
		dev_err(hdev->dev,
			"Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
			SRAM_CFG_BAR_ID,
			(unsigned long long) pci_resource_len(pdev,
							SRAM_CFG_BAR_ID),
			CFG_BAR_SIZE);
		return -ENODEV;
	}

	if (pci_resource_len(pdev, MSIX_BAR_ID) != MSIX_BAR_SIZE) {
		dev_err(hdev->dev,
			"Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
			MSIX_BAR_ID,
			(unsigned long long) pci_resource_len(pdev,
								MSIX_BAR_ID),
			MSIX_BAR_SIZE);
		return -ENODEV;
	}

	prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID);

	/* set DMA mask for GOYA */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
	if (rc) {
		dev_warn(hdev->dev, "Unable to set pci dma mask to 39 bits\n");
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(hdev->dev,
				"Unable to set pci dma mask to 32 bits\n");
			return rc;
		}
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
	if (rc) {
		dev_warn(hdev->dev,
			"Unable to set pci consistent dma mask to 39 bits\n");
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(hdev->dev,
				"Unable to set pci consistent dma mask to 32 bits\n");
			return rc;
		}
	}

	if (hdev->reset_pcilink)
		goya_reset_link_through_bridge(hdev);

	rc = pci_enable_device_mem(pdev);
	if (rc) {
		dev_err(hdev->dev, "can't enable PCI device\n");
		return rc;
	}

	pci_set_master(pdev);

	rc = goya_init_iatu(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to initialize iATU\n");
		goto disable_device;
	}

	rc = goya_pci_bars_map(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to initialize PCI BARS\n");
		goto disable_device;
	}

	if (!hdev->pldm) {
		val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
		if (val & PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK)
			dev_warn(hdev->dev,
				"PCI strap is not configured correctly, PCI bus errors may occur\n");
	}

	return 0;

disable_device:
	pci_clear_master(pdev);
	pci_disable_device(pdev);

	return rc;
}

/*
 * goya_early_fini - GOYA early finalization code
 *
 * @hdev: pointer to hl_device structure
 *
 * Unmap PCI bars
 *
 */
static int goya_early_fini(struct hl_device *hdev)
{
	goya_pci_bars_unmap(hdev);

	pci_clear_master(hdev->pdev);
	pci_disable_device(hdev->pdev);

	return 0;
}

/*
 * goya_fetch_psoc_frequency - Fetch PSOC frequency values
 *
 * @hdev: pointer to hl_device structure
 *
 */
static void goya_fetch_psoc_frequency(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	prop->psoc_pci_pll_nr = RREG32(mmPSOC_PCI_PLL_NR);
	prop->psoc_pci_pll_nf = RREG32(mmPSOC_PCI_PLL_NF);
	prop->psoc_pci_pll_od = RREG32(mmPSOC_PCI_PLL_OD);
	prop->psoc_pci_pll_div_factor = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
}

/*
 * goya_late_init - GOYA late initialization code
 *
 * @hdev: pointer to hl_device structure
 *
 * Get ArmCP info and send message to CPU to enable PCI access
 */
static int goya_late_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct goya_device *goya = hdev->asic_specific;
	int rc;

	rc = goya->armcp_info_get(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to get armcp info\n");
		return rc;
	}

	/* Now that we have the DRAM size in ASIC prop, we need to check
	 * its size and configure the DMA_IF DDR wrap protection (which is in
	 * the MMU block) accordingly. The value is the log2 of the DRAM size
	 */
	WREG32(mmMMU_LOG2_DDR_SIZE, ilog2(prop->dram_size));

	rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
	if (rc) {
		dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
		return rc;
	}

	WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
			GOYA_ASYNC_EVENT_ID_INTS_REGISTER);

	goya_fetch_psoc_frequency(hdev);

	rc = goya_mmu_clear_pgt_range(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to clear MMU page tables range\n");
		goto disable_pci_access;
	}

	rc = goya_mmu_set_dram_default_page(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to set DRAM default page\n");
		goto disable_pci_access;
	}

	return 0;

disable_pci_access:
	goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);

	return rc;
}

/*
 * goya_late_fini - GOYA late tear-down code
 *
 * @hdev: pointer to hl_device structure
 *
 * Free sensors allocated structures
 */
void goya_late_fini(struct hl_device *hdev)
{
	const struct hwmon_channel_info **channel_info_arr;
	int i = 0;

	if (!hdev->hl_chip_info->info)
		return;

	channel_info_arr = hdev->hl_chip_info->info;

	while (channel_info_arr[i]) {
		kfree(channel_info_arr[i]->config);
		kfree(channel_info_arr[i]);
		i++;
	}

	kfree(channel_info_arr);

	hdev->hl_chip_info->info = NULL;
}

/*
 * goya_sw_init - Goya software initialization code
 *
 * @hdev: pointer to hl_device structure
 *
 */
static int goya_sw_init(struct hl_device *hdev)
{
	struct goya_device *goya;
	int rc;

	/* Allocate device structure */
	goya = kzalloc(sizeof(*goya), GFP_KERNEL);
	if (!goya)
		return -ENOMEM;

	goya->test_cpu_queue = goya_test_cpu_queue;
	goya->armcp_info_get = goya_armcp_info_get;

	/* according to goya_init_iatu */
	goya->ddr_bar_cur_addr = DRAM_PHYS_BASE;

	goya->mme_clk = GOYA_PLL_FREQ_LOW;
	goya->tpc_clk = GOYA_PLL_FREQ_LOW;
	goya->ic_clk = GOYA_PLL_FREQ_LOW;

	hdev->asic_specific = goya;

	/* Create DMA pool for small allocations */
	hdev->dma_pool = dma_pool_create(dev_name(hdev->dev),
			&hdev->pdev->dev, GOYA_DMA_POOL_BLK_SIZE, 8, 0);
	if (!hdev->dma_pool) {
		dev_err(hdev->dev, "failed to create DMA pool\n");
		rc = -ENOMEM;
		goto free_goya_device;
	}

	hdev->cpu_accessible_dma_mem =
			hdev->asic_funcs->dma_alloc_coherent(hdev,
					CPU_ACCESSIBLE_MEM_SIZE,
					&hdev->cpu_accessible_dma_address,
					GFP_KERNEL | __GFP_ZERO);

	if (!hdev->cpu_accessible_dma_mem) {
		dev_err(hdev->dev,
			"failed to allocate %d of dma memory for CPU accessible memory space\n",
			CPU_ACCESSIBLE_MEM_SIZE);
		rc = -ENOMEM;
		goto free_dma_pool;
	}

	hdev->cpu_accessible_dma_pool = gen_pool_create(CPU_PKT_SHIFT, -1);
	if (!hdev->cpu_accessible_dma_pool) {
		dev_err(hdev->dev,
			"Failed to create CPU accessible DMA pool\n");
		rc = -ENOMEM;
		goto free_cpu_pq_dma_mem;
	}

	rc = gen_pool_add(hdev->cpu_accessible_dma_pool,
				(uintptr_t) hdev->cpu_accessible_dma_mem,
				CPU_ACCESSIBLE_MEM_SIZE, -1);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to add memory to CPU accessible DMA pool\n");
		rc = -EFAULT;
		goto free_cpu_pq_pool;
	}

	spin_lock_init(&goya->hw_queues_lock);

	return 0;

free_cpu_pq_pool:
	gen_pool_destroy(hdev->cpu_accessible_dma_pool);
free_cpu_pq_dma_mem:
	hdev->asic_funcs->dma_free_coherent(hdev, CPU_ACCESSIBLE_MEM_SIZE,
			hdev->cpu_accessible_dma_mem,
			hdev->cpu_accessible_dma_address);
free_dma_pool:
	dma_pool_destroy(hdev->dma_pool);
free_goya_device:
	kfree(goya);

	return rc;
}

/*
 * goya_sw_fini - Goya software tear-down code
 *
 * @hdev: pointer to hl_device structure
 *
 */
static int goya_sw_fini(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;

	gen_pool_destroy(hdev->cpu_accessible_dma_pool);

	hdev->asic_funcs->dma_free_coherent(hdev, CPU_ACCESSIBLE_MEM_SIZE,
			hdev->cpu_accessible_dma_mem,
			hdev->cpu_accessible_dma_address);

	dma_pool_destroy(hdev->dma_pool);

	kfree(goya);

	return 0;
}

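/*
 * goya_init_dma_qman - Initialize the H/W registers of a single QMAN DMA
 *
 * @hdev: pointer to hl_device structure
 * @dma_id: index of the DMA channel
 * @bus_address: DMA address of the PQ of this queue
 *
 * Configure the PQ base and size, the monitor/sync-object message base
 * addresses, the error reporting registers and the protection mode of the
 * QMAN, and then enable it
 *
 */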
static void goya_init_dma_qman(struct hl_device *hdev, int dma_id,
				dma_addr_t bus_address)
{
	struct goya_device *goya = hdev->asic_specific;
	u32 mtr_base_lo, mtr_base_hi;
	u32 so_base_lo, so_base_hi;
	u32 gic_base_lo, gic_base_hi;
	u32 reg_off = dma_id * (mmDMA_QM_1_PQ_PI - mmDMA_QM_0_PQ_PI);

	mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
	mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
	so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
	so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);

	gic_base_lo =
		lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
	gic_base_hi =
		upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);

	WREG32(mmDMA_QM_0_PQ_BASE_LO + reg_off, lower_32_bits(bus_address));
	WREG32(mmDMA_QM_0_PQ_BASE_HI + reg_off, upper_32_bits(bus_address));

	WREG32(mmDMA_QM_0_PQ_SIZE + reg_off, ilog2(HL_QUEUE_LENGTH));
	WREG32(mmDMA_QM_0_PQ_PI + reg_off, 0);
	WREG32(mmDMA_QM_0_PQ_CI + reg_off, 0);

	WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
	WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
	WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
	WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
	WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
	WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
	WREG32(mmDMA_QM_0_GLBL_ERR_WDATA + reg_off,
			GOYA_ASYNC_EVENT_ID_DMA0_QM + dma_id);

	/* PQ has buffer of 2 cache lines, while CQ has 8 lines */
	WREG32(mmDMA_QM_0_PQ_CFG1 + reg_off, 0x00020002);
	WREG32(mmDMA_QM_0_CQ_CFG1 + reg_off, 0x00080008);

	if (goya->hw_cap_initialized & HW_CAP_MMU)
		WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_PARTLY_TRUSTED);
	else
		WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_FULLY_TRUSTED);

	WREG32(mmDMA_QM_0_GLBL_ERR_CFG + reg_off, QMAN_DMA_ERR_MSG_EN);
	WREG32(mmDMA_QM_0_GLBL_CFG0 + reg_off, QMAN_DMA_ENABLE);
}

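/*
 * goya_init_dma_ch - Initialize the H/W registers of a single DMA channel
 *
 * @hdev: pointer to hl_device structure
 * @dma_id: index of the DMA channel
 *
 * Configure error reporting and the sync object that is written upon
 * completion of a transfer (write-completion address and data)
 *
 */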
static void goya_init_dma_ch(struct hl_device *hdev, int dma_id)
{
	u32 gic_base_lo, gic_base_hi;
	u64 sob_addr;
	u32 reg_off = dma_id * (mmDMA_CH_1_CFG1 - mmDMA_CH_0_CFG1);

	gic_base_lo =
		lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
	gic_base_hi =
		upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);

	WREG32(mmDMA_CH_0_ERRMSG_ADDR_LO + reg_off, gic_base_lo);
	WREG32(mmDMA_CH_0_ERRMSG_ADDR_HI + reg_off, gic_base_hi);
	WREG32(mmDMA_CH_0_ERRMSG_WDATA + reg_off,
			GOYA_ASYNC_EVENT_ID_DMA0_CH + dma_id);

	if (dma_id)
		sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1000 +
				(dma_id - 1) * 4;
	else
		sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1007;

	WREG32(mmDMA_CH_0_WR_COMP_ADDR_LO + reg_off, lower_32_bits(sob_addr));
	WREG32(mmDMA_CH_0_WR_COMP_ADDR_HI + reg_off, upper_32_bits(sob_addr));
	WREG32(mmDMA_CH_0_WR_COMP_WDATA + reg_off, 0x80000001);
}

/*
 * goya_init_dma_qmans - Initialize QMAN DMA registers
 *
 * @hdev: pointer to hl_device structure
 *
 * Initialize the H/W registers of the QMAN DMA channels
 *
 */
static void goya_init_dma_qmans(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	struct hl_hw_queue *q;
	dma_addr_t bus_address;
	int i;

	if (goya->hw_cap_initialized & HW_CAP_DMA)
		return;

	q = &hdev->kernel_queues[0];

	for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++, q++) {
		bus_address = q->bus_address +
				hdev->asic_prop.host_phys_base_address;

		goya_init_dma_qman(hdev, i, bus_address);
		goya_init_dma_ch(hdev, i);
	}

	goya->hw_cap_initialized |= HW_CAP_DMA;
}

/*
 * goya_disable_external_queues - Disable external queues
 *
 * @hdev: pointer to hl_device structure
 *
 */
static void goya_disable_external_queues(struct hl_device *hdev)
{
	WREG32(mmDMA_QM_0_GLBL_CFG0, 0);
	WREG32(mmDMA_QM_1_GLBL_CFG0, 0);
	WREG32(mmDMA_QM_2_GLBL_CFG0, 0);
	WREG32(mmDMA_QM_3_GLBL_CFG0, 0);
	WREG32(mmDMA_QM_4_GLBL_CFG0, 0);
}

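/*
 * goya_stop_queue - Stop a single QMAN
 *
 * @hdev: pointer to hl_device structure
 * @cfg_reg: queue configuration register
 * @cp_sts_reg: CP status register
 * @glbl_sts0_reg: global status register
 *
 * Issue a CP stop command and poll until the QMAN reports it has stopped.
 * Returns 0 on success
 *
 */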
static int goya_stop_queue(struct hl_device *hdev, u32 cfg_reg,
				u32 cp_sts_reg, u32 glbl_sts0_reg)
{
	int rc;
	u32 status;

	/* use the values of TPC0 as they are all the same */

	WREG32(cfg_reg, 1 << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);

	status = RREG32(cp_sts_reg);
	if (status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK) {
		rc = hl_poll_timeout(
			hdev,
			cp_sts_reg,
			status,
			!(status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK),
			1000,
			QMAN_FENCE_TIMEOUT_USEC);

		/* if QMAN is stuck in fence, no need to check for stop */
		if (rc)
			return 0;
	}

	rc = hl_poll_timeout(
		hdev,
		glbl_sts0_reg,
		status,
		(status & TPC0_QM_GLBL_STS0_CP_IS_STOP_MASK),
		1000,
		QMAN_STOP_TIMEOUT_USEC);

	if (rc) {
		dev_err(hdev->dev,
			"Timeout while waiting for QMAN to stop\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * goya_stop_external_queues - Stop external queues
 *
 * @hdev: pointer to hl_device structure
 *
 * Returns 0 on success
 *
 */
static int goya_stop_external_queues(struct hl_device *hdev)
{
	int rc, retval = 0;

	rc = goya_stop_queue(hdev,
			mmDMA_QM_0_GLBL_CFG1,
			mmDMA_QM_0_CP_STS,
			mmDMA_QM_0_GLBL_STS0);

	if (rc) {
		dev_err(hdev->dev, "failed to stop DMA QMAN 0\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev,
			mmDMA_QM_1_GLBL_CFG1,
			mmDMA_QM_1_CP_STS,
			mmDMA_QM_1_GLBL_STS0);

	if (rc) {
		dev_err(hdev->dev, "failed to stop DMA QMAN 1\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev,
			mmDMA_QM_2_GLBL_CFG1,
			mmDMA_QM_2_CP_STS,
			mmDMA_QM_2_GLBL_STS0);

	if (rc) {
		dev_err(hdev->dev, "failed to stop DMA QMAN 2\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev,
			mmDMA_QM_3_GLBL_CFG1,
			mmDMA_QM_3_CP_STS,
			mmDMA_QM_3_GLBL_STS0);

	if (rc) {
		dev_err(hdev->dev, "failed to stop DMA QMAN 3\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev,
			mmDMA_QM_4_GLBL_CFG1,
			mmDMA_QM_4_CP_STS,
			mmDMA_QM_4_GLBL_STS0);

	if (rc) {
		dev_err(hdev->dev, "failed to stop DMA QMAN 4\n");
		retval = -EIO;
	}

	return retval;
}

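/*
 * goya_resume_external_queues - Resume external queues
 *
 * @hdev: pointer to hl_device structure
 *
 * Clear the CP stop bit of all DMA QMANs
 *
 */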
static void goya_resume_external_queues(struct hl_device *hdev)
{
	WREG32(mmDMA_QM_0_GLBL_CFG1, 0);
	WREG32(mmDMA_QM_1_GLBL_CFG1, 0);
	WREG32(mmDMA_QM_2_GLBL_CFG1, 0);
	WREG32(mmDMA_QM_3_GLBL_CFG1, 0);
	WREG32(mmDMA_QM_4_GLBL_CFG1, 0);
}

/*
 * goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU
 *
 * @hdev: pointer to hl_device structure
 *
 * Returns 0 on success
 *
 */
static int goya_init_cpu_queues(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	struct hl_eq *eq;
	dma_addr_t bus_address;
	u32 status;
	struct hl_hw_queue *cpu_pq = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ];
	int err;

	if (!hdev->cpu_queues_enable)
		return 0;

	if (goya->hw_cap_initialized & HW_CAP_CPU_Q)
		return 0;

	eq = &hdev->event_queue;

	bus_address = cpu_pq->bus_address +
			hdev->asic_prop.host_phys_base_address;
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_0, lower_32_bits(bus_address));
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_1, upper_32_bits(bus_address));

	bus_address = eq->bus_address + hdev->asic_prop.host_phys_base_address;
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_2, lower_32_bits(bus_address));
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_3, upper_32_bits(bus_address));

	bus_address = hdev->cpu_accessible_dma_address +
			hdev->asic_prop.host_phys_base_address;
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_8, lower_32_bits(bus_address));
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_9, upper_32_bits(bus_address));

	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_5, HL_QUEUE_SIZE_IN_BYTES);
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_4, HL_EQ_SIZE_IN_BYTES);
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_10, CPU_ACCESSIBLE_MEM_SIZE);

	/* Used for EQ CI */
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, 0);

	WREG32(mmCPU_IF_PF_PQ_PI, 0);

	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_7, PQ_INIT_STATUS_READY_FOR_CP);

	WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
			GOYA_ASYNC_EVENT_ID_PI_UPDATE);

	err = hl_poll_timeout(
		hdev,
		mmPSOC_GLOBAL_CONF_SCRATCHPAD_7,
		status,
		(status == PQ_INIT_STATUS_READY_FOR_HOST),
		1000,
		GOYA_CPU_TIMEOUT_USEC);

	if (err) {
		dev_err(hdev->dev,
			"Failed to communicate with ARM CPU (ArmCP timeout)\n");
		return -EIO;
	}

	goya->hw_cap_initialized |= HW_CAP_CPU_Q;
	return 0;
}

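/*
 * goya_set_pll_refclk - Set all PLLs to use the reference clock
 *
 * @hdev: pointer to hl_device structure
 *
 * Clear the divider select registers of the CPU, IC, MC, MME, PCI, EMMC and
 * TPC PLLs
 *
 */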
static void goya_set_pll_refclk(struct hl_device *hdev)
{
	WREG32(mmCPU_PLL_DIV_SEL_0, 0x0);
	WREG32(mmCPU_PLL_DIV_SEL_1, 0x0);
	WREG32(mmCPU_PLL_DIV_SEL_2, 0x0);
	WREG32(mmCPU_PLL_DIV_SEL_3, 0x0);

	WREG32(mmIC_PLL_DIV_SEL_0, 0x0);
	WREG32(mmIC_PLL_DIV_SEL_1, 0x0);
	WREG32(mmIC_PLL_DIV_SEL_2, 0x0);
	WREG32(mmIC_PLL_DIV_SEL_3, 0x0);

	WREG32(mmMC_PLL_DIV_SEL_0, 0x0);
	WREG32(mmMC_PLL_DIV_SEL_1, 0x0);
	WREG32(mmMC_PLL_DIV_SEL_2, 0x0);
	WREG32(mmMC_PLL_DIV_SEL_3, 0x0);

	WREG32(mmPSOC_MME_PLL_DIV_SEL_0, 0x0);
	WREG32(mmPSOC_MME_PLL_DIV_SEL_1, 0x0);
	WREG32(mmPSOC_MME_PLL_DIV_SEL_2, 0x0);
	WREG32(mmPSOC_MME_PLL_DIV_SEL_3, 0x0);

	WREG32(mmPSOC_PCI_PLL_DIV_SEL_0, 0x0);
	WREG32(mmPSOC_PCI_PLL_DIV_SEL_1, 0x0);
	WREG32(mmPSOC_PCI_PLL_DIV_SEL_2, 0x0);
	WREG32(mmPSOC_PCI_PLL_DIV_SEL_3, 0x0);

	WREG32(mmPSOC_EMMC_PLL_DIV_SEL_0, 0x0);
	WREG32(mmPSOC_EMMC_PLL_DIV_SEL_1, 0x0);
	WREG32(mmPSOC_EMMC_PLL_DIV_SEL_2, 0x0);
	WREG32(mmPSOC_EMMC_PLL_DIV_SEL_3, 0x0);

	WREG32(mmTPC_PLL_DIV_SEL_0, 0x0);
	WREG32(mmTPC_PLL_DIV_SEL_1, 0x0);
	WREG32(mmTPC_PLL_DIV_SEL_2, 0x0);
	WREG32(mmTPC_PLL_DIV_SEL_3, 0x0);
}

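/*
 * goya_disable_clk_rlx - Disable clock relaxation on the MME and IC PLLs
 *
 * @hdev: pointer to hl_device structure
 *
 */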
static void goya_disable_clk_rlx(struct hl_device *hdev)
{
	WREG32(mmPSOC_MME_PLL_CLK_RLX_0, 0x100010);
	WREG32(mmIC_PLL_CLK_RLX_0, 0x100010);
}

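/*
 * _goya_tpc_mbist_workaround - Apply the TPC MBIST workaround to a single TPC
 *
 * @hdev: pointer to hl_device structure
 * @tpc_id: index of the TPC
 *
 * Workaround for H2 #2443: run MBIST on the TPC scratchpad memories, reset
 * the TPC core and clear its SLM
 *
 */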
static void _goya_tpc_mbist_workaround(struct hl_device *hdev, u8 tpc_id)
{
	u64 tpc_eml_address;
	u32 val, tpc_offset, tpc_eml_offset, tpc_slm_offset;
	int err, slm_index;

	tpc_offset = tpc_id * 0x40000;
	tpc_eml_offset = tpc_id * 0x200000;
	tpc_eml_address = (mmTPC0_EML_CFG_BASE + tpc_eml_offset - CFG_BASE);
	tpc_slm_offset = tpc_eml_address + 0x100000;

	/*
	 * Workaround for Bug H2 #2443 :
	 * "TPC SB is not initialized on chip reset"
	 */

	val = RREG32(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset);
	if (val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_ACTIVE_MASK)
		dev_warn(hdev->dev, "TPC%d MBIST ACTIVE is not cleared\n",
				tpc_id);

	WREG32(mmTPC0_CFG_FUNC_MBIST_PAT + tpc_offset, val & 0xFFFFF000);

	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_0 + tpc_offset, 0x37FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_1 + tpc_offset, 0x303F);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_2 + tpc_offset, 0x71FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_3 + tpc_offset, 0x71FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_4 + tpc_offset, 0x70FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_5 + tpc_offset, 0x70FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_6 + tpc_offset, 0x70FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_7 + tpc_offset, 0x70FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_8 + tpc_offset, 0x70FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_9 + tpc_offset, 0x70FF);

	WREG32_OR(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset,
			1 << TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_START_SHIFT);

	err = hl_poll_timeout(
		hdev,
		mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset,
		val,
		(val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_DONE_MASK),
		1000,
		HL_DEVICE_TIMEOUT_USEC);

	if (err)
		dev_err(hdev->dev,
			"Timeout while waiting for TPC%d MBIST DONE\n", tpc_id);

	WREG32_OR(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset,
			1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT);

	msleep(GOYA_RESET_WAIT_MSEC);

	WREG32_AND(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset,
			~(1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT));

	msleep(GOYA_RESET_WAIT_MSEC);

	for (slm_index = 0 ; slm_index < 256 ; slm_index++)
		WREG32(tpc_slm_offset + (slm_index << 2), 0);

	val = RREG32(tpc_slm_offset);
}

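/*
 * goya_tpc_mbist_workaround - Apply the TPC MBIST workaround to all TPCs
 *
 * @hdev: pointer to hl_device structure
 *
 */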
static void goya_tpc_mbist_workaround(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	int i;

	if (hdev->pldm)
		return;

	if (goya->hw_cap_initialized & HW_CAP_TPC_MBIST)
		return;

	/* Workaround for H2 #2443 */

	for (i = 0 ; i < TPC_MAX_NUM ; i++)
		_goya_tpc_mbist_workaround(hdev, i);

	goya->hw_cap_initialized |= HW_CAP_TPC_MBIST;
}

1412/*
1413 * goya_init_golden_registers - Initialize golden registers
1414 *
1415 * @hdev: pointer to hl_device structure
1416 *
1417 * Initialize the H/W registers of the device
1418 *
1419 */
1420static void goya_init_golden_registers(struct hl_device *hdev)
1421{
1422 struct goya_device *goya = hdev->asic_specific;
1423 u32 polynom[10], tpc_intr_mask, offset;
1424 int i;
1425
1426 if (goya->hw_cap_initialized & HW_CAP_GOLDEN)
1427 return;
1428
1429 polynom[0] = 0x00020080;
1430 polynom[1] = 0x00401000;
1431 polynom[2] = 0x00200800;
1432 polynom[3] = 0x00002000;
1433 polynom[4] = 0x00080200;
1434 polynom[5] = 0x00040100;
1435 polynom[6] = 0x00100400;
1436 polynom[7] = 0x00004000;
1437 polynom[8] = 0x00010000;
1438 polynom[9] = 0x00008000;
1439
1440 /* Mask all arithmetic interrupts from TPC */
1441 tpc_intr_mask = 0x7FFF;
1442
1443 for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x20000) {
1444 WREG32(mmSRAM_Y0_X0_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1445 WREG32(mmSRAM_Y0_X1_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1446 WREG32(mmSRAM_Y0_X2_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1447 WREG32(mmSRAM_Y0_X3_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1448 WREG32(mmSRAM_Y0_X4_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1449
1450 WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_L_ARB + offset, 0x204);
1451 WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_L_ARB + offset, 0x204);
1452 WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_L_ARB + offset, 0x204);
1453 WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_L_ARB + offset, 0x204);
1454 WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_L_ARB + offset, 0x204);
1455
1456
1457 WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_E_ARB + offset, 0x206);
1458 WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_E_ARB + offset, 0x206);
1459 WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_E_ARB + offset, 0x206);
1460 WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_E_ARB + offset, 0x207);
1461 WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_E_ARB + offset, 0x207);
1462
1463 WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_W_ARB + offset, 0x207);
1464 WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_W_ARB + offset, 0x207);
1465 WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_W_ARB + offset, 0x206);
1466 WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_W_ARB + offset, 0x206);
1467 WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_W_ARB + offset, 0x206);
1468
1469 WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_E_ARB + offset, 0x101);
1470 WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_E_ARB + offset, 0x102);
1471 WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_E_ARB + offset, 0x103);
1472 WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_E_ARB + offset, 0x104);
1473 WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_E_ARB + offset, 0x105);
1474
1475 WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_W_ARB + offset, 0x105);
1476 WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_W_ARB + offset, 0x104);
1477 WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_W_ARB + offset, 0x103);
1478 WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_W_ARB + offset, 0x102);
1479 WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_W_ARB + offset, 0x101);
1480 }
1481
1482 WREG32(mmMME_STORE_MAX_CREDIT, 0x21);
1483 WREG32(mmMME_AGU, 0x0f0f0f10);
1484 WREG32(mmMME_SEI_MASK, ~0x0);
1485
1486 WREG32(mmMME6_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1487 WREG32(mmMME5_RTR_HBW_RD_RQ_N_ARB, 0x01040101);
1488 WREG32(mmMME4_RTR_HBW_RD_RQ_N_ARB, 0x01030101);
1489 WREG32(mmMME3_RTR_HBW_RD_RQ_N_ARB, 0x01020101);
1490 WREG32(mmMME2_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1491 WREG32(mmMME1_RTR_HBW_RD_RQ_N_ARB, 0x07010701);
1492 WREG32(mmMME6_RTR_HBW_RD_RQ_S_ARB, 0x04010401);
1493 WREG32(mmMME5_RTR_HBW_RD_RQ_S_ARB, 0x04050401);
1494 WREG32(mmMME4_RTR_HBW_RD_RQ_S_ARB, 0x03070301);
1495 WREG32(mmMME3_RTR_HBW_RD_RQ_S_ARB, 0x01030101);
1496 WREG32(mmMME2_RTR_HBW_RD_RQ_S_ARB, 0x01040101);
1497 WREG32(mmMME1_RTR_HBW_RD_RQ_S_ARB, 0x01050105);
1498 WREG32(mmMME6_RTR_HBW_RD_RQ_W_ARB, 0x01010501);
1499 WREG32(mmMME5_RTR_HBW_RD_RQ_W_ARB, 0x01010501);
1500 WREG32(mmMME4_RTR_HBW_RD_RQ_W_ARB, 0x01040301);
1501 WREG32(mmMME3_RTR_HBW_RD_RQ_W_ARB, 0x01030401);
1502 WREG32(mmMME2_RTR_HBW_RD_RQ_W_ARB, 0x01040101);
1503 WREG32(mmMME1_RTR_HBW_RD_RQ_W_ARB, 0x01050101);
1504 WREG32(mmMME6_RTR_HBW_WR_RQ_N_ARB, 0x02020202);
1505 WREG32(mmMME5_RTR_HBW_WR_RQ_N_ARB, 0x01070101);
1506 WREG32(mmMME4_RTR_HBW_WR_RQ_N_ARB, 0x02020201);
1507 WREG32(mmMME3_RTR_HBW_WR_RQ_N_ARB, 0x07020701);
1508 WREG32(mmMME2_RTR_HBW_WR_RQ_N_ARB, 0x01020101);
1509 WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
1510 WREG32(mmMME6_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
1511 WREG32(mmMME5_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
1512 WREG32(mmMME4_RTR_HBW_WR_RQ_S_ARB, 0x07020701);
1513 WREG32(mmMME3_RTR_HBW_WR_RQ_S_ARB, 0x02020201);
1514 WREG32(mmMME2_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
1515 WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01020102);
1516 WREG32(mmMME6_RTR_HBW_WR_RQ_W_ARB, 0x01020701);
1517 WREG32(mmMME5_RTR_HBW_WR_RQ_W_ARB, 0x01020701);
1518 WREG32(mmMME4_RTR_HBW_WR_RQ_W_ARB, 0x07020707);
1519 WREG32(mmMME3_RTR_HBW_WR_RQ_W_ARB, 0x01020201);
1520 WREG32(mmMME2_RTR_HBW_WR_RQ_W_ARB, 0x01070201);
1521 WREG32(mmMME1_RTR_HBW_WR_RQ_W_ARB, 0x01070201);
1522 WREG32(mmMME6_RTR_HBW_RD_RS_N_ARB, 0x01070102);
1523 WREG32(mmMME5_RTR_HBW_RD_RS_N_ARB, 0x01070102);
1524 WREG32(mmMME4_RTR_HBW_RD_RS_N_ARB, 0x01060102);
1525 WREG32(mmMME3_RTR_HBW_RD_RS_N_ARB, 0x01040102);
1526 WREG32(mmMME2_RTR_HBW_RD_RS_N_ARB, 0x01020102);
1527 WREG32(mmMME1_RTR_HBW_RD_RS_N_ARB, 0x01020107);
1528 WREG32(mmMME6_RTR_HBW_RD_RS_S_ARB, 0x01020106);
1529 WREG32(mmMME5_RTR_HBW_RD_RS_S_ARB, 0x01020102);
1530 WREG32(mmMME4_RTR_HBW_RD_RS_S_ARB, 0x01040102);
1531 WREG32(mmMME3_RTR_HBW_RD_RS_S_ARB, 0x01060102);
1532 WREG32(mmMME2_RTR_HBW_RD_RS_S_ARB, 0x01070102);
1533 WREG32(mmMME1_RTR_HBW_RD_RS_S_ARB, 0x01070102);
1534 WREG32(mmMME6_RTR_HBW_RD_RS_E_ARB, 0x01020702);
1535 WREG32(mmMME5_RTR_HBW_RD_RS_E_ARB, 0x01020702);
1536 WREG32(mmMME4_RTR_HBW_RD_RS_E_ARB, 0x01040602);
1537 WREG32(mmMME3_RTR_HBW_RD_RS_E_ARB, 0x01060402);
1538 WREG32(mmMME2_RTR_HBW_RD_RS_E_ARB, 0x01070202);
1539 WREG32(mmMME1_RTR_HBW_RD_RS_E_ARB, 0x01070102);
1540 WREG32(mmMME6_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1541 WREG32(mmMME5_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1542 WREG32(mmMME4_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1543 WREG32(mmMME3_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1544 WREG32(mmMME2_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1545 WREG32(mmMME1_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1546 WREG32(mmMME6_RTR_HBW_WR_RS_N_ARB, 0x01050101);
1547 WREG32(mmMME5_RTR_HBW_WR_RS_N_ARB, 0x01040101);
1548 WREG32(mmMME4_RTR_HBW_WR_RS_N_ARB, 0x01030101);
1549 WREG32(mmMME3_RTR_HBW_WR_RS_N_ARB, 0x01020101);
1550 WREG32(mmMME2_RTR_HBW_WR_RS_N_ARB, 0x01010101);
1551 WREG32(mmMME1_RTR_HBW_WR_RS_N_ARB, 0x01010107);
1552 WREG32(mmMME6_RTR_HBW_WR_RS_S_ARB, 0x01010107);
1553 WREG32(mmMME5_RTR_HBW_WR_RS_S_ARB, 0x01010101);
1554 WREG32(mmMME4_RTR_HBW_WR_RS_S_ARB, 0x01020101);
1555 WREG32(mmMME3_RTR_HBW_WR_RS_S_ARB, 0x01030101);
1556 WREG32(mmMME2_RTR_HBW_WR_RS_S_ARB, 0x01040101);
1557 WREG32(mmMME1_RTR_HBW_WR_RS_S_ARB, 0x01050101);
1558 WREG32(mmMME6_RTR_HBW_WR_RS_E_ARB, 0x01010501);
1559 WREG32(mmMME5_RTR_HBW_WR_RS_E_ARB, 0x01010501);
1560 WREG32(mmMME4_RTR_HBW_WR_RS_E_ARB, 0x01040301);
1561 WREG32(mmMME3_RTR_HBW_WR_RS_E_ARB, 0x01030401);
1562 WREG32(mmMME2_RTR_HBW_WR_RS_E_ARB, 0x01040101);
1563 WREG32(mmMME1_RTR_HBW_WR_RS_E_ARB, 0x01050101);
1564 WREG32(mmMME6_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1565 WREG32(mmMME5_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1566 WREG32(mmMME4_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1567 WREG32(mmMME3_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1568 WREG32(mmMME2_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1569 WREG32(mmMME1_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1570
1571 WREG32(mmTPC1_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1572 WREG32(mmTPC1_RTR_HBW_RD_RQ_S_ARB, 0x01010101);
1573 WREG32(mmTPC1_RTR_HBW_RD_RQ_E_ARB, 0x01060101);
1574 WREG32(mmTPC1_RTR_HBW_WR_RQ_N_ARB, 0x02020102);
1575 WREG32(mmTPC1_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
1576 WREG32(mmTPC1_RTR_HBW_WR_RQ_E_ARB, 0x02070202);
1577 WREG32(mmTPC1_RTR_HBW_RD_RS_N_ARB, 0x01020201);
1578 WREG32(mmTPC1_RTR_HBW_RD_RS_S_ARB, 0x01070201);
1579 WREG32(mmTPC1_RTR_HBW_RD_RS_W_ARB, 0x01070202);
1580 WREG32(mmTPC1_RTR_HBW_WR_RS_N_ARB, 0x01010101);
1581 WREG32(mmTPC1_RTR_HBW_WR_RS_S_ARB, 0x01050101);
1582 WREG32(mmTPC1_RTR_HBW_WR_RS_W_ARB, 0x01050101);
1583
1584 WREG32(mmTPC2_RTR_HBW_RD_RQ_N_ARB, 0x01020101);
1585 WREG32(mmTPC2_RTR_HBW_RD_RQ_S_ARB, 0x01050101);
1586 WREG32(mmTPC2_RTR_HBW_RD_RQ_E_ARB, 0x01010201);
1587 WREG32(mmTPC2_RTR_HBW_WR_RQ_N_ARB, 0x02040102);
1588 WREG32(mmTPC2_RTR_HBW_WR_RQ_S_ARB, 0x01050101);
1589 WREG32(mmTPC2_RTR_HBW_WR_RQ_E_ARB, 0x02060202);
1590 WREG32(mmTPC2_RTR_HBW_RD_RS_N_ARB, 0x01020201);
1591 WREG32(mmTPC2_RTR_HBW_RD_RS_S_ARB, 0x01070201);
1592 WREG32(mmTPC2_RTR_HBW_RD_RS_W_ARB, 0x01070202);
1593 WREG32(mmTPC2_RTR_HBW_WR_RS_N_ARB, 0x01010101);
1594 WREG32(mmTPC2_RTR_HBW_WR_RS_S_ARB, 0x01040101);
1595 WREG32(mmTPC2_RTR_HBW_WR_RS_W_ARB, 0x01040101);
1596
1597 WREG32(mmTPC3_RTR_HBW_RD_RQ_N_ARB, 0x01030101);
1598 WREG32(mmTPC3_RTR_HBW_RD_RQ_S_ARB, 0x01040101);
1599 WREG32(mmTPC3_RTR_HBW_RD_RQ_E_ARB, 0x01040301);
1600 WREG32(mmTPC3_RTR_HBW_WR_RQ_N_ARB, 0x02060102);
1601 WREG32(mmTPC3_RTR_HBW_WR_RQ_S_ARB, 0x01040101);
1602 WREG32(mmTPC3_RTR_HBW_WR_RQ_E_ARB, 0x01040301);
1603 WREG32(mmTPC3_RTR_HBW_RD_RS_N_ARB, 0x01040201);
1604 WREG32(mmTPC3_RTR_HBW_RD_RS_S_ARB, 0x01060201);
1605 WREG32(mmTPC3_RTR_HBW_RD_RS_W_ARB, 0x01060402);
1606 WREG32(mmTPC3_RTR_HBW_WR_RS_N_ARB, 0x01020101);
1607 WREG32(mmTPC3_RTR_HBW_WR_RS_S_ARB, 0x01030101);
1608 WREG32(mmTPC3_RTR_HBW_WR_RS_W_ARB, 0x01030401);
1609
1610 WREG32(mmTPC4_RTR_HBW_RD_RQ_N_ARB, 0x01040101);
1611 WREG32(mmTPC4_RTR_HBW_RD_RQ_S_ARB, 0x01030101);
1612 WREG32(mmTPC4_RTR_HBW_RD_RQ_E_ARB, 0x01030401);
1613 WREG32(mmTPC4_RTR_HBW_WR_RQ_N_ARB, 0x02070102);
1614 WREG32(mmTPC4_RTR_HBW_WR_RQ_S_ARB, 0x01030101);
1615 WREG32(mmTPC4_RTR_HBW_WR_RQ_E_ARB, 0x02060702);
1616 WREG32(mmTPC4_RTR_HBW_RD_RS_N_ARB, 0x01060201);
1617 WREG32(mmTPC4_RTR_HBW_RD_RS_S_ARB, 0x01040201);
1618 WREG32(mmTPC4_RTR_HBW_RD_RS_W_ARB, 0x01040602);
1619 WREG32(mmTPC4_RTR_HBW_WR_RS_N_ARB, 0x01030101);
1620 WREG32(mmTPC4_RTR_HBW_WR_RS_S_ARB, 0x01020101);
1621 WREG32(mmTPC4_RTR_HBW_WR_RS_W_ARB, 0x01040301);
1622
1623 WREG32(mmTPC5_RTR_HBW_RD_RQ_N_ARB, 0x01050101);
1624 WREG32(mmTPC5_RTR_HBW_RD_RQ_S_ARB, 0x01020101);
1625 WREG32(mmTPC5_RTR_HBW_RD_RQ_E_ARB, 0x01200501);
1626 WREG32(mmTPC5_RTR_HBW_WR_RQ_N_ARB, 0x02070102);
1627 WREG32(mmTPC5_RTR_HBW_WR_RQ_S_ARB, 0x01020101);
1628 WREG32(mmTPC5_RTR_HBW_WR_RQ_E_ARB, 0x02020602);
1629 WREG32(mmTPC5_RTR_HBW_RD_RS_N_ARB, 0x01070201);
1630 WREG32(mmTPC5_RTR_HBW_RD_RS_S_ARB, 0x01020201);
1631 WREG32(mmTPC5_RTR_HBW_RD_RS_W_ARB, 0x01020702);
1632 WREG32(mmTPC5_RTR_HBW_WR_RS_N_ARB, 0x01040101);
1633 WREG32(mmTPC5_RTR_HBW_WR_RS_S_ARB, 0x01010101);
1634 WREG32(mmTPC5_RTR_HBW_WR_RS_W_ARB, 0x01010501);
1635
1636 WREG32(mmTPC6_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1637 WREG32(mmTPC6_RTR_HBW_RD_RQ_S_ARB, 0x01010101);
1638 WREG32(mmTPC6_RTR_HBW_RD_RQ_E_ARB, 0x01010601);
1639 WREG32(mmTPC6_RTR_HBW_WR_RQ_N_ARB, 0x01010101);
1640 WREG32(mmTPC6_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
1641 WREG32(mmTPC6_RTR_HBW_WR_RQ_E_ARB, 0x02020702);
1642 WREG32(mmTPC6_RTR_HBW_RD_RS_N_ARB, 0x01010101);
1643 WREG32(mmTPC6_RTR_HBW_RD_RS_S_ARB, 0x01010101);
1644 WREG32(mmTPC6_RTR_HBW_RD_RS_W_ARB, 0x01020702);
1645 WREG32(mmTPC6_RTR_HBW_WR_RS_N_ARB, 0x01050101);
1646 WREG32(mmTPC6_RTR_HBW_WR_RS_S_ARB, 0x01010101);
1647 WREG32(mmTPC6_RTR_HBW_WR_RS_W_ARB, 0x01010501);
1648
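	/*
	 * Program identical split coefficients into every router (MME, TPC,
	 * PCI and DMA). The polynom[] values are shifted right by 7,
	 * presumably to scale them down to the width of the coefficient
	 * field in the SPLIT_COEF registers.
	 */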
1649 for (i = 0, offset = 0 ; i < 10 ; i++, offset += 4) {
1650 WREG32(mmMME1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1651 WREG32(mmMME2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1652 WREG32(mmMME3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1653 WREG32(mmMME4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1654 WREG32(mmMME5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1655 WREG32(mmMME6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1656
1657 WREG32(mmTPC0_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1658 WREG32(mmTPC1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1659 WREG32(mmTPC2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1660 WREG32(mmTPC3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1661 WREG32(mmTPC4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1662 WREG32(mmTPC5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1663 WREG32(mmTPC6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1664 WREG32(mmTPC7_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1665
1666 WREG32(mmPCI_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1667 WREG32(mmDMA_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1668 }
1669
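	/*
	 * Enable address scrambling in all 6 MME routers. Consecutive router
	 * register blocks appear to be spaced 0x40000 bytes apart, hence the
	 * loop stride.
	 */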
1670 for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x40000) {
1671 WREG32(mmMME1_RTR_SCRAMB_EN + offset,
1672 1 << MME1_RTR_SCRAMB_EN_VAL_SHIFT);
1673 WREG32(mmMME1_RTR_NON_LIN_SCRAMB + offset,
1674 1 << MME1_RTR_NON_LIN_SCRAMB_EN_SHIFT);
1675 }
1676
1677 for (i = 0, offset = 0 ; i < 8 ; i++, offset += 0x40000) {
1678 /*
1679		 * Workaround for Bug H2 #2441:
1680 * "ST.NOP set trace event illegal opcode"
1681 */
1682 WREG32(mmTPC0_CFG_TPC_INTR_MASK + offset, tpc_intr_mask);
1683
1684 WREG32(mmTPC0_NRTR_SCRAMB_EN + offset,
1685 1 << TPC0_NRTR_SCRAMB_EN_VAL_SHIFT);
1686 WREG32(mmTPC0_NRTR_NON_LIN_SCRAMB + offset,
1687 1 << TPC0_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
1688 }
1689
1690 WREG32(mmDMA_NRTR_SCRAMB_EN, 1 << DMA_NRTR_SCRAMB_EN_VAL_SHIFT);
1691 WREG32(mmDMA_NRTR_NON_LIN_SCRAMB,
1692 1 << DMA_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
1693
1694 WREG32(mmPCI_NRTR_SCRAMB_EN, 1 << PCI_NRTR_SCRAMB_EN_VAL_SHIFT);
1695 WREG32(mmPCI_NRTR_NON_LIN_SCRAMB,
1696 1 << PCI_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
1697
1698 /*
1699	 * Workaround for H2 #HW-23 bug:
1700	 * Set the maximum outstanding DMA read requests to 240 on DMA CH 1
1701	 * and to 16 on the KMD DMA (CH 0).
1702	 * Only these DMAs need to be limited because the user can read from
1703	 * host memory only through DMA CH 1
1704 */
1705 WREG32(mmDMA_CH_0_CFG0, 0x0fff0010);
1706 WREG32(mmDMA_CH_1_CFG0, 0x0fff00F0);
1707
1708 goya->hw_cap_initialized |= HW_CAP_GOLDEN;
1709}
1710
1711static void goya_init_mme_qman(struct hl_device *hdev)
1712{
1713 u32 mtr_base_lo, mtr_base_hi;
1714 u32 so_base_lo, so_base_hi;
1715 u32 gic_base_lo, gic_base_hi;
1716 u64 qman_base_addr;
1717
1718 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1719 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1720 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1721 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1722
1723 gic_base_lo =
1724 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1725 gic_base_hi =
1726 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1727
1728 qman_base_addr = hdev->asic_prop.sram_base_address +
1729 MME_QMAN_BASE_OFFSET;
1730
1731 WREG32(mmMME_QM_PQ_BASE_LO, lower_32_bits(qman_base_addr));
1732 WREG32(mmMME_QM_PQ_BASE_HI, upper_32_bits(qman_base_addr));
1733 WREG32(mmMME_QM_PQ_SIZE, ilog2(MME_QMAN_LENGTH));
1734 WREG32(mmMME_QM_PQ_PI, 0);
1735 WREG32(mmMME_QM_PQ_CI, 0);
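	/*
	 * These constants appear to be offsets, within the QMAN register
	 * space, of the LDMA source-base (lo/hi), transfer-size and commit
	 * registers that the CP writes when it executes LDMA packets.
	 */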
1736 WREG32(mmMME_QM_CP_LDMA_SRC_BASE_LO_OFFSET, 0x10C0);
1737 WREG32(mmMME_QM_CP_LDMA_SRC_BASE_HI_OFFSET, 0x10C4);
1738 WREG32(mmMME_QM_CP_LDMA_TSIZE_OFFSET, 0x10C8);
1739 WREG32(mmMME_QM_CP_LDMA_COMMIT_OFFSET, 0x10CC);
1740
1741 WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
1742 WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
1743 WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_LO, so_base_lo);
1744 WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_HI, so_base_hi);
1745
1746 /* QMAN CQ has 8 cache lines */
1747 WREG32(mmMME_QM_CQ_CFG1, 0x00080008);
1748
1749 WREG32(mmMME_QM_GLBL_ERR_ADDR_LO, gic_base_lo);
1750 WREG32(mmMME_QM_GLBL_ERR_ADDR_HI, gic_base_hi);
1751
1752 WREG32(mmMME_QM_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_QM);
1753
1754 WREG32(mmMME_QM_GLBL_ERR_CFG, QMAN_MME_ERR_MSG_EN);
1755
1756 WREG32(mmMME_QM_GLBL_PROT, QMAN_MME_ERR_PROT);
1757
1758 WREG32(mmMME_QM_GLBL_CFG0, QMAN_MME_ENABLE);
1759}
1760
1761static void goya_init_mme_cmdq(struct hl_device *hdev)
1762{
1763 u32 mtr_base_lo, mtr_base_hi;
1764 u32 so_base_lo, so_base_hi;
1765 u32 gic_base_lo, gic_base_hi;
1766 u64 qman_base_addr;
1767
1768 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1769 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1770 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1771 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1772
1773 gic_base_lo =
1774 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1775 gic_base_hi =
1776 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1777
1778 qman_base_addr = hdev->asic_prop.sram_base_address +
1779 MME_QMAN_BASE_OFFSET;
1780
1781 WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
1782 WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
1783 WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_LO, so_base_lo);
1784 WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_HI, so_base_hi);
1785
1786 /* CMDQ CQ has 20 cache lines */
1787 WREG32(mmMME_CMDQ_CQ_CFG1, 0x00140014);
1788
1789 WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_LO, gic_base_lo);
1790 WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_HI, gic_base_hi);
1791
1792 WREG32(mmMME_CMDQ_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_CMDQ);
1793
1794 WREG32(mmMME_CMDQ_GLBL_ERR_CFG, CMDQ_MME_ERR_MSG_EN);
1795
1796 WREG32(mmMME_CMDQ_GLBL_PROT, CMDQ_MME_ERR_PROT);
1797
1798 WREG32(mmMME_CMDQ_GLBL_CFG0, CMDQ_MME_ENABLE);
1799}
1800
1801static void goya_init_mme_qmans(struct hl_device *hdev)
1802{
1803 struct goya_device *goya = hdev->asic_specific;
1804 u32 so_base_lo, so_base_hi;
1805
1806 if (goya->hw_cap_initialized & HW_CAP_MME)
1807 return;
1808
1809 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1810 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1811
1812 WREG32(mmMME_SM_BASE_ADDRESS_LOW, so_base_lo);
1813 WREG32(mmMME_SM_BASE_ADDRESS_HIGH, so_base_hi);
1814
1815 goya_init_mme_qman(hdev);
1816 goya_init_mme_cmdq(hdev);
1817
1818 goya->hw_cap_initialized |= HW_CAP_MME;
1819}
1820
1821static void goya_init_tpc_qman(struct hl_device *hdev, u32 base_off, int tpc_id)
1822{
1823 u32 mtr_base_lo, mtr_base_hi;
1824 u32 so_base_lo, so_base_hi;
1825 u32 gic_base_lo, gic_base_hi;
1826 u64 qman_base_addr;
1827 u32 reg_off = tpc_id * (mmTPC1_QM_PQ_PI - mmTPC0_QM_PQ_PI);
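	/*
	 * All TPC QMAN register blocks share the same layout, so the distance
	 * between two consecutive blocks, multiplied by the TPC index, can be
	 * used as a single offset for every register access below.
	 */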
1828
1829 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1830 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1831 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1832 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1833
1834 gic_base_lo =
1835 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1836 gic_base_hi =
1837 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1838
1839 qman_base_addr = hdev->asic_prop.sram_base_address + base_off;
1840
1841 WREG32(mmTPC0_QM_PQ_BASE_LO + reg_off, lower_32_bits(qman_base_addr));
1842 WREG32(mmTPC0_QM_PQ_BASE_HI + reg_off, upper_32_bits(qman_base_addr));
1843 WREG32(mmTPC0_QM_PQ_SIZE + reg_off, ilog2(TPC_QMAN_LENGTH));
1844 WREG32(mmTPC0_QM_PQ_PI + reg_off, 0);
1845 WREG32(mmTPC0_QM_PQ_CI + reg_off, 0);
1846 WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET + reg_off, 0x10C0);
1847 WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET + reg_off, 0x10C4);
1848 WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET + reg_off, 0x10C8);
1849 WREG32(mmTPC0_QM_CP_LDMA_COMMIT_OFFSET + reg_off, 0x10CC);
1850
1851 WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
1852 WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
1853 WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
1854 WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
1855
1856 WREG32(mmTPC0_QM_CQ_CFG1 + reg_off, 0x00080008);
1857
1858 WREG32(mmTPC0_QM_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
1859 WREG32(mmTPC0_QM_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
1860
1861 WREG32(mmTPC0_QM_GLBL_ERR_WDATA + reg_off,
1862 GOYA_ASYNC_EVENT_ID_TPC0_QM + tpc_id);
1863
1864 WREG32(mmTPC0_QM_GLBL_ERR_CFG + reg_off, QMAN_TPC_ERR_MSG_EN);
1865
1866 WREG32(mmTPC0_QM_GLBL_PROT + reg_off, QMAN_TPC_ERR_PROT);
1867
1868 WREG32(mmTPC0_QM_GLBL_CFG0 + reg_off, QMAN_TPC_ENABLE);
1869}
1870
1871static void goya_init_tpc_cmdq(struct hl_device *hdev, int tpc_id)
1872{
1873 u32 mtr_base_lo, mtr_base_hi;
1874 u32 so_base_lo, so_base_hi;
1875 u32 gic_base_lo, gic_base_hi;
1876 u32 reg_off = tpc_id * (mmTPC1_CMDQ_CQ_CFG1 - mmTPC0_CMDQ_CQ_CFG1);
1877
1878 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1879 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1880 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1881 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1882
1883 gic_base_lo =
1884 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1885 gic_base_hi =
1886 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1887
1888 WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
1889 WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
1890 WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
1891 WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
1892
1893 WREG32(mmTPC0_CMDQ_CQ_CFG1 + reg_off, 0x00140014);
1894
1895 WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
1896 WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
1897
1898 WREG32(mmTPC0_CMDQ_GLBL_ERR_WDATA + reg_off,
1899 GOYA_ASYNC_EVENT_ID_TPC0_CMDQ + tpc_id);
1900
1901 WREG32(mmTPC0_CMDQ_GLBL_ERR_CFG + reg_off, CMDQ_TPC_ERR_MSG_EN);
1902
1903 WREG32(mmTPC0_CMDQ_GLBL_PROT + reg_off, CMDQ_TPC_ERR_PROT);
1904
1905 WREG32(mmTPC0_CMDQ_GLBL_CFG0 + reg_off, CMDQ_TPC_ENABLE);
1906}
1907
1908static void goya_init_tpc_qmans(struct hl_device *hdev)
1909{
1910 struct goya_device *goya = hdev->asic_specific;
1911 u32 so_base_lo, so_base_hi;
1912 u32 cfg_off = mmTPC1_CFG_SM_BASE_ADDRESS_LOW -
1913 mmTPC0_CFG_SM_BASE_ADDRESS_LOW;
1914 int i;
1915
1916 if (goya->hw_cap_initialized & HW_CAP_TPC)
1917 return;
1918
1919 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1920 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1921
1922 for (i = 0 ; i < TPC_MAX_NUM ; i++) {
1923 WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_LOW + i * cfg_off,
1924 so_base_lo);
1925 WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_HIGH + i * cfg_off,
1926 so_base_hi);
1927 }
1928
1929 goya_init_tpc_qman(hdev, TPC0_QMAN_BASE_OFFSET, 0);
1930 goya_init_tpc_qman(hdev, TPC1_QMAN_BASE_OFFSET, 1);
1931 goya_init_tpc_qman(hdev, TPC2_QMAN_BASE_OFFSET, 2);
1932 goya_init_tpc_qman(hdev, TPC3_QMAN_BASE_OFFSET, 3);
1933 goya_init_tpc_qman(hdev, TPC4_QMAN_BASE_OFFSET, 4);
1934 goya_init_tpc_qman(hdev, TPC5_QMAN_BASE_OFFSET, 5);
1935 goya_init_tpc_qman(hdev, TPC6_QMAN_BASE_OFFSET, 6);
1936 goya_init_tpc_qman(hdev, TPC7_QMAN_BASE_OFFSET, 7);
1937
1938 for (i = 0 ; i < TPC_MAX_NUM ; i++)
1939 goya_init_tpc_cmdq(hdev, i);
1940
1941 goya->hw_cap_initialized |= HW_CAP_TPC;
1942}
1943
1944/*
1945 * goya_disable_internal_queues - Disable internal queues
1946 *
1947 * @hdev: pointer to hl_device structure
1948 *
1949 */
1950static void goya_disable_internal_queues(struct hl_device *hdev)
1951{
1952 WREG32(mmMME_QM_GLBL_CFG0, 0);
1953 WREG32(mmMME_CMDQ_GLBL_CFG0, 0);
1954
1955 WREG32(mmTPC0_QM_GLBL_CFG0, 0);
1956 WREG32(mmTPC0_CMDQ_GLBL_CFG0, 0);
1957
1958 WREG32(mmTPC1_QM_GLBL_CFG0, 0);
1959 WREG32(mmTPC1_CMDQ_GLBL_CFG0, 0);
1960
1961 WREG32(mmTPC2_QM_GLBL_CFG0, 0);
1962 WREG32(mmTPC2_CMDQ_GLBL_CFG0, 0);
1963
1964 WREG32(mmTPC3_QM_GLBL_CFG0, 0);
1965 WREG32(mmTPC3_CMDQ_GLBL_CFG0, 0);
1966
1967 WREG32(mmTPC4_QM_GLBL_CFG0, 0);
1968 WREG32(mmTPC4_CMDQ_GLBL_CFG0, 0);
1969
1970 WREG32(mmTPC5_QM_GLBL_CFG0, 0);
1971 WREG32(mmTPC5_CMDQ_GLBL_CFG0, 0);
1972
1973 WREG32(mmTPC6_QM_GLBL_CFG0, 0);
1974 WREG32(mmTPC6_CMDQ_GLBL_CFG0, 0);
1975
1976 WREG32(mmTPC7_QM_GLBL_CFG0, 0);
1977 WREG32(mmTPC7_CMDQ_GLBL_CFG0, 0);
1978}
1979
1980/*
1981 * goya_stop_internal_queues - Stop internal queues
1982 *
1983 * @hdev: pointer to hl_device structure
1984 *
1985 * Returns 0 on success
1986 *
1987 */
1988static int goya_stop_internal_queues(struct hl_device *hdev)
1989{
1990 int rc, retval = 0;
1991
1992 /*
1993	 * Each queue (QMAN) is a separate H/W logic, so each QMAN can be
1994	 * stopped independently and failure to stop one does NOT prevent us
1995	 * from trying to stop the other QMANs
1996 */
1997
1998 rc = goya_stop_queue(hdev,
1999 mmMME_QM_GLBL_CFG1,
2000 mmMME_QM_CP_STS,
2001 mmMME_QM_GLBL_STS0);
2002
2003 if (rc) {
2004 dev_err(hdev->dev, "failed to stop MME QMAN\n");
2005 retval = -EIO;
2006 }
2007
2008 rc = goya_stop_queue(hdev,
2009 mmMME_CMDQ_GLBL_CFG1,
2010 mmMME_CMDQ_CP_STS,
2011 mmMME_CMDQ_GLBL_STS0);
2012
2013 if (rc) {
2014 dev_err(hdev->dev, "failed to stop MME CMDQ\n");
2015 retval = -EIO;
2016 }
2017
2018 rc = goya_stop_queue(hdev,
2019 mmTPC0_QM_GLBL_CFG1,
2020 mmTPC0_QM_CP_STS,
2021 mmTPC0_QM_GLBL_STS0);
2022
2023 if (rc) {
2024 dev_err(hdev->dev, "failed to stop TPC 0 QMAN\n");
2025 retval = -EIO;
2026 }
2027
2028 rc = goya_stop_queue(hdev,
2029 mmTPC0_CMDQ_GLBL_CFG1,
2030 mmTPC0_CMDQ_CP_STS,
2031 mmTPC0_CMDQ_GLBL_STS0);
2032
2033 if (rc) {
2034 dev_err(hdev->dev, "failed to stop TPC 0 CMDQ\n");
2035 retval = -EIO;
2036 }
2037
2038 rc = goya_stop_queue(hdev,
2039 mmTPC1_QM_GLBL_CFG1,
2040 mmTPC1_QM_CP_STS,
2041 mmTPC1_QM_GLBL_STS0);
2042
2043 if (rc) {
2044 dev_err(hdev->dev, "failed to stop TPC 1 QMAN\n");
2045 retval = -EIO;
2046 }
2047
2048 rc = goya_stop_queue(hdev,
2049 mmTPC1_CMDQ_GLBL_CFG1,
2050 mmTPC1_CMDQ_CP_STS,
2051 mmTPC1_CMDQ_GLBL_STS0);
2052
2053 if (rc) {
2054 dev_err(hdev->dev, "failed to stop TPC 1 CMDQ\n");
2055 retval = -EIO;
2056 }
2057
2058 rc = goya_stop_queue(hdev,
2059 mmTPC2_QM_GLBL_CFG1,
2060 mmTPC2_QM_CP_STS,
2061 mmTPC2_QM_GLBL_STS0);
2062
2063 if (rc) {
2064 dev_err(hdev->dev, "failed to stop TPC 2 QMAN\n");
2065 retval = -EIO;
2066 }
2067
2068 rc = goya_stop_queue(hdev,
2069 mmTPC2_CMDQ_GLBL_CFG1,
2070 mmTPC2_CMDQ_CP_STS,
2071 mmTPC2_CMDQ_GLBL_STS0);
2072
2073 if (rc) {
2074 dev_err(hdev->dev, "failed to stop TPC 2 CMDQ\n");
2075 retval = -EIO;
2076 }
2077
2078 rc = goya_stop_queue(hdev,
2079 mmTPC3_QM_GLBL_CFG1,
2080 mmTPC3_QM_CP_STS,
2081 mmTPC3_QM_GLBL_STS0);
2082
2083 if (rc) {
2084 dev_err(hdev->dev, "failed to stop TPC 3 QMAN\n");
2085 retval = -EIO;
2086 }
2087
2088 rc = goya_stop_queue(hdev,
2089 mmTPC3_CMDQ_GLBL_CFG1,
2090 mmTPC3_CMDQ_CP_STS,
2091 mmTPC3_CMDQ_GLBL_STS0);
2092
2093 if (rc) {
2094 dev_err(hdev->dev, "failed to stop TPC 3 CMDQ\n");
2095 retval = -EIO;
2096 }
2097
2098 rc = goya_stop_queue(hdev,
2099 mmTPC4_QM_GLBL_CFG1,
2100 mmTPC4_QM_CP_STS,
2101 mmTPC4_QM_GLBL_STS0);
2102
2103 if (rc) {
2104 dev_err(hdev->dev, "failed to stop TPC 4 QMAN\n");
2105 retval = -EIO;
2106 }
2107
2108 rc = goya_stop_queue(hdev,
2109 mmTPC4_CMDQ_GLBL_CFG1,
2110 mmTPC4_CMDQ_CP_STS,
2111 mmTPC4_CMDQ_GLBL_STS0);
2112
2113 if (rc) {
2114 dev_err(hdev->dev, "failed to stop TPC 4 CMDQ\n");
2115 retval = -EIO;
2116 }
2117
2118 rc = goya_stop_queue(hdev,
2119 mmTPC5_QM_GLBL_CFG1,
2120 mmTPC5_QM_CP_STS,
2121 mmTPC5_QM_GLBL_STS0);
2122
2123 if (rc) {
2124 dev_err(hdev->dev, "failed to stop TPC 5 QMAN\n");
2125 retval = -EIO;
2126 }
2127
2128 rc = goya_stop_queue(hdev,
2129 mmTPC5_CMDQ_GLBL_CFG1,
2130 mmTPC5_CMDQ_CP_STS,
2131 mmTPC5_CMDQ_GLBL_STS0);
2132
2133 if (rc) {
2134 dev_err(hdev->dev, "failed to stop TPC 5 CMDQ\n");
2135 retval = -EIO;
2136 }
2137
2138 rc = goya_stop_queue(hdev,
2139 mmTPC6_QM_GLBL_CFG1,
2140 mmTPC6_QM_CP_STS,
2141 mmTPC6_QM_GLBL_STS0);
2142
2143 if (rc) {
2144 dev_err(hdev->dev, "failed to stop TPC 6 QMAN\n");
2145 retval = -EIO;
2146 }
2147
2148 rc = goya_stop_queue(hdev,
2149 mmTPC6_CMDQ_GLBL_CFG1,
2150 mmTPC6_CMDQ_CP_STS,
2151 mmTPC6_CMDQ_GLBL_STS0);
2152
2153 if (rc) {
2154 dev_err(hdev->dev, "failed to stop TPC 6 CMDQ\n");
2155 retval = -EIO;
2156 }
2157
2158 rc = goya_stop_queue(hdev,
2159 mmTPC7_QM_GLBL_CFG1,
2160 mmTPC7_QM_CP_STS,
2161 mmTPC7_QM_GLBL_STS0);
2162
2163 if (rc) {
2164 dev_err(hdev->dev, "failed to stop TPC 7 QMAN\n");
2165 retval = -EIO;
2166 }
2167
2168 rc = goya_stop_queue(hdev,
2169 mmTPC7_CMDQ_GLBL_CFG1,
2170 mmTPC7_CMDQ_CP_STS,
2171 mmTPC7_CMDQ_GLBL_STS0);
2172
2173 if (rc) {
2174 dev_err(hdev->dev, "failed to stop TPC 7 CMDQ\n");
2175 retval = -EIO;
2176 }
2177
2178 return retval;
2179}
2180
2181static void goya_resume_internal_queues(struct hl_device *hdev)
2182{
2183 WREG32(mmMME_QM_GLBL_CFG1, 0);
2184 WREG32(mmMME_CMDQ_GLBL_CFG1, 0);
2185
2186 WREG32(mmTPC0_QM_GLBL_CFG1, 0);
2187 WREG32(mmTPC0_CMDQ_GLBL_CFG1, 0);
2188
2189 WREG32(mmTPC1_QM_GLBL_CFG1, 0);
2190 WREG32(mmTPC1_CMDQ_GLBL_CFG1, 0);
2191
2192 WREG32(mmTPC2_QM_GLBL_CFG1, 0);
2193 WREG32(mmTPC2_CMDQ_GLBL_CFG1, 0);
2194
2195 WREG32(mmTPC3_QM_GLBL_CFG1, 0);
2196 WREG32(mmTPC3_CMDQ_GLBL_CFG1, 0);
2197
2198 WREG32(mmTPC4_QM_GLBL_CFG1, 0);
2199 WREG32(mmTPC4_CMDQ_GLBL_CFG1, 0);
2200
2201 WREG32(mmTPC5_QM_GLBL_CFG1, 0);
2202 WREG32(mmTPC5_CMDQ_GLBL_CFG1, 0);
2203
2204 WREG32(mmTPC6_QM_GLBL_CFG1, 0);
2205 WREG32(mmTPC6_CMDQ_GLBL_CFG1, 0);
2206
2207 WREG32(mmTPC7_QM_GLBL_CFG1, 0);
2208 WREG32(mmTPC7_CMDQ_GLBL_CFG1, 0);
2209}
2210
2211static void goya_dma_stall(struct hl_device *hdev)
2212{
2213 WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT);
2214 WREG32(mmDMA_QM_1_GLBL_CFG1, 1 << DMA_QM_1_GLBL_CFG1_DMA_STOP_SHIFT);
2215 WREG32(mmDMA_QM_2_GLBL_CFG1, 1 << DMA_QM_2_GLBL_CFG1_DMA_STOP_SHIFT);
2216 WREG32(mmDMA_QM_3_GLBL_CFG1, 1 << DMA_QM_3_GLBL_CFG1_DMA_STOP_SHIFT);
2217 WREG32(mmDMA_QM_4_GLBL_CFG1, 1 << DMA_QM_4_GLBL_CFG1_DMA_STOP_SHIFT);
2218}
2219
2220static void goya_tpc_stall(struct hl_device *hdev)
2221{
2222 WREG32(mmTPC0_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
2223 WREG32(mmTPC1_CFG_TPC_STALL, 1 << TPC1_CFG_TPC_STALL_V_SHIFT);
2224 WREG32(mmTPC2_CFG_TPC_STALL, 1 << TPC2_CFG_TPC_STALL_V_SHIFT);
2225 WREG32(mmTPC3_CFG_TPC_STALL, 1 << TPC3_CFG_TPC_STALL_V_SHIFT);
2226 WREG32(mmTPC4_CFG_TPC_STALL, 1 << TPC4_CFG_TPC_STALL_V_SHIFT);
2227 WREG32(mmTPC5_CFG_TPC_STALL, 1 << TPC5_CFG_TPC_STALL_V_SHIFT);
2228 WREG32(mmTPC6_CFG_TPC_STALL, 1 << TPC6_CFG_TPC_STALL_V_SHIFT);
2229 WREG32(mmTPC7_CFG_TPC_STALL, 1 << TPC7_CFG_TPC_STALL_V_SHIFT);
2230}
2231
2232static void goya_mme_stall(struct hl_device *hdev)
2233{
2234 WREG32(mmMME_STALL, 0xFFFFFFFF);
2235}
2236
2237static int goya_enable_msix(struct hl_device *hdev)
2238{
2239 struct goya_device *goya = hdev->asic_specific;
2240 int cq_cnt = hdev->asic_prop.completion_queues_count;
2241 int rc, i, irq_cnt_init, irq;
2242
2243 if (goya->hw_cap_initialized & HW_CAP_MSIX)
2244 return 0;
2245
2246 rc = pci_alloc_irq_vectors(hdev->pdev, GOYA_MSIX_ENTRIES,
2247 GOYA_MSIX_ENTRIES, PCI_IRQ_MSIX);
2248 if (rc < 0) {
2249 dev_err(hdev->dev,
2250 "MSI-X: Failed to enable support -- %d/%d\n",
2251 GOYA_MSIX_ENTRIES, rc);
2252 return rc;
2253 }
2254
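	/*
	 * Request one IRQ per completion queue; the MSI-X vector index
	 * matches the completion queue index.
	 */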
2255 for (i = 0, irq_cnt_init = 0 ; i < cq_cnt ; i++, irq_cnt_init++) {
2256 irq = pci_irq_vector(hdev->pdev, i);
2257 rc = request_irq(irq, hl_irq_handler_cq, 0, goya_irq_name[i],
2258 &hdev->completion_queue[i]);
2259 if (rc) {
2260			dev_err(hdev->dev, "Failed to request IRQ %d\n", irq);
2261 goto free_irqs;
2262 }
2263 }
2264
2265 irq = pci_irq_vector(hdev->pdev, EVENT_QUEUE_MSIX_IDX);
2266
2267 rc = request_irq(irq, hl_irq_handler_eq, 0,
2268 goya_irq_name[EVENT_QUEUE_MSIX_IDX],
2269 &hdev->event_queue);
2270 if (rc) {
2271		dev_err(hdev->dev, "Failed to request IRQ %d\n", irq);
2272 goto free_irqs;
2273 }
2274
2275 goya->hw_cap_initialized |= HW_CAP_MSIX;
2276 return 0;
2277
2278free_irqs:
2279 for (i = 0 ; i < irq_cnt_init ; i++)
2280 free_irq(pci_irq_vector(hdev->pdev, i),
2281 &hdev->completion_queue[i]);
2282
2283 pci_free_irq_vectors(hdev->pdev);
2284 return rc;
2285}
2286
2287static void goya_sync_irqs(struct hl_device *hdev)
2288{
2289 struct goya_device *goya = hdev->asic_specific;
2290 int i;
2291
2292 if (!(goya->hw_cap_initialized & HW_CAP_MSIX))
2293 return;
2294
2295 /* Wait for all pending IRQs to be finished */
2296 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
2297 synchronize_irq(pci_irq_vector(hdev->pdev, i));
2298
2299 synchronize_irq(pci_irq_vector(hdev->pdev, EVENT_QUEUE_MSIX_IDX));
2300}
2301
2302static void goya_disable_msix(struct hl_device *hdev)
2303{
2304 struct goya_device *goya = hdev->asic_specific;
2305 int i, irq;
2306
2307 if (!(goya->hw_cap_initialized & HW_CAP_MSIX))
2308 return;
2309
2310 goya_sync_irqs(hdev);
2311
2312 irq = pci_irq_vector(hdev->pdev, EVENT_QUEUE_MSIX_IDX);
2313 free_irq(irq, &hdev->event_queue);
2314
2315 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
2316 irq = pci_irq_vector(hdev->pdev, i);
2317 free_irq(irq, &hdev->completion_queue[i]);
2318 }
2319
2320 pci_free_irq_vectors(hdev->pdev);
2321
2322 goya->hw_cap_initialized &= ~HW_CAP_MSIX;
2323}
2324
2325static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
2326{
2327 u32 wait_timeout_ms, cpu_timeout_ms;
2328
2329 dev_info(hdev->dev,
2330 "Halting compute engines and disabling interrupts\n");
2331
2332 if (hdev->pldm) {
2333 wait_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
2334 cpu_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
2335 } else {
2336 wait_timeout_ms = GOYA_RESET_WAIT_MSEC;
2337 cpu_timeout_ms = GOYA_CPU_RESET_WAIT_MSEC;
2338 }
2339
2340 if (hard_reset) {
2341 /*
2342		 * We don't know the state of the CPU, so make sure it is stopped
2343		 * by any means necessary
2344 */
2345 WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_GOTO_WFE);
2346 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
2347 GOYA_ASYNC_EVENT_ID_HALT_MACHINE);
2348 msleep(cpu_timeout_ms);
2349 }
2350
2351 goya_stop_external_queues(hdev);
2352 goya_stop_internal_queues(hdev);
2353
2354 msleep(wait_timeout_ms);
2355
2356 goya_dma_stall(hdev);
2357 goya_tpc_stall(hdev);
2358 goya_mme_stall(hdev);
2359
2360 msleep(wait_timeout_ms);
2361
2362 goya_disable_external_queues(hdev);
2363 goya_disable_internal_queues(hdev);
2364
2365 if (hard_reset)
2366 goya_disable_msix(hdev);
2367 else
2368 goya_sync_irqs(hdev);
2369}
2370
2371/*
2372 * goya_push_fw_to_device - Push FW code to device
2373 *
2374 * @hdev: pointer to hl_device structure
2375 *
2376 * Copy fw code from firmware file to device memory.
2377 * Returns 0 on success
2378 *
2379 */
2380static int goya_push_fw_to_device(struct hl_device *hdev, const char *fw_name,
2381 void __iomem *dst)
2382{
2383 const struct firmware *fw;
2384 const u64 *fw_data;
2385 size_t fw_size, i;
2386 int rc;
2387
2388 rc = request_firmware(&fw, fw_name, hdev->dev);
2389
2390 if (rc) {
2391 dev_err(hdev->dev, "Failed to request %s\n", fw_name);
2392 goto out;
2393 }
2394
2395 fw_size = fw->size;
2396 if ((fw_size % 4) != 0) {
2397 dev_err(hdev->dev, "illegal %s firmware size %zu\n",
2398 fw_name, fw_size);
2399 rc = -EINVAL;
2400 goto out;
2401 }
2402
2403 dev_dbg(hdev->dev, "%s firmware size == %zu\n", fw_name, fw_size);
2404
2405 fw_data = (const u64 *) fw->data;
2406
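	/*
	 * If the image size is not a multiple of 8 (it is still a multiple of
	 * 4, as validated above), shrink the loop bound so the loop copies
	 * only whole 64-bit words; the trailing 32-bit word is written
	 * separately after the loop.
	 */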
2407 if ((fw->size % 8) != 0)
2408 fw_size -= 8;
2409
2410 for (i = 0 ; i < fw_size ; i += 8, fw_data++, dst += 8) {
2411 if (!(i & (0x80000 - 1))) {
2412 dev_dbg(hdev->dev,
2413 "copied so far %zu out of %zu for %s firmware",
2414 i, fw_size, fw_name);
2415 usleep_range(20, 100);
2416 }
2417
2418 writeq(*fw_data, dst);
2419 }
2420
2421 if ((fw->size % 8) != 0)
2422 writel(*(const u32 *) fw_data, dst);
2423
2424out:
2425 release_firmware(fw);
2426 return rc;
2427}
2428
2429static int goya_pldm_init_cpu(struct hl_device *hdev)
2430{
2431 char fw_name[200];
2432 void __iomem *dst;
2433 u32 val, unit_rst_val;
2434 int rc;
2435
2436 /* Must initialize SRAM scrambler before pushing u-boot to SRAM */
2437 goya_init_golden_registers(hdev);
2438
2439 /* Put ARM cores into reset */
2440 WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL, CPU_RESET_ASSERT);
2441 val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);
2442
2443 /* Reset the CA53 MACRO */
2444 unit_rst_val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
2445 WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, CA53_RESET);
2446 val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
2447 WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, unit_rst_val);
2448 val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
2449
2450 snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-u-boot.bin");
2451 dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + UBOOT_FW_OFFSET;
2452 rc = goya_push_fw_to_device(hdev, fw_name, dst);
2453 if (rc)
2454 return rc;
2455
2456 snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-fit.itb");
2457 dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
2458 rc = goya_push_fw_to_device(hdev, fw_name, dst);
2459 if (rc)
2460 return rc;
2461
2462 WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_FIT_RDY);
2463 WREG32(mmPSOC_GLOBAL_CONF_WARM_REBOOT, CPU_BOOT_STATUS_NA);
2464
2465 WREG32(mmCPU_CA53_CFG_RST_ADDR_LSB_0,
2466 lower_32_bits(SRAM_BASE_ADDR + UBOOT_FW_OFFSET));
2467 WREG32(mmCPU_CA53_CFG_RST_ADDR_MSB_0,
2468 upper_32_bits(SRAM_BASE_ADDR + UBOOT_FW_OFFSET));
2469
2470 /* Release ARM core 0 from reset */
2471 WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL,
2472 CPU_RESET_CORE0_DEASSERT);
2473 val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);
2474
2475 return 0;
2476}
2477
2478/*
2479 * FW component passes an offset from SRAM_BASE_ADDR in SCRATCHPAD_xx.
2480 * The version string is located at that offset.
2481 */
2482static void goya_read_device_fw_version(struct hl_device *hdev,
2483 enum goya_fw_component fwc)
2484{
2485 const char *name;
2486 u32 ver_off;
2487 char *dest;
2488
2489 switch (fwc) {
2490 case FW_COMP_UBOOT:
2491 ver_off = RREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_29);
2492 dest = hdev->asic_prop.uboot_ver;
2493 name = "U-Boot";
2494 break;
2495 case FW_COMP_PREBOOT:
2496 ver_off = RREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_28);
2497 dest = hdev->asic_prop.preboot_ver;
2498 name = "Preboot";
2499 break;
2500 default:
2501 dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
2502 return;
2503 }
2504
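	/*
	 * Clear the SRAM base bits so ver_off becomes a plain offset that can
	 * be added to the SRAM/CFG PCI BAR mapping.
	 */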
2505 ver_off &= ~((u32)SRAM_BASE_ADDR);
2506
2507 if (ver_off < SRAM_SIZE - VERSION_MAX_LEN) {
2508 memcpy_fromio(dest, hdev->pcie_bar[SRAM_CFG_BAR_ID] + ver_off,
2509 VERSION_MAX_LEN);
2510 } else {
2511 dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n",
2512 name, ver_off);
2513 strcpy(dest, "unavailable");
2514 }
2515}
2516
2517static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
2518{
2519 struct goya_device *goya = hdev->asic_specific;
2520 char fw_name[200];
2521 void __iomem *dst;
2522 u32 status;
2523 int rc;
2524
2525 if (!hdev->cpu_enable)
2526 return 0;
2527
2528 if (goya->hw_cap_initialized & HW_CAP_CPU)
2529 return 0;
2530
2531 /*
2532	 * Before pushing u-boot/linux to the device, we need to set the
2533	 * DDR BAR to the base address of DRAM
2534 */
2535 rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
2536 if (rc) {
2537 dev_err(hdev->dev,
2538 "failed to map DDR bar to DRAM base address\n");
2539 return rc;
2540 }
2541
2542 if (hdev->pldm) {
2543 rc = goya_pldm_init_cpu(hdev);
2544 if (rc)
2545 return rc;
2546
2547 goto out;
2548 }
2549
2550 /* Make sure CPU boot-loader is running */
2551 rc = hl_poll_timeout(
2552 hdev,
2553 mmPSOC_GLOBAL_CONF_WARM_REBOOT,
2554 status,
2555 (status == CPU_BOOT_STATUS_DRAM_RDY) ||
2556 (status == CPU_BOOT_STATUS_SRAM_AVAIL),
2557 10000,
2558 cpu_timeout);
2559
2560 if (rc) {
2561 dev_err(hdev->dev, "Error in ARM u-boot!");
2562 switch (status) {
2563 case CPU_BOOT_STATUS_NA:
2564 dev_err(hdev->dev,
2565 "ARM status %d - BTL did NOT run\n", status);
2566 break;
2567 case CPU_BOOT_STATUS_IN_WFE:
2568 dev_err(hdev->dev,
2569 "ARM status %d - Inside WFE loop\n", status);
2570 break;
2571 case CPU_BOOT_STATUS_IN_BTL:
2572 dev_err(hdev->dev,
2573 "ARM status %d - Stuck in BTL\n", status);
2574 break;
2575 case CPU_BOOT_STATUS_IN_PREBOOT:
2576 dev_err(hdev->dev,
2577 "ARM status %d - Stuck in Preboot\n", status);
2578 break;
2579 case CPU_BOOT_STATUS_IN_SPL:
2580 dev_err(hdev->dev,
2581 "ARM status %d - Stuck in SPL\n", status);
2582 break;
2583 case CPU_BOOT_STATUS_IN_UBOOT:
2584 dev_err(hdev->dev,
2585 "ARM status %d - Stuck in u-boot\n", status);
2586 break;
2587 case CPU_BOOT_STATUS_DRAM_INIT_FAIL:
2588 dev_err(hdev->dev,
2589 "ARM status %d - DDR initialization failed\n",
2590 status);
2591 break;
2592 default:
2593 dev_err(hdev->dev,
2594 "ARM status %d - Invalid status code\n",
2595 status);
2596 break;
2597 }
2598 return -EIO;
2599 }
2600
2601	/* Read U-Boot/Preboot versions now in case we fail later */
2602 goya_read_device_fw_version(hdev, FW_COMP_UBOOT);
2603 goya_read_device_fw_version(hdev, FW_COMP_PREBOOT);
2604
2605 if (status == CPU_BOOT_STATUS_SRAM_AVAIL)
2606 goto out;
2607
2608 if (!hdev->fw_loading) {
2609 dev_info(hdev->dev, "Skip loading FW\n");
2610 goto out;
2611 }
2612
2613 snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-fit.itb");
2614 dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
2615 rc = goya_push_fw_to_device(hdev, fw_name, dst);
2616 if (rc)
2617 return rc;
2618
2619 WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_FIT_RDY);
2620
2621 rc = hl_poll_timeout(
2622 hdev,
2623 mmPSOC_GLOBAL_CONF_WARM_REBOOT,
2624 status,
2625 (status == CPU_BOOT_STATUS_SRAM_AVAIL),
2626 10000,
2627 cpu_timeout);
2628
2629 if (rc) {
2630 if (status == CPU_BOOT_STATUS_FIT_CORRUPTED)
2631 dev_err(hdev->dev,
2632 "ARM u-boot reports FIT image is corrupted\n");
2633 else
2634 dev_err(hdev->dev,
2635 "ARM Linux failed to load, %d\n", status);
2636 WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_NA);
2637 return -EIO;
2638 }
2639
2640 dev_info(hdev->dev, "Successfully loaded firmware to device\n");
2641
2642out:
2643 goya->hw_cap_initialized |= HW_CAP_CPU;
2644
2645 return 0;
2646}
2647
2648static int goya_mmu_init(struct hl_device *hdev)
2649{
2650 struct asic_fixed_properties *prop = &hdev->asic_prop;
2651 struct goya_device *goya = hdev->asic_specific;
2652 u64 hop0_addr;
2653 int rc, i;
2654
2655 if (!hdev->mmu_enable)
2656 return 0;
2657
2658 if (goya->hw_cap_initialized & HW_CAP_MMU)
2659 return 0;
2660
2661 hdev->dram_supports_virtual_memory = true;
2662	hdev->dram_default_page_mapping = true;
2663
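	/*
	 * Each address-space ID (ASID) gets its own hop-0 page table inside
	 * the page-tables area; register the hop-0 address of every ASID
	 * with the MMU.
	 */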
2664 for (i = 0 ; i < prop->max_asid ; i++) {
2665 hop0_addr = prop->mmu_pgt_addr +
2666 (i * prop->mmu_hop_table_size);
2667
2668 rc = goya_mmu_update_asid_hop0_addr(hdev, i, hop0_addr);
2669 if (rc) {
2670 dev_err(hdev->dev,
2671 "failed to set hop0 addr for asid %d\n", i);
2672 goto err;
2673 }
2674 }
2675
2676 goya->hw_cap_initialized |= HW_CAP_MMU;
2677
2678 /* init MMU cache manage page */
2679	WREG32(mmSTLB_CACHE_INV_BASE_39_8,
2680 lower_32_bits(MMU_CACHE_MNG_ADDR >> 8));
2681 WREG32(mmSTLB_CACHE_INV_BASE_49_40, MMU_CACHE_MNG_ADDR >> 40);
2682
2683 /* Remove follower feature due to performance bug */
2684 WREG32_AND(mmSTLB_STLB_FEATURE_EN,
2685 (~STLB_STLB_FEATURE_EN_FOLLOWER_EN_MASK));
2686
2687 hdev->asic_funcs->mmu_invalidate_cache(hdev, true);
2688
2689 WREG32(mmMMU_MMU_ENABLE, 1);
2690 WREG32(mmMMU_SPI_MASK, 0xF);
2691
2692 return 0;
2693
2694err:
2695 return rc;
2696}
2697
2698/*
2699 * goya_hw_init - Goya hardware initialization code
2700 *
2701 * @hdev: pointer to hl_device structure
2702 *
2703 * Returns 0 on success
2704 *
2705 */
2706static int goya_hw_init(struct hl_device *hdev)
2707{
2708 struct asic_fixed_properties *prop = &hdev->asic_prop;
2709 u32 val;
2710 int rc;
2711
2712 dev_info(hdev->dev, "Starting initialization of H/W\n");
2713
2714 /* Perform read from the device to make sure device is up */
2715 val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
2716
2717	/*
2718 * Let's mark in the H/W that we have reached this point. We check
2719 * this value in the reset_before_init function to understand whether
2720 * we need to reset the chip before doing H/W init. This register is
2721 * cleared by the H/W upon H/W reset
2722 */
2723 WREG32(mmPSOC_GLOBAL_CONF_APP_STATUS, HL_DEVICE_HW_STATE_DIRTY);
2724
2725	rc = goya_init_cpu(hdev, GOYA_CPU_TIMEOUT_USEC);
2726 if (rc) {
2727 dev_err(hdev->dev, "failed to initialize CPU\n");
2728 return rc;
2729 }
2730
2731 goya_tpc_mbist_workaround(hdev);
2732
2733 goya_init_golden_registers(hdev);
2734
2735 /*
2736 * After CPU initialization is finished, change DDR bar mapping inside
2737 * iATU to point to the start address of the MMU page tables
2738 */
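	/*
	 * The mask aligns the page-tables address down to a DDR-BAR-size
	 * boundary, presumably because the iATU can point the BAR only at
	 * BAR-size-aligned device addresses.
	 */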
2739 rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE +
2740 (MMU_PAGE_TABLES_ADDR & ~(prop->dram_pci_bar_size - 0x1ull)));
2741 if (rc) {
2742 dev_err(hdev->dev,
2743 "failed to map DDR bar to MMU page tables\n");
2744 return rc;
2745 }
2746
2747	rc = goya_mmu_init(hdev);
2748 if (rc)
2749 return rc;
2750
2751	goya_init_security(hdev);
2752
2753	goya_init_dma_qmans(hdev);
2754
2755 goya_init_mme_qmans(hdev);
2756
2757 goya_init_tpc_qmans(hdev);
2758
2759	/* MSI-X must be enabled before CPU queues are initialized */
2760 rc = goya_enable_msix(hdev);
2761 if (rc)
2762 goto disable_queues;
2763
2764	rc = goya_init_cpu_queues(hdev);
2765 if (rc) {
2766 dev_err(hdev->dev, "failed to initialize CPU H/W queues %d\n",
2767 rc);
2768		goto disable_msix;
2769	}
2770
2771	/* CPU initialization is finished, we can now move to 48 bit DMA mask */
2772 rc = pci_set_dma_mask(hdev->pdev, DMA_BIT_MASK(48));
2773 if (rc) {
2774 dev_warn(hdev->dev, "Unable to set pci dma mask to 48 bits\n");
2775 rc = pci_set_dma_mask(hdev->pdev, DMA_BIT_MASK(32));
2776 if (rc) {
2777 dev_err(hdev->dev,
2778 "Unable to set pci dma mask to 32 bits\n");
2779			goto disable_pci_access;
2780		}
2781 }
2782
2783 rc = pci_set_consistent_dma_mask(hdev->pdev, DMA_BIT_MASK(48));
2784 if (rc) {
2785 dev_warn(hdev->dev,
2786 "Unable to set pci consistent dma mask to 48 bits\n");
2787 rc = pci_set_consistent_dma_mask(hdev->pdev, DMA_BIT_MASK(32));
2788 if (rc) {
2789 dev_err(hdev->dev,
2790 "Unable to set pci consistent dma mask to 32 bits\n");
2791			goto disable_pci_access;
2792		}
2793 }
2794
2795 /* Perform read from the device to flush all MSI-X configuration */
2796 val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
2797
2798 return 0;
2799
2800disable_pci_access:
2801 goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
2802disable_msix:
2803 goya_disable_msix(hdev);
2804disable_queues:
2805 goya_disable_internal_queues(hdev);
2806 goya_disable_external_queues(hdev);
2807
2808 return rc;
2809}
2810
2811/*
2812 * goya_hw_fini - Goya hardware tear-down code
2813 *
2814 * @hdev: pointer to hl_device structure
2815 * @hard_reset: should we do hard reset to all engines or just reset the
2816 * compute/dma engines
2817 */
2818static void goya_hw_fini(struct hl_device *hdev, bool hard_reset)
2819{
2820 struct goya_device *goya = hdev->asic_specific;
2821 u32 reset_timeout_ms, status;
2822
2823 if (hdev->pldm)
2824 reset_timeout_ms = GOYA_PLDM_RESET_TIMEOUT_MSEC;
2825 else
2826 reset_timeout_ms = GOYA_RESET_TIMEOUT_MSEC;
2827
2828 if (hard_reset) {
2829 goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
2830 goya_disable_clk_rlx(hdev);
2831 goya_set_pll_refclk(hdev);
2832
2833 WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, RESET_ALL);
2834 dev_info(hdev->dev,
2835 "Issued HARD reset command, going to wait %dms\n",
2836 reset_timeout_ms);
2837 } else {
2838 WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, DMA_MME_TPC_RESET);
2839 dev_info(hdev->dev,
2840 "Issued SOFT reset command, going to wait %dms\n",
2841 reset_timeout_ms);
2842 }
2843
2844 /*
2845 * After hard reset, we can't poll the BTM_FSM register because the PSOC
2846	 * itself is in reset. For either reset type, we need to wait until the reset
2847 * is deasserted
2848 */
2849 msleep(reset_timeout_ms);
2850
2851 status = RREG32(mmPSOC_GLOBAL_CONF_BTM_FSM);
2852 if (status & PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK)
2853 dev_err(hdev->dev,
2854 "Timeout while waiting for device to reset 0x%x\n",
2855 status);
2856
2857	if (!hard_reset) {
2858 goya->hw_cap_initialized &= ~(HW_CAP_DMA | HW_CAP_MME |
2859 HW_CAP_GOLDEN | HW_CAP_TPC);
2860 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
2861 GOYA_ASYNC_EVENT_ID_SOFT_RESET);
2862 return;
2863 }
2864
2865	/* Chicken bit to re-initiate boot sequencer flow */
2866 WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START,
2867 1 << PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_SHIFT);
2868 /* Move boot manager FSM to pre boot sequencer init state */
2869 WREG32(mmPSOC_GLOBAL_CONF_SW_BTM_FSM,
2870 0xA << PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_SHIFT);
2871
2872 goya->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q |
2873 HW_CAP_DDR_0 | HW_CAP_DDR_1 |
2874 HW_CAP_DMA | HW_CAP_MME |
2875 HW_CAP_MMU | HW_CAP_TPC_MBIST |
2876 HW_CAP_GOLDEN | HW_CAP_TPC);
2877	memset(goya->events_stat, 0, sizeof(goya->events_stat));
2878
2879 if (!hdev->pldm) {
2880 int rc;
2881		/* In case we are running inside a VM and the VM is
2882 * shutting down, we need to make sure CPU boot-loader
2883 * is running before we can continue the VM shutdown.
2884 * That is because the VM will send an FLR signal that
2885 * we must answer
2886 */
2887 dev_info(hdev->dev,
2888 "Going to wait up to %ds for CPU boot loader\n",
2889 GOYA_CPU_TIMEOUT_USEC / 1000 / 1000);
2890
2891 rc = hl_poll_timeout(
2892 hdev,
2893 mmPSOC_GLOBAL_CONF_WARM_REBOOT,
2894 status,
2895 (status == CPU_BOOT_STATUS_DRAM_RDY),
2896 10000,
2897 GOYA_CPU_TIMEOUT_USEC);
2898 if (rc)
2899 dev_err(hdev->dev,
2900 "failed to wait for CPU boot loader\n");
2901 }
2902}
2903
2904int goya_suspend(struct hl_device *hdev)
2905{
2906	int rc;
2907
2908 rc = goya_stop_internal_queues(hdev);
2909
2910 if (rc) {
2911 dev_err(hdev->dev, "failed to stop internal queues\n");
2912 return rc;
2913 }
2914
2915 rc = goya_stop_external_queues(hdev);
2916
2917 if (rc) {
2918 dev_err(hdev->dev, "failed to stop external queues\n");
2919 return rc;
2920 }
2921
2922 rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
2923 if (rc)
2924 dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
2925
2926 return rc;
2927}
2928
2929int goya_resume(struct hl_device *hdev)
2930{
2931	int rc;
2932
2933 goya_resume_external_queues(hdev);
2934 goya_resume_internal_queues(hdev);
2935
2936 rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
2937 if (rc)
2938 dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
2939 return rc;
2940}
2941
2942static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
2943		u64 kaddress, phys_addr_t paddress, u32 size)
2944{
2945 int rc;
2946
2947 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
2948 VM_DONTCOPY | VM_NORESERVE;
2949
2950 rc = remap_pfn_range(vma, vma->vm_start, paddress >> PAGE_SHIFT,
2951 size, vma->vm_page_prot);
2952 if (rc)
2953 dev_err(hdev->dev, "remap_pfn_range error %d", rc);
2954
2955 return rc;
2956}
2957
2958static void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
2959{
2960 u32 db_reg_offset, db_value;
2961 bool invalid_queue = false;
2962
2963 switch (hw_queue_id) {
2964 case GOYA_QUEUE_ID_DMA_0:
2965 db_reg_offset = mmDMA_QM_0_PQ_PI;
2966 break;
2967
2968 case GOYA_QUEUE_ID_DMA_1:
2969 db_reg_offset = mmDMA_QM_1_PQ_PI;
2970 break;
2971
2972 case GOYA_QUEUE_ID_DMA_2:
2973 db_reg_offset = mmDMA_QM_2_PQ_PI;
2974 break;
2975
2976 case GOYA_QUEUE_ID_DMA_3:
2977 db_reg_offset = mmDMA_QM_3_PQ_PI;
2978 break;
2979
2980 case GOYA_QUEUE_ID_DMA_4:
2981 db_reg_offset = mmDMA_QM_4_PQ_PI;
2982 break;
2983
2984 case GOYA_QUEUE_ID_CPU_PQ:
2985 if (hdev->cpu_queues_enable)
2986 db_reg_offset = mmCPU_IF_PF_PQ_PI;
2987 else
2988 invalid_queue = true;
2989 break;
2990
2991 case GOYA_QUEUE_ID_MME:
2992 db_reg_offset = mmMME_QM_PQ_PI;
2993 break;
2994
2995 case GOYA_QUEUE_ID_TPC0:
2996 db_reg_offset = mmTPC0_QM_PQ_PI;
2997 break;
2998
2999 case GOYA_QUEUE_ID_TPC1:
3000 db_reg_offset = mmTPC1_QM_PQ_PI;
3001 break;
3002
3003 case GOYA_QUEUE_ID_TPC2:
3004 db_reg_offset = mmTPC2_QM_PQ_PI;
3005 break;
3006
3007 case GOYA_QUEUE_ID_TPC3:
3008 db_reg_offset = mmTPC3_QM_PQ_PI;
3009 break;
3010
3011 case GOYA_QUEUE_ID_TPC4:
3012 db_reg_offset = mmTPC4_QM_PQ_PI;
3013 break;
3014
3015 case GOYA_QUEUE_ID_TPC5:
3016 db_reg_offset = mmTPC5_QM_PQ_PI;
3017 break;
3018
3019 case GOYA_QUEUE_ID_TPC6:
3020 db_reg_offset = mmTPC6_QM_PQ_PI;
3021 break;
3022
3023 case GOYA_QUEUE_ID_TPC7:
3024 db_reg_offset = mmTPC7_QM_PQ_PI;
3025 break;
3026
3027 default:
3028 invalid_queue = true;
3029 }
3030
3031 if (invalid_queue) {
3032 /* Should never get here */
3033 dev_err(hdev->dev, "h/w queue %d is invalid. Can't set pi\n",
3034 hw_queue_id);
3035 return;
3036 }
3037
3038 db_value = pi;
3039
3040 /* ring the doorbell */
3041 WREG32(db_reg_offset, db_value);
3042
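	/*
	 * The device CPU presumably doesn't snoop writes to its PQ PI
	 * register, so after updating the PI we also raise the PI_UPDATE
	 * event to make it sample the new value.
	 */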
3043 if (hw_queue_id == GOYA_QUEUE_ID_CPU_PQ)
3044 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
3045 GOYA_ASYNC_EVENT_ID_PI_UPDATE);
3046}
3047
3048void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val)
3049{
3050 /* Not needed in Goya */
3051}
3052
3053static void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size,
3054			dma_addr_t *dma_handle, gfp_t flags)
3055{
3056 return dma_alloc_coherent(&hdev->pdev->dev, size, dma_handle, flags);
3057}
3058
3059static void goya_dma_free_coherent(struct hl_device *hdev, size_t size,
3060			void *cpu_addr, dma_addr_t dma_handle)
3061{
3062 dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, dma_handle);
3063}
3064
3065void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
3066 dma_addr_t *dma_handle, u16 *queue_len)
3067{
3068 void *base;
3069 u32 offset;
3070
3071 *dma_handle = hdev->asic_prop.sram_base_address;
3072
3073 base = hdev->pcie_bar[SRAM_CFG_BAR_ID];
3074
3075 switch (queue_id) {
3076 case GOYA_QUEUE_ID_MME:
3077 offset = MME_QMAN_BASE_OFFSET;
3078 *queue_len = MME_QMAN_LENGTH;
3079 break;
3080 case GOYA_QUEUE_ID_TPC0:
3081 offset = TPC0_QMAN_BASE_OFFSET;
3082 *queue_len = TPC_QMAN_LENGTH;
3083 break;
3084 case GOYA_QUEUE_ID_TPC1:
3085 offset = TPC1_QMAN_BASE_OFFSET;
3086 *queue_len = TPC_QMAN_LENGTH;
3087 break;
3088 case GOYA_QUEUE_ID_TPC2:
3089 offset = TPC2_QMAN_BASE_OFFSET;
3090 *queue_len = TPC_QMAN_LENGTH;
3091 break;
3092 case GOYA_QUEUE_ID_TPC3:
3093 offset = TPC3_QMAN_BASE_OFFSET;
3094 *queue_len = TPC_QMAN_LENGTH;
3095 break;
3096 case GOYA_QUEUE_ID_TPC4:
3097 offset = TPC4_QMAN_BASE_OFFSET;
3098 *queue_len = TPC_QMAN_LENGTH;
3099 break;
3100 case GOYA_QUEUE_ID_TPC5:
3101 offset = TPC5_QMAN_BASE_OFFSET;
3102 *queue_len = TPC_QMAN_LENGTH;
3103 break;
3104 case GOYA_QUEUE_ID_TPC6:
3105 offset = TPC6_QMAN_BASE_OFFSET;
3106 *queue_len = TPC_QMAN_LENGTH;
3107 break;
3108 case GOYA_QUEUE_ID_TPC7:
3109 offset = TPC7_QMAN_BASE_OFFSET;
3110 *queue_len = TPC_QMAN_LENGTH;
3111 break;
3112 default:
3113 dev_err(hdev->dev, "Got invalid queue id %d\n", queue_id);
3114 return NULL;
3115 }
3116
3117 base += offset;
3118 *dma_handle += offset;
3119
3120 return base;
3121}
3122
3123static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
3124{
3125 struct goya_device *goya = hdev->asic_specific;
3126 struct packet_msg_prot *fence_pkt;
3127 u32 *fence_ptr;
3128 dma_addr_t fence_dma_addr;
3129 struct hl_cb *cb;
3130	u32 tmp, timeout;
3131	int rc;
3132
3133	if (hdev->pldm)
3134 timeout = GOYA_PLDM_QMAN0_TIMEOUT_USEC;
3135 else
3136 timeout = HL_DEVICE_TIMEOUT_USEC;
3137
3138	if (!hdev->asic_funcs->is_device_idle(hdev)) {
3139 dev_err_ratelimited(hdev->dev,
3140 "Can't send KMD job on QMAN0 if device is not idle\n");
3141 return -EFAULT;
3142 }
3143
3144 fence_ptr = hdev->asic_funcs->dma_pool_zalloc(hdev, 4, GFP_KERNEL,
3145 &fence_dma_addr);
3146 if (!fence_ptr) {
3147 dev_err(hdev->dev,
3148 "Failed to allocate fence memory for QMAN0\n");
3149 return -ENOMEM;
3150 }
3151
3152 *fence_ptr = 0;
3153
3154 if (goya->hw_cap_initialized & HW_CAP_MMU) {
3155 WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_FULLY_TRUSTED);
3156 RREG32(mmDMA_QM_0_GLBL_PROT);
3157 }
3158
3159 /*
3160	 * The Goya CS parser saves space for 2 x packet_msg_prot at the end
3161	 * of the CB. For synchronized kernel jobs, space for only 1 is needed
3162 */
3163 job->job_cb_size -= sizeof(struct packet_msg_prot);
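	/*
	 * Reuse the reclaimed slot at the end of the CB for a MSG_PROT packet
	 * that writes GOYA_QMAN0_FENCE_VAL to host memory; job completion is
	 * then detected by polling that address below.
	 */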
3164
3165 cb = job->patched_cb;
3166
3167 fence_pkt = (struct packet_msg_prot *) (uintptr_t) (cb->kernel_address +
3168 job->job_cb_size - sizeof(struct packet_msg_prot));
3169
3170 fence_pkt->ctl = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
3171 (1 << GOYA_PKT_CTL_EB_SHIFT) |
3172 (1 << GOYA_PKT_CTL_MB_SHIFT);
3173 fence_pkt->value = GOYA_QMAN0_FENCE_VAL;
3174 fence_pkt->addr = fence_dma_addr +
3175 hdev->asic_prop.host_phys_base_address;
3176
3177 rc = hl_hw_queue_send_cb_no_cmpl(hdev, GOYA_QUEUE_ID_DMA_0,
3178 job->job_cb_size, cb->bus_address);
3179 if (rc) {
3180 dev_err(hdev->dev, "Failed to send CB on QMAN0, %d\n", rc);
3181 goto free_fence_ptr;
3182 }
3183
3184	rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) fence_ptr, timeout,
3185 &tmp);
3186
3187 hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_DMA_0);
3188
3189 if ((rc) || (tmp != GOYA_QMAN0_FENCE_VAL)) {
3190 dev_err(hdev->dev, "QMAN0 Job hasn't finished in time\n");
3191 rc = -ETIMEDOUT;
3192 }
3193
3194free_fence_ptr:
3195 hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_ptr,
3196 fence_dma_addr);
3197
3198 if (goya->hw_cap_initialized & HW_CAP_MMU) {
3199 WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_PARTLY_TRUSTED);
3200 RREG32(mmDMA_QM_0_GLBL_PROT);
3201 }
3202
3203 return rc;
3204}
3205
3206int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
3207 u32 timeout, long *result)
3208{
3209 struct goya_device *goya = hdev->asic_specific;
3210 struct armcp_packet *pkt;
3211 dma_addr_t pkt_dma_addr;
3212 u32 tmp;
3213 int rc = 0;
3214
3215 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) {
3216 if (result)
3217 *result = 0;
3218 return 0;
3219 }
3220
3221 if (len > CPU_CB_SIZE) {
3222 dev_err(hdev->dev, "Invalid CPU message size of %d bytes\n",
3223 len);
3224 return -ENOMEM;
3225 }
3226
3227 pkt = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, len,
3228 &pkt_dma_addr);
3229 if (!pkt) {
3230 dev_err(hdev->dev,
3231 "Failed to allocate DMA memory for packet to CPU\n");
3232 return -ENOMEM;
3233 }
3234
3235 memcpy(pkt, msg, len);
3236
3237 mutex_lock(&hdev->send_cpu_message_lock);
3238
3239 if (hdev->disabled)
3240 goto out;
3241
3242	if (hdev->device_cpu_disabled) {
3243 rc = -EIO;
3244 goto out;
3245 }
3246
3247	rc = hl_hw_queue_send_cb_no_cmpl(hdev, GOYA_QUEUE_ID_CPU_PQ, len,
3248 pkt_dma_addr);
3249 if (rc) {
3250 dev_err(hdev->dev, "Failed to send CB on CPU PQ (%d)\n", rc);
3251 goto out;
3252 }
3253
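	/*
	 * Wait for the device CPU to overwrite the packet's fence field,
	 * which signals that it has consumed the packet and filled in the
	 * result.
	 */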
3254 rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) &pkt->fence,
3255 timeout, &tmp);
3256
3257 hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_CPU_PQ);
3258
3259 if (rc == -ETIMEDOUT) {
3260		dev_err(hdev->dev, "Timeout while waiting for device CPU\n");
3261 hdev->device_cpu_disabled = true;
3262		goto out;
3263 }
3264
3265 if (tmp == ARMCP_PACKET_FENCE_VAL) {
3266 rc = (pkt->ctl & ARMCP_PKT_CTL_RC_MASK) >>
3267 ARMCP_PKT_CTL_RC_SHIFT;
3268 if (rc) {
3269 dev_err(hdev->dev,
3270 "F/W ERROR %d for CPU packet %d\n",
3271 rc, (pkt->ctl & ARMCP_PKT_CTL_OPCODE_MASK)
3272 >> ARMCP_PKT_CTL_OPCODE_SHIFT);
3273 rc = -EINVAL;
3274 } else if (result) {
3275 *result = pkt->result;
3276 }
3277 } else {
3278 dev_err(hdev->dev, "CPU packet wrong fence value\n");
3279 rc = -EINVAL;
3280 }
3281
3282out:
3283 mutex_unlock(&hdev->send_cpu_message_lock);
3284
3285 hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, len, pkt);
3286
3287 return rc;
3288}
3289
3290int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
3291{
3292 struct packet_msg_prot *fence_pkt;
3293 dma_addr_t pkt_dma_addr;
3294 u32 fence_val, tmp;
3295 dma_addr_t fence_dma_addr;
3296 u32 *fence_ptr;
3297 int rc;
3298
3299 fence_val = GOYA_QMAN0_FENCE_VAL;
3300
3301 fence_ptr = hdev->asic_funcs->dma_pool_zalloc(hdev, 4, GFP_KERNEL,
3302 &fence_dma_addr);
3303 if (!fence_ptr) {
3304 dev_err(hdev->dev,
3305 "Failed to allocate memory for queue testing\n");
3306 return -ENOMEM;
3307 }
3308
3309 *fence_ptr = 0;
3310
3311 fence_pkt = hdev->asic_funcs->dma_pool_zalloc(hdev,
3312 sizeof(struct packet_msg_prot),
3313 GFP_KERNEL, &pkt_dma_addr);
3314 if (!fence_pkt) {
3315 dev_err(hdev->dev,
3316 "Failed to allocate packet for queue testing\n");
3317 rc = -ENOMEM;
3318 goto free_fence_ptr;
3319 }
3320
3321 fence_pkt->ctl = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
3322 (1 << GOYA_PKT_CTL_EB_SHIFT) |
3323 (1 << GOYA_PKT_CTL_MB_SHIFT);
3324 fence_pkt->value = fence_val;
3325 fence_pkt->addr = fence_dma_addr +
3326 hdev->asic_prop.host_phys_base_address;
3327
3328 rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id,
3329 sizeof(struct packet_msg_prot),
3330 pkt_dma_addr);
3331 if (rc) {
3332 dev_err(hdev->dev,
3333 "Failed to send fence packet\n");
3334 goto free_pkt;
3335 }
3336
3337 rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) fence_ptr,
3338 GOYA_TEST_QUEUE_WAIT_USEC, &tmp);
3339
3340 hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
3341
3342 if ((!rc) && (tmp == fence_val)) {
3343 dev_info(hdev->dev,
3344 "queue test on H/W queue %d succeeded\n",
3345 hw_queue_id);
3346 } else {
3347 dev_err(hdev->dev,
3348 "H/W queue %d test failed (scratch(0x%08llX) == 0x%08X)\n",
3349 hw_queue_id, (unsigned long long) fence_dma_addr, tmp);
3350 rc = -EINVAL;
3351 }
3352
3353free_pkt:
3354 hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_pkt,
3355 pkt_dma_addr);
3356free_fence_ptr:
3357 hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_ptr,
3358 fence_dma_addr);
3359 return rc;
3360}
3361
3362int goya_test_cpu_queue(struct hl_device *hdev)
3363{
3364 struct armcp_packet test_pkt;
3365 long result;
3366 int rc;
3367
3368 /* cpu_queues_enable flag is always checked in send cpu message */
3369
3370 memset(&test_pkt, 0, sizeof(test_pkt));
3371
3372 test_pkt.ctl = ARMCP_PACKET_TEST << ARMCP_PKT_CTL_OPCODE_SHIFT;
3373 test_pkt.value = ARMCP_PACKET_FENCE_VAL;
3374
3375 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt,
3376 sizeof(test_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
3377
3378	if (!rc) {
3379 if (result == ARMCP_PACKET_FENCE_VAL)
3380 dev_info(hdev->dev,
3381 "queue test on CPU queue succeeded\n");
3382 else
3383 dev_err(hdev->dev,
3384 "CPU queue test failed (0x%08lX)\n", result);
3385 } else {
3386 dev_err(hdev->dev, "CPU queue test failed, error %d\n", rc);
3387 }
3388
3389 return rc;
3390}
3391
3392static int goya_test_queues(struct hl_device *hdev)
3393{
3394 struct goya_device *goya = hdev->asic_specific;
3395 int i, rc, ret_val = 0;
3396
3397 for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
3398 rc = goya_test_queue(hdev, i);
3399 if (rc)
3400 ret_val = -EINVAL;
3401 }
3402
3403 if (hdev->cpu_queues_enable) {
3404 rc = goya->test_cpu_queue(hdev);
3405 if (rc)
3406 ret_val = -EINVAL;
3407 }
3408
3409 return ret_val;
3410}
3411
3412static void *goya_dma_pool_zalloc(struct hl_device *hdev, size_t size,
3413 gfp_t mem_flags, dma_addr_t *dma_handle)
3414{
3415 if (size > GOYA_DMA_POOL_BLK_SIZE)
3416 return NULL;
3417
3418 return dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle);
3419}
3420
3421static void goya_dma_pool_free(struct hl_device *hdev, void *vaddr,
3422			dma_addr_t dma_addr)
3423{
3424 dma_pool_free(hdev->dma_pool, vaddr, dma_addr);
3425}
3426
3427static void *goya_cpu_accessible_dma_pool_alloc(struct hl_device *hdev,
3428					size_t size, dma_addr_t *dma_handle)
3429{
3430 u64 kernel_addr;
3431
3432 /* roundup to CPU_PKT_SIZE */
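	/*
	 * A minimal worked example, assuming CPU_PKT_SIZE is a power of two
	 * and CPU_PKT_MASK == ~(CPU_PKT_SIZE - 1): with CPU_PKT_SIZE == 8, a
	 * 13-byte request rounds up to 16 bytes.
	 */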
3433 size = (size + (CPU_PKT_SIZE - 1)) & CPU_PKT_MASK;
3434
3435 kernel_addr = gen_pool_alloc(hdev->cpu_accessible_dma_pool, size);
3436
3437 *dma_handle = hdev->cpu_accessible_dma_address +
3438 (kernel_addr - (u64) (uintptr_t) hdev->cpu_accessible_dma_mem);
3439
3440 return (void *) (uintptr_t) kernel_addr;
3441}
3442
3443static void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev,
3444					size_t size, void *vaddr)
3445{
3446 /* roundup to CPU_PKT_SIZE */
3447 size = (size + (CPU_PKT_SIZE - 1)) & CPU_PKT_MASK;
3448
3449 gen_pool_free(hdev->cpu_accessible_dma_pool, (u64) (uintptr_t) vaddr,
3450 size);
3451}
3452
3453static int goya_dma_map_sg(struct hl_device *hdev, struct scatterlist *sg,
3454			int nents, enum dma_data_direction dir)
3455{
3456 if (!dma_map_sg(&hdev->pdev->dev, sg, nents, dir))
3457 return -ENOMEM;
3458
3459 return 0;
3460}
3461
3462static void goya_dma_unmap_sg(struct hl_device *hdev, struct scatterlist *sg,
3463			int nents, enum dma_data_direction dir)
3464{
3465 dma_unmap_sg(&hdev->pdev->dev, sg, nents, dir);
3466}
3467
3468u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt)
3469{
3470 struct scatterlist *sg, *sg_next_iter;
3471	u32 count, dma_desc_cnt;
3472 u64 len, len_next;
3473	dma_addr_t addr, addr_next;
3474
3475 dma_desc_cnt = 0;
3476
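	/*
	 * Merge physically contiguous SG entries, up to DMA_MAX_TRANSFER_SIZE
	 * per merged chunk, to count how many LIN_DMA packets the patched CB
	 * will need for this transfer.
	 */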
3477 for_each_sg(sgt->sgl, sg, sgt->nents, count) {
3478
3479 len = sg_dma_len(sg);
3480 addr = sg_dma_address(sg);
3481
3482 if (len == 0)
3483 break;
3484
3485 while ((count + 1) < sgt->nents) {
3486 sg_next_iter = sg_next(sg);
3487 len_next = sg_dma_len(sg_next_iter);
3488 addr_next = sg_dma_address(sg_next_iter);
3489
3490 if (len_next == 0)
3491 break;
3492
3493 if ((addr + len == addr_next) &&
3494 (len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
3495 len += len_next;
3496 count++;
3497 sg = sg_next_iter;
3498 } else {
3499 break;
3500 }
3501 }
3502
3503 dma_desc_cnt++;
3504 }
3505
3506 return dma_desc_cnt * sizeof(struct packet_lin_dma);
3507}
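
/*
 * Worked example (assumed SG values): the loop above coalesces
 * DMA-contiguous entries into one descriptor as long as the merged length
 * stays within DMA_MAX_TRANSFER_SIZE. For three mapped entries:
 *
 *	{ .addr = 0x1000, .len = 0x1000 }
 *	{ .addr = 0x2000, .len = 0x1000 }	// 0x1000 + 0x1000 == 0x2000, merge
 *	{ .addr = 0x8000, .len = 0x1000 }	// not contiguous, new descriptor
 *
 * dma_desc_cnt ends up as 2, so the patched CB must reserve
 * 2 * sizeof(struct packet_lin_dma) bytes for this transfer.
 */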
3508
3509static int goya_pin_memory_before_cs(struct hl_device *hdev,
3510 struct hl_cs_parser *parser,
3511 struct packet_lin_dma *user_dma_pkt,
3512 u64 addr, enum dma_data_direction dir)
3513{
3514 struct hl_userptr *userptr;
3515 int rc;
3516
3517 if (hl_userptr_is_pinned(hdev, addr, user_dma_pkt->tsize,
3518 parser->job_userptr_list, &userptr))
3519 goto already_pinned;
3520
3521 userptr = kzalloc(sizeof(*userptr), GFP_ATOMIC);
3522 if (!userptr)
3523 return -ENOMEM;
3524
3525 rc = hl_pin_host_memory(hdev, addr, user_dma_pkt->tsize, userptr);
3526 if (rc)
3527 goto free_userptr;
3528
3529 list_add_tail(&userptr->job_node, parser->job_userptr_list);
3530
3531 rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
3532 userptr->sgt->nents, dir);
3533 if (rc) {
3534 dev_err(hdev->dev, "failed to map sgt with DMA region\n");
3535 goto unpin_memory;
3536 }
3537
3538 userptr->dma_mapped = true;
3539 userptr->dir = dir;
3540
3541already_pinned:
3542 parser->patched_cb_size +=
3543 goya_get_dma_desc_list_size(hdev, userptr->sgt);
3544
3545 return 0;
3546
3547unpin_memory:
3548 hl_unpin_host_memory(hdev, userptr);
3549free_userptr:
3550 kfree(userptr);
3551 return rc;
3552}
3553
3554static int goya_validate_dma_pkt_host(struct hl_device *hdev,
3555 struct hl_cs_parser *parser,
3556 struct packet_lin_dma *user_dma_pkt)
3557{
3558 u64 device_memory_addr, addr;
3559 enum dma_data_direction dir;
3560 enum goya_dma_direction user_dir;
3561 bool sram_addr = true;
3562 bool skip_host_mem_pin = false;
3563 bool user_memset;
3564 int rc = 0;
3565
3566 user_dir = (user_dma_pkt->ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3567 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3568
3569 user_memset = (user_dma_pkt->ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
3570 GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
3571
3572 switch (user_dir) {
3573 case DMA_HOST_TO_DRAM:
3574 dev_dbg(hdev->dev, "DMA direction is HOST --> DRAM\n");
3575 dir = DMA_TO_DEVICE;
3576 sram_addr = false;
3577 addr = user_dma_pkt->src_addr;
3578 device_memory_addr = user_dma_pkt->dst_addr;
3579 if (user_memset)
3580 skip_host_mem_pin = true;
3581 break;
3582
3583 case DMA_DRAM_TO_HOST:
3584 dev_dbg(hdev->dev, "DMA direction is DRAM --> HOST\n");
3585 dir = DMA_FROM_DEVICE;
3586 sram_addr = false;
3587 addr = user_dma_pkt->dst_addr;
3588 device_memory_addr = user_dma_pkt->src_addr;
3589 break;
3590
3591 case DMA_HOST_TO_SRAM:
3592 dev_dbg(hdev->dev, "DMA direction is HOST --> SRAM\n");
3593 dir = DMA_TO_DEVICE;
3594 addr = user_dma_pkt->src_addr;
3595 device_memory_addr = user_dma_pkt->dst_addr;
3596 if (user_memset)
3597 skip_host_mem_pin = true;
3598 break;
3599
3600 case DMA_SRAM_TO_HOST:
3601 dev_dbg(hdev->dev, "DMA direction is SRAM --> HOST\n");
3602 dir = DMA_FROM_DEVICE;
3603 addr = user_dma_pkt->dst_addr;
3604 device_memory_addr = user_dma_pkt->src_addr;
3605 break;
3606 default:
3607 dev_err(hdev->dev, "DMA direction is undefined\n");
3608 return -EFAULT;
3609 }
3610
3611 if (parser->ctx_id != HL_KERNEL_ASID_ID) {
3612 if (sram_addr) {
3613 if (!hl_mem_area_inside_range(device_memory_addr,
3614 user_dma_pkt->tsize,
3615 hdev->asic_prop.sram_user_base_address,
3616 hdev->asic_prop.sram_end_address)) {
3617
3618 dev_err(hdev->dev,
3619 "SRAM address 0x%llx + 0x%x is invalid\n",
3620 device_memory_addr,
3621 user_dma_pkt->tsize);
3622 return -EFAULT;
3623 }
3624 } else {
3625 if (!hl_mem_area_inside_range(device_memory_addr,
3626 user_dma_pkt->tsize,
3627 hdev->asic_prop.dram_user_base_address,
3628 hdev->asic_prop.dram_end_address)) {
3629
3630 dev_err(hdev->dev,
3631 "DRAM address 0x%llx + 0x%x is invalid\n",
3632 device_memory_addr,
3633 user_dma_pkt->tsize);
3634 return -EFAULT;
3635 }
3636 }
3637 }
3638
3639 if (skip_host_mem_pin)
3640 parser->patched_cb_size += sizeof(*user_dma_pkt);
3641 else {
3642 if ((dir == DMA_TO_DEVICE) &&
3643 (parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1)) {
3644 dev_err(hdev->dev,
3645 			"Can't DMA from host on queue other than 1\n");
3646 return -EFAULT;
3647 }
3648
3649 rc = goya_pin_memory_before_cs(hdev, parser, user_dma_pkt,
3650 addr, dir);
3651 }
3652
3653 return rc;
3654}
3655
3656static int goya_validate_dma_pkt_no_host(struct hl_device *hdev,
3657 struct hl_cs_parser *parser,
3658 struct packet_lin_dma *user_dma_pkt)
3659{
3660 u64 sram_memory_addr, dram_memory_addr;
3661 enum goya_dma_direction user_dir;
3662
3663 user_dir = (user_dma_pkt->ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3664 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3665
3666 if (user_dir == DMA_DRAM_TO_SRAM) {
3667 dev_dbg(hdev->dev, "DMA direction is DRAM --> SRAM\n");
3668 dram_memory_addr = user_dma_pkt->src_addr;
3669 sram_memory_addr = user_dma_pkt->dst_addr;
3670 } else {
3671 dev_dbg(hdev->dev, "DMA direction is SRAM --> DRAM\n");
3672 sram_memory_addr = user_dma_pkt->src_addr;
3673 dram_memory_addr = user_dma_pkt->dst_addr;
3674 }
3675
3676 if (!hl_mem_area_inside_range(sram_memory_addr, user_dma_pkt->tsize,
3677 hdev->asic_prop.sram_user_base_address,
3678 hdev->asic_prop.sram_end_address)) {
3679 dev_err(hdev->dev, "SRAM address 0x%llx + 0x%x is invalid\n",
3680 sram_memory_addr, user_dma_pkt->tsize);
3681 return -EFAULT;
3682 }
3683
3684 if (!hl_mem_area_inside_range(dram_memory_addr, user_dma_pkt->tsize,
3685 hdev->asic_prop.dram_user_base_address,
3686 hdev->asic_prop.dram_end_address)) {
3687 dev_err(hdev->dev, "DRAM address 0x%llx + 0x%x is invalid\n",
3688 dram_memory_addr, user_dma_pkt->tsize);
3689 return -EFAULT;
3690 }
3691
3692 parser->patched_cb_size += sizeof(*user_dma_pkt);
3693
3694 return 0;
3695}
3696
3697static int goya_validate_dma_pkt_no_mmu(struct hl_device *hdev,
3698 struct hl_cs_parser *parser,
3699 struct packet_lin_dma *user_dma_pkt)
3700{
3701 enum goya_dma_direction user_dir;
3702 int rc;
3703
3704 dev_dbg(hdev->dev, "DMA packet details:\n");
3705 dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr);
3706 dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr);
3707 dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize);
3708
3709 user_dir = (user_dma_pkt->ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3710 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3711
3712 /*
3713 * Special handling for DMA with size 0. The H/W has a bug where
3714 * this can cause the QMAN DMA to get stuck, so block it here.
3715 */
3716 if (user_dma_pkt->tsize == 0) {
3717 dev_err(hdev->dev,
3718 "Got DMA with size 0, might reset the device\n");
3719 return -EINVAL;
3720 }
3721
3722 if ((user_dir == DMA_DRAM_TO_SRAM) || (user_dir == DMA_SRAM_TO_DRAM))
3723 rc = goya_validate_dma_pkt_no_host(hdev, parser, user_dma_pkt);
3724 else
3725 rc = goya_validate_dma_pkt_host(hdev, parser, user_dma_pkt);
3726
3727 return rc;
3728}
3729
3730static int goya_validate_dma_pkt_mmu(struct hl_device *hdev,
3731 struct hl_cs_parser *parser,
3732 struct packet_lin_dma *user_dma_pkt)
3733{
3734 dev_dbg(hdev->dev, "DMA packet details:\n");
3735 dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr);
3736 dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr);
3737 dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize);
3738
3739 /*
3740 * WA for HW-23.
3741 * We can't allow user to read from Host using QMANs other than 1.
3742 */
3743 if (parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1 &&
3744 hl_mem_area_inside_range(user_dma_pkt->src_addr,
3745 user_dma_pkt->tsize,
3746 hdev->asic_prop.va_space_host_start_address,
3747 hdev->asic_prop.va_space_host_end_address)) {
3748 dev_err(hdev->dev,
3749 "Can't DMA from host on queue other then 1\n");
3750 return -EFAULT;
3751 }
3752
3753 if (user_dma_pkt->tsize == 0) {
3754 dev_err(hdev->dev,
3755 "Got DMA with size 0, might reset the device\n");
3756 return -EINVAL;
3757 }
3758
3759 parser->patched_cb_size += sizeof(*user_dma_pkt);
3760
3761 return 0;
3762}
3763
3764static int goya_validate_wreg32(struct hl_device *hdev,
3765 struct hl_cs_parser *parser,
3766 struct packet_wreg32 *wreg_pkt)
3767{
3768 struct goya_device *goya = hdev->asic_specific;
3769 u32 sob_start_addr, sob_end_addr;
3770 u16 reg_offset;
3771
3772 reg_offset = wreg_pkt->ctl & GOYA_PKT_WREG32_CTL_REG_OFFSET_MASK;
3773
3774 dev_dbg(hdev->dev, "WREG32 packet details:\n");
3775 dev_dbg(hdev->dev, "reg_offset == 0x%x\n", reg_offset);
3776 dev_dbg(hdev->dev, "value == 0x%x\n", wreg_pkt->value);
3777
Oded Gabbay6765fda2019-02-28 10:46:14 +02003778 if (reg_offset != (mmDMA_CH_0_WR_COMP_ADDR_LO & 0x1FFF)) {
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003779 dev_err(hdev->dev, "WREG32 packet with illegal address 0x%x\n",
3780 reg_offset);
3781 return -EPERM;
3782 }
3783
3784 /*
3785 	 * With MMU, DMA channels are not secured, so it doesn't matter
3786 	 * where the WR COMP is written, because it will go out as a
3787 	 * non-secured transaction
3788 */
3789 if (goya->hw_cap_initialized & HW_CAP_MMU)
3790 return 0;
3791
3792 sob_start_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
3793 sob_end_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1023);
3794
3795 if ((wreg_pkt->value < sob_start_addr) ||
3796 (wreg_pkt->value > sob_end_addr)) {
3797
3798 dev_err(hdev->dev, "WREG32 packet with illegal value 0x%x\n",
3799 wreg_pkt->value);
3800 return -EPERM;
3801 }
3802
3803 return 0;
3804}
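
/*
 * Sketch of the offset check above (13-bit offset field assumed from the
 * 0x1FFF mask): the packet's register offset is compared against the low
 * bits of the only register a user CB is allowed to write:
 *
 *	u16 reg_offset = wreg_pkt->ctl & GOYA_PKT_WREG32_CTL_REG_OFFSET_MASK;
 *	u16 allowed = mmDMA_CH_0_WR_COMP_ADDR_LO & 0x1FFF;
 *
 *	if (reg_offset != allowed)	// -> -EPERM
 *
 * Without MMU, the written value must also land inside the SOB range, as
 * checked right after.
 */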
3805
3806static int goya_validate_cb(struct hl_device *hdev,
3807 struct hl_cs_parser *parser, bool is_mmu)
3808{
3809 u32 cb_parsed_length = 0;
3810 int rc = 0;
3811
3812 parser->patched_cb_size = 0;
3813
3814 	/* user_cb_size is greater than 0, so the loop will always execute */
3815 while (cb_parsed_length < parser->user_cb_size) {
3816 enum packet_id pkt_id;
3817 u16 pkt_size;
3818 void *user_pkt;
3819
3820 user_pkt = (void *) (uintptr_t)
3821 (parser->user_cb->kernel_address + cb_parsed_length);
3822
3823 pkt_id = (enum packet_id) (((*(u64 *) user_pkt) &
3824 PACKET_HEADER_PACKET_ID_MASK) >>
3825 PACKET_HEADER_PACKET_ID_SHIFT);
3826
3827 pkt_size = goya_packet_sizes[pkt_id];
3828 cb_parsed_length += pkt_size;
3829 if (cb_parsed_length > parser->user_cb_size) {
3830 dev_err(hdev->dev,
3831 "packet 0x%x is out of CB boundary\n", pkt_id);
3832 rc = -EINVAL;
3833 break;
3834 }
3835
3836 switch (pkt_id) {
3837 case PACKET_WREG_32:
3838 /*
3839 * Although it is validated after copy in patch_cb(),
3840 			 * Although it is validated after the copy in patch_cb(),
3841 			 * it must be validated here as well because patch_cb()
3842 			 * is not called in the MMU path while this function is
3843 rc = goya_validate_wreg32(hdev, parser, user_pkt);
3844 break;
3845
3846 case PACKET_WREG_BULK:
3847 dev_err(hdev->dev,
3848 "User not allowed to use WREG_BULK\n");
3849 rc = -EPERM;
3850 break;
3851
3852 case PACKET_MSG_PROT:
3853 dev_err(hdev->dev,
3854 "User not allowed to use MSG_PROT\n");
3855 rc = -EPERM;
3856 break;
3857
3858 case PACKET_CP_DMA:
3859 dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
3860 rc = -EPERM;
3861 break;
3862
3863 case PACKET_STOP:
3864 dev_err(hdev->dev, "User not allowed to use STOP\n");
3865 rc = -EPERM;
3866 break;
3867
3868 case PACKET_LIN_DMA:
3869 if (is_mmu)
3870 rc = goya_validate_dma_pkt_mmu(hdev, parser,
3871 user_pkt);
3872 else
3873 rc = goya_validate_dma_pkt_no_mmu(hdev, parser,
3874 user_pkt);
3875 break;
3876
3877 case PACKET_MSG_LONG:
3878 case PACKET_MSG_SHORT:
3879 case PACKET_FENCE:
3880 case PACKET_NOP:
3881 parser->patched_cb_size += pkt_size;
3882 break;
3883
3884 default:
3885 dev_err(hdev->dev, "Invalid packet header 0x%x\n",
3886 pkt_id);
3887 rc = -EINVAL;
3888 break;
3889 }
3890
3891 if (rc)
3892 break;
3893 }
3894
3895 /*
3896 * The new CB should have space at the end for two MSG_PROT packets:
3897 * 1. A packet that will act as a completion packet
3898 * 2. A packet that will generate MSI-X interrupt
3899 */
3900 parser->patched_cb_size += sizeof(struct packet_msg_prot) * 2;
3901
3902 return rc;
3903}
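
/*
 * Illustrative header parse (mirrors the loop above): each packet starts
 * with a 64-bit header from which the opcode is extracted, and the opcode
 * indexes a static size table:
 *
 *	u64 header = *(u64 *) user_pkt;
 *	enum packet_id pkt_id = (header & PACKET_HEADER_PACKET_ID_MASK) >>
 *					PACKET_HEADER_PACKET_ID_SHIFT;
 *	u16 pkt_size = goya_packet_sizes[pkt_id];
 *
 * The boundary check (cb_parsed_length > user_cb_size) runs before the
 * opcode switch, so a truncated trailing packet fails with -EINVAL rather
 * than being dispatched.
 */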
3904
3905static int goya_patch_dma_packet(struct hl_device *hdev,
3906 struct hl_cs_parser *parser,
3907 struct packet_lin_dma *user_dma_pkt,
3908 struct packet_lin_dma *new_dma_pkt,
3909 u32 *new_dma_pkt_size)
3910{
3911 struct hl_userptr *userptr;
3912 struct scatterlist *sg, *sg_next_iter;
Oded Gabbaye99f16832019-02-24 11:55:26 +02003913 u32 count, dma_desc_cnt;
3914 u64 len, len_next;
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003915 dma_addr_t dma_addr, dma_addr_next;
3916 enum goya_dma_direction user_dir;
3917 u64 device_memory_addr, addr;
3918 enum dma_data_direction dir;
3919 struct sg_table *sgt;
3920 bool skip_host_mem_pin = false;
3921 bool user_memset;
3922 u32 user_rdcomp_mask, user_wrcomp_mask;
3923
3924 user_dir = (user_dma_pkt->ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3925 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3926
3927 user_memset = (user_dma_pkt->ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
3928 GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
3929
3930 if ((user_dir == DMA_DRAM_TO_SRAM) || (user_dir == DMA_SRAM_TO_DRAM) ||
3931 (user_dma_pkt->tsize == 0)) {
3932 memcpy(new_dma_pkt, user_dma_pkt, sizeof(*new_dma_pkt));
3933 *new_dma_pkt_size = sizeof(*new_dma_pkt);
3934 return 0;
3935 }
3936
3937 if ((user_dir == DMA_HOST_TO_DRAM) || (user_dir == DMA_HOST_TO_SRAM)) {
3938 addr = user_dma_pkt->src_addr;
3939 device_memory_addr = user_dma_pkt->dst_addr;
3940 dir = DMA_TO_DEVICE;
3941 if (user_memset)
3942 skip_host_mem_pin = true;
3943 } else {
3944 addr = user_dma_pkt->dst_addr;
3945 device_memory_addr = user_dma_pkt->src_addr;
3946 dir = DMA_FROM_DEVICE;
3947 }
3948
3949 if ((!skip_host_mem_pin) &&
3950 (hl_userptr_is_pinned(hdev, addr, user_dma_pkt->tsize,
3951 parser->job_userptr_list, &userptr) == false)) {
3952 dev_err(hdev->dev, "Userptr 0x%llx + 0x%x NOT mapped\n",
3953 addr, user_dma_pkt->tsize);
3954 return -EFAULT;
3955 }
3956
3957 if ((user_memset) && (dir == DMA_TO_DEVICE)) {
3958 memcpy(new_dma_pkt, user_dma_pkt, sizeof(*user_dma_pkt));
3959 *new_dma_pkt_size = sizeof(*user_dma_pkt);
3960 return 0;
3961 }
3962
3963 user_rdcomp_mask =
3964 (user_dma_pkt->ctl & GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK);
3965
3966 user_wrcomp_mask =
3967 (user_dma_pkt->ctl & GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK);
3968
3969 sgt = userptr->sgt;
3970 dma_desc_cnt = 0;
3971
3972 for_each_sg(sgt->sgl, sg, sgt->nents, count) {
3973 len = sg_dma_len(sg);
3974 dma_addr = sg_dma_address(sg);
3975
3976 if (len == 0)
3977 break;
3978
3979 while ((count + 1) < sgt->nents) {
3980 sg_next_iter = sg_next(sg);
3981 len_next = sg_dma_len(sg_next_iter);
3982 dma_addr_next = sg_dma_address(sg_next_iter);
3983
3984 if (len_next == 0)
3985 break;
3986
3987 if ((dma_addr + len == dma_addr_next) &&
3988 (len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
3989 len += len_next;
3990 count++;
3991 sg = sg_next_iter;
3992 } else {
3993 break;
3994 }
3995 }
3996
3997 new_dma_pkt->ctl = user_dma_pkt->ctl;
3998 if (likely(dma_desc_cnt))
3999 new_dma_pkt->ctl &= ~GOYA_PKT_CTL_EB_MASK;
4000 new_dma_pkt->ctl &= ~(GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK |
4001 GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK);
4002 new_dma_pkt->tsize = len;
4003
4004 dma_addr += hdev->asic_prop.host_phys_base_address;
4005
4006 if (dir == DMA_TO_DEVICE) {
4007 new_dma_pkt->src_addr = dma_addr;
4008 new_dma_pkt->dst_addr = device_memory_addr;
4009 } else {
4010 new_dma_pkt->src_addr = device_memory_addr;
4011 new_dma_pkt->dst_addr = dma_addr;
4012 }
4013
4014 if (!user_memset)
4015 device_memory_addr += len;
4016 dma_desc_cnt++;
4017 new_dma_pkt++;
4018 }
4019
4020 if (!dma_desc_cnt) {
4021 dev_err(hdev->dev,
4022 "Error of 0 SG entries when patching DMA packet\n");
4023 return -EFAULT;
4024 }
4025
4026 /* Fix the last dma packet - rdcomp/wrcomp must be as user set them */
4027 new_dma_pkt--;
4028 new_dma_pkt->ctl |= (user_rdcomp_mask | user_wrcomp_mask);
4029
4030 *new_dma_pkt_size = dma_desc_cnt * sizeof(struct packet_lin_dma);
4031
4032 return 0;
4033}
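
/*
 * Summary of the patching above (mask names shortened): when one user DMA
 * packet is split into N descriptors, only the first keeps the
 * engine-barrier (EB) bit and only the last carries the user's
 * RDCOMP/WRCOMP completion flags:
 *
 *	desc[0].ctl   = user_ctl & ~(RDCOMP | WRCOMP);		// EB kept
 *	desc[i].ctl   = user_ctl & ~(EB | RDCOMP | WRCOMP);	// 0 < i < N
 *	desc[N-1].ctl |= user_ctl & (RDCOMP | WRCOMP);
 *
 * so the completion write-back fires once, after the whole transfer.
 */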
4034
4035static int goya_patch_cb(struct hl_device *hdev,
4036 struct hl_cs_parser *parser)
4037{
4038 u32 cb_parsed_length = 0;
4039 u32 cb_patched_cur_length = 0;
4040 int rc = 0;
4041
4042 	/* user_cb_size is greater than 0, so the loop will always execute */
4043 while (cb_parsed_length < parser->user_cb_size) {
4044 enum packet_id pkt_id;
4045 u16 pkt_size;
4046 u32 new_pkt_size = 0;
4047 void *user_pkt, *kernel_pkt;
4048
4049 user_pkt = (void *) (uintptr_t)
4050 (parser->user_cb->kernel_address + cb_parsed_length);
4051 kernel_pkt = (void *) (uintptr_t)
4052 (parser->patched_cb->kernel_address +
4053 cb_patched_cur_length);
4054
4055 pkt_id = (enum packet_id) (((*(u64 *) user_pkt) &
4056 PACKET_HEADER_PACKET_ID_MASK) >>
4057 PACKET_HEADER_PACKET_ID_SHIFT);
4058
4059 pkt_size = goya_packet_sizes[pkt_id];
4060 cb_parsed_length += pkt_size;
4061 if (cb_parsed_length > parser->user_cb_size) {
4062 dev_err(hdev->dev,
4063 "packet 0x%x is out of CB boundary\n", pkt_id);
4064 rc = -EINVAL;
4065 break;
4066 }
4067
4068 switch (pkt_id) {
4069 case PACKET_LIN_DMA:
4070 rc = goya_patch_dma_packet(hdev, parser, user_pkt,
4071 kernel_pkt, &new_pkt_size);
4072 cb_patched_cur_length += new_pkt_size;
4073 break;
4074
4075 case PACKET_WREG_32:
4076 memcpy(kernel_pkt, user_pkt, pkt_size);
4077 cb_patched_cur_length += pkt_size;
4078 rc = goya_validate_wreg32(hdev, parser, kernel_pkt);
4079 break;
4080
4081 case PACKET_WREG_BULK:
4082 dev_err(hdev->dev,
4083 "User not allowed to use WREG_BULK\n");
4084 rc = -EPERM;
4085 break;
4086
4087 case PACKET_MSG_PROT:
4088 dev_err(hdev->dev,
4089 "User not allowed to use MSG_PROT\n");
4090 rc = -EPERM;
4091 break;
4092
4093 case PACKET_CP_DMA:
4094 dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
4095 rc = -EPERM;
4096 break;
4097
4098 case PACKET_STOP:
4099 dev_err(hdev->dev, "User not allowed to use STOP\n");
4100 rc = -EPERM;
4101 break;
4102
4103 case PACKET_MSG_LONG:
4104 case PACKET_MSG_SHORT:
4105 case PACKET_FENCE:
4106 case PACKET_NOP:
4107 memcpy(kernel_pkt, user_pkt, pkt_size);
4108 cb_patched_cur_length += pkt_size;
4109 break;
4110
4111 default:
4112 dev_err(hdev->dev, "Invalid packet header 0x%x\n",
4113 pkt_id);
4114 rc = -EINVAL;
4115 break;
4116 }
4117
4118 if (rc)
4119 break;
4120 }
4121
4122 return rc;
4123}
4124
4125static int goya_parse_cb_mmu(struct hl_device *hdev,
4126 struct hl_cs_parser *parser)
4127{
4128 u64 patched_cb_handle;
4129 u32 patched_cb_size;
4130 struct hl_cb *user_cb;
4131 int rc;
4132
4133 /*
4134 	 * The new CB should have space at the end for two MSG_PROT packets:
4135 * 1. A packet that will act as a completion packet
4136 * 2. A packet that will generate MSI-X interrupt
4137 */
4138 parser->patched_cb_size = parser->user_cb_size +
4139 sizeof(struct packet_msg_prot) * 2;
4140
4141 rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
4142 parser->patched_cb_size,
4143 &patched_cb_handle, HL_KERNEL_ASID_ID);
4144
4145 if (rc) {
4146 dev_err(hdev->dev,
4147 "Failed to allocate patched CB for DMA CS %d\n",
4148 rc);
4149 return rc;
4150 }
4151
4152 patched_cb_handle >>= PAGE_SHIFT;
4153 parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
4154 (u32) patched_cb_handle);
4155 /* hl_cb_get should never fail here so use kernel WARN */
4156 WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
4157 (u32) patched_cb_handle);
4158 if (!parser->patched_cb) {
4159 rc = -EFAULT;
4160 goto out;
4161 }
4162
4163 /*
4164 * The check that parser->user_cb_size <= parser->user_cb->size was done
4165 * in validate_queue_index().
4166 */
4167 memcpy((void *) (uintptr_t) parser->patched_cb->kernel_address,
4168 (void *) (uintptr_t) parser->user_cb->kernel_address,
4169 parser->user_cb_size);
4170
4171 patched_cb_size = parser->patched_cb_size;
4172
4173 /* validate patched CB instead of user CB */
4174 user_cb = parser->user_cb;
4175 parser->user_cb = parser->patched_cb;
4176 rc = goya_validate_cb(hdev, parser, true);
4177 parser->user_cb = user_cb;
4178
4179 if (rc) {
4180 hl_cb_put(parser->patched_cb);
4181 goto out;
4182 }
4183
4184 if (patched_cb_size != parser->patched_cb_size) {
4185 dev_err(hdev->dev, "user CB size mismatch\n");
4186 hl_cb_put(parser->patched_cb);
4187 rc = -EINVAL;
4188 goto out;
4189 }
4190
4191out:
4192 /*
4193 	 * Always call cb destroy here because we still hold one reference
4194 	 * to it from the earlier cb_get. After the job completes, cb_put
4195 	 * will release it, but here we want to remove it from the
4196 	 * IDR
4197 */
4198 hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
4199 patched_cb_handle << PAGE_SHIFT);
4200
4201 return rc;
4202}
4203
Oded Gabbay5e6e0232019-02-27 12:15:16 +02004204static int goya_parse_cb_no_mmu(struct hl_device *hdev,
4205 struct hl_cs_parser *parser)
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02004206{
4207 u64 patched_cb_handle;
4208 int rc;
4209
4210 rc = goya_validate_cb(hdev, parser, false);
4211
4212 if (rc)
4213 goto free_userptr;
4214
4215 rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
4216 parser->patched_cb_size,
4217 &patched_cb_handle, HL_KERNEL_ASID_ID);
4218 if (rc) {
4219 dev_err(hdev->dev,
4220 "Failed to allocate patched CB for DMA CS %d\n", rc);
4221 goto free_userptr;
4222 }
4223
4224 patched_cb_handle >>= PAGE_SHIFT;
4225 parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
4226 (u32) patched_cb_handle);
4227 /* hl_cb_get should never fail here so use kernel WARN */
4228 WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
4229 (u32) patched_cb_handle);
4230 if (!parser->patched_cb) {
4231 rc = -EFAULT;
4232 goto out;
4233 }
4234
4235 rc = goya_patch_cb(hdev, parser);
4236
4237 if (rc)
4238 hl_cb_put(parser->patched_cb);
4239
4240out:
4241 /*
4242 	 * Always call cb destroy here because we still hold one reference
4243 	 * to it from the earlier cb_get. After the job completes, cb_put
4244 	 * will release it, but here we want to remove it from the
4245 	 * IDR
4246 */
4247 hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
4248 patched_cb_handle << PAGE_SHIFT);
4249
4250free_userptr:
4251 if (rc)
4252 hl_userptr_delete_list(hdev, parser->job_userptr_list);
4253 return rc;
4254}
4255
Oded Gabbay5e6e0232019-02-27 12:15:16 +02004256static int goya_parse_cb_no_ext_queue(struct hl_device *hdev,
4257 struct hl_cs_parser *parser)
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02004258{
4259 struct asic_fixed_properties *asic_prop = &hdev->asic_prop;
4260 struct goya_device *goya = hdev->asic_specific;
4261
4262 if (!(goya->hw_cap_initialized & HW_CAP_MMU)) {
4263 /* For internal queue jobs, just check if cb address is valid */
4264 if (hl_mem_area_inside_range(
4265 (u64) (uintptr_t) parser->user_cb,
4266 parser->user_cb_size,
4267 asic_prop->sram_user_base_address,
4268 asic_prop->sram_end_address))
4269 return 0;
4270
4271 if (hl_mem_area_inside_range(
4272 (u64) (uintptr_t) parser->user_cb,
4273 parser->user_cb_size,
4274 asic_prop->dram_user_base_address,
4275 asic_prop->dram_end_address))
4276 return 0;
4277
4278 dev_err(hdev->dev,
4279 "Internal CB address 0x%llx + 0x%x is not in SRAM nor in DRAM\n",
4280 (u64) (uintptr_t) parser->user_cb,
4281 parser->user_cb_size);
4282
4283 return -EFAULT;
4284 }
4285
4286 return 0;
4287}
4288
4289int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
4290{
4291 struct goya_device *goya = hdev->asic_specific;
4292
4293 if (!parser->ext_queue)
4294 		return goya_parse_cb_no_ext_queue(hdev, parser);
4295
4296 if ((goya->hw_cap_initialized & HW_CAP_MMU) && parser->use_virt_addr)
4297 return goya_parse_cb_mmu(hdev, parser);
4298 else
4299 return goya_parse_cb_no_mmu(hdev, parser);
4300}
4301
4302void goya_add_end_of_cb_packets(u64 kernel_address, u32 len, u64 cq_addr,
4303 u32 cq_val, u32 msix_vec)
4304{
4305 struct packet_msg_prot *cq_pkt;
4306
4307 cq_pkt = (struct packet_msg_prot *) (uintptr_t)
4308 (kernel_address + len - (sizeof(struct packet_msg_prot) * 2));
4309
4310 cq_pkt->ctl = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
4311 (1 << GOYA_PKT_CTL_EB_SHIFT) |
4312 (1 << GOYA_PKT_CTL_MB_SHIFT);
4313 cq_pkt->value = cq_val;
4314 cq_pkt->addr = cq_addr;
4315
4316 cq_pkt++;
4317
4318 cq_pkt->ctl = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
4319 (1 << GOYA_PKT_CTL_MB_SHIFT);
4320 cq_pkt->value = msix_vec & 0x7FF;
4321 cq_pkt->addr = CFG_BASE + mmPCIE_DBI_MSIX_DOORBELL_OFF;
4322}
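
/*
 * Resulting CB layout (derived from the code above): the two trailing
 * MSG_PROT packets occupy the last 2 * sizeof(struct packet_msg_prot)
 * bytes of the CB:
 *
 *	+--------------------------+
 *	| user / patched packets   |
 *	+--------------------------+
 *	| MSG_PROT: CQ update      |  writes cq_val to cq_addr (EB + MB set)
 *	+--------------------------+
 *	| MSG_PROT: MSI-X doorbell |  writes msix_vec (MB set)
 *	+--------------------------+
 */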
4323
Oded Gabbay1251f232019-02-16 00:39:18 +02004324static void goya_update_eq_ci(struct hl_device *hdev, u32 val)
4325{
4326 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, val);
4327}
4328
Oded Gabbay5e6e0232019-02-27 12:15:16 +02004329static void goya_restore_phase_topology(struct hl_device *hdev)
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02004330{
4331 int i, num_of_sob_in_longs, num_of_mon_in_longs;
4332
4333 num_of_sob_in_longs =
4334 ((mmSYNC_MNGR_SOB_OBJ_1023 - mmSYNC_MNGR_SOB_OBJ_0) + 4);
4335
4336 num_of_mon_in_longs =
4337 ((mmSYNC_MNGR_MON_STATUS_255 - mmSYNC_MNGR_MON_STATUS_0) + 4);
4338
4339 for (i = 0 ; i < num_of_sob_in_longs ; i += 4)
4340 WREG32(mmSYNC_MNGR_SOB_OBJ_0 + i, 0);
4341
4342 for (i = 0 ; i < num_of_mon_in_longs ; i += 4)
4343 WREG32(mmSYNC_MNGR_MON_STATUS_0 + i, 0);
4344
4345 /* Flush all WREG to prevent race */
4346 i = RREG32(mmSYNC_MNGR_SOB_OBJ_0);
4347}
4348
Oded Gabbayc2164772019-02-16 00:39:24 +02004349/*
4350 * goya_debugfs_read32 - read a 32bit value from a given device address
4351 *
4352 * @hdev: pointer to hl_device structure
4353 * @addr: address in device
4354 * @val: returned value
4355 *
4356 * In case of a DDR address that is not mapped into the default aperture
4357 * that the DDR bar exposes, the function will configure the iATU so that
4358 * the DDR bar is positioned at a base address that allows reading from
4359 * the required address. Configuring the iATU during normal operation can
4360 * lead to undefined behavior and therefore should be done with extreme care
4361 *
4362 */
Oded Gabbay5e6e0232019-02-27 12:15:16 +02004363static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
Oded Gabbayc2164772019-02-16 00:39:24 +02004364{
4365 struct asic_fixed_properties *prop = &hdev->asic_prop;
4366 int rc = 0;
4367
4368 if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
4369 *val = RREG32(addr - CFG_BASE);
4370
4371 } else if ((addr >= SRAM_BASE_ADDR) &&
4372 (addr < SRAM_BASE_ADDR + SRAM_SIZE)) {
4373
4374 *val = readl(hdev->pcie_bar[SRAM_CFG_BAR_ID] +
4375 (addr - SRAM_BASE_ADDR));
4376
4377 } else if ((addr >= DRAM_PHYS_BASE) &&
4378 (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size)) {
4379
4380 u64 bar_base_addr = DRAM_PHYS_BASE +
4381 (addr & ~(prop->dram_pci_bar_size - 0x1ull));
4382
4383 rc = goya_set_ddr_bar_base(hdev, bar_base_addr);
4384 if (!rc) {
4385 *val = readl(hdev->pcie_bar[DDR_BAR_ID] +
4386 (addr - bar_base_addr));
4387
4388 rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE +
4389 (MMU_PAGE_TABLES_ADDR &
4390 ~(prop->dram_pci_bar_size - 0x1ull)));
4391 }
4392 } else {
4393 rc = -EFAULT;
4394 }
4395
4396 return rc;
4397}
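
/*
 * Illustrative window computation (assumes dram_pci_bar_size is a power of
 * two): the DDR bar exposes only dram_pci_bar_size bytes at a time, so the
 * target address is rounded down to a bar-size boundary and accessed
 * through the resulting window:
 *
 *	u64 bar_base_addr = DRAM_PHYS_BASE +
 *			(addr & ~(prop->dram_pci_bar_size - 0x1ull));
 *	u32 v = readl(hdev->pcie_bar[DDR_BAR_ID] + (addr - bar_base_addr));
 *
 * The bar is then moved back over MMU_PAGE_TABLES_ADDR so that page-table
 * accesses through goya_read_pte()/goya_write_pte() keep working.
 */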
4398
4399/*
4400 * goya_debugfs_write32 - write a 32bit value to a given device address
4401 *
4402 * @hdev: pointer to hl_device structure
4403 * @addr: address in device
4404 * @val: value to write
4405 *
4406 * In case of a DDR address that is not mapped into the default aperture
4407 * that the DDR bar exposes, the function will configure the iATU so that
4408 * the DDR bar is positioned at a base address that allows writing to
4409 * the required address. Configuring the iATU during normal operation can
4410 * lead to undefined behavior and therefore should be done with extreme care
4411 *
4412 */
Oded Gabbay5e6e0232019-02-27 12:15:16 +02004413static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
Oded Gabbayc2164772019-02-16 00:39:24 +02004414{
4415 struct asic_fixed_properties *prop = &hdev->asic_prop;
4416 int rc = 0;
4417
4418 if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
4419 WREG32(addr - CFG_BASE, val);
4420
4421 } else if ((addr >= SRAM_BASE_ADDR) &&
4422 (addr < SRAM_BASE_ADDR + SRAM_SIZE)) {
4423
4424 writel(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] +
4425 (addr - SRAM_BASE_ADDR));
4426
4427 } else if ((addr >= DRAM_PHYS_BASE) &&
4428 (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size)) {
4429
4430 u64 bar_base_addr = DRAM_PHYS_BASE +
4431 (addr & ~(prop->dram_pci_bar_size - 0x1ull));
4432
4433 rc = goya_set_ddr_bar_base(hdev, bar_base_addr);
4434 if (!rc) {
4435 writel(val, hdev->pcie_bar[DDR_BAR_ID] +
4436 (addr - bar_base_addr));
4437
4438 rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE +
4439 (MMU_PAGE_TABLES_ADDR &
4440 ~(prop->dram_pci_bar_size - 0x1ull)));
4441 }
4442 } else {
4443 rc = -EFAULT;
4444 }
4445
4446 return rc;
4447}
4448
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004449static u64 goya_read_pte(struct hl_device *hdev, u64 addr)
4450{
4451 struct goya_device *goya = hdev->asic_specific;
4452
4453 return readq(hdev->pcie_bar[DDR_BAR_ID] +
4454 (addr - goya->ddr_bar_cur_addr));
4455}
4456
4457static void goya_write_pte(struct hl_device *hdev, u64 addr, u64 val)
4458{
4459 struct goya_device *goya = hdev->asic_specific;
4460
4461 writeq(val, hdev->pcie_bar[DDR_BAR_ID] +
4462 (addr - goya->ddr_bar_cur_addr));
4463}
4464
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004465static const char *_goya_get_event_desc(u16 event_type)
Oded Gabbay1251f232019-02-16 00:39:18 +02004466{
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004467 switch (event_type) {
4468 case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
4469 return "PCIe_dec";
4470 case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
4471 case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
4472 case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
4473 case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
4474 case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
4475 case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
4476 case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
4477 case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
4478 return "TPC%d_dec";
4479 case GOYA_ASYNC_EVENT_ID_MME_WACS:
4480 return "MME_wacs";
4481 case GOYA_ASYNC_EVENT_ID_MME_WACSD:
4482 return "MME_wacsd";
4483 case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER:
4484 return "CPU_axi_splitter";
4485 case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC:
4486 return "PSOC_axi_dec";
4487 case GOYA_ASYNC_EVENT_ID_PSOC:
4488 return "PSOC";
4489 case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
4490 case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
4491 case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
4492 case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
4493 case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
4494 case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
4495 case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
4496 case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
4497 return "TPC%d_krn_err";
4498 case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ:
4499 return "TPC%d_cq";
4500 case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
4501 return "TPC%d_qm";
4502 case GOYA_ASYNC_EVENT_ID_MME_QM:
4503 return "MME_qm";
4504 case GOYA_ASYNC_EVENT_ID_MME_CMDQ:
4505 return "MME_cq";
4506 case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
4507 return "DMA%d_qm";
4508 case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
4509 return "DMA%d_ch";
4510 default:
4511 return "N/A";
4512 }
Oded Gabbay1251f232019-02-16 00:39:18 +02004513}
4514
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004515static void goya_get_event_desc(u16 event_type, char *desc, size_t size)
Oded Gabbay1251f232019-02-16 00:39:18 +02004516{
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004517 u8 index;
Oded Gabbay1251f232019-02-16 00:39:18 +02004518
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004519 switch (event_type) {
4520 case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
4521 case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
4522 case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
4523 case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
4524 case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
4525 case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
4526 case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
4527 case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
4528 index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_DEC) / 3;
4529 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4530 break;
4531 case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
4532 case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
4533 case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
4534 case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
4535 case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
4536 case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
4537 case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
4538 case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
4539 index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR) / 10;
4540 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4541 break;
4542 case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ:
4543 index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_CMDQ;
4544 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4545 break;
4546 case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
4547 index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_QM;
4548 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4549 break;
4550 case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
4551 index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_QM;
4552 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4553 break;
4554 case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
4555 index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_CH;
4556 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4557 break;
4558 default:
4559 snprintf(desc, size, _goya_get_event_desc(event_type));
4560 break;
4561 }
4562}
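
/*
 * Illustrative index recovery (event spacing inferred from the divisors
 * above): TPC engines appear to be 3 event IDs apart for DEC events and 10
 * apart for KRN_ERR events, so the engine number is the distance from the
 * TPC0 event divided by the stride:
 *
 *	index = (GOYA_ASYNC_EVENT_ID_TPC2_DEC -
 *		 GOYA_ASYNC_EVENT_ID_TPC0_DEC) / 3;	// -> 2
 *
 * The index is then substituted into the "TPC%d_dec" template returned by
 * _goya_get_event_desc().
 */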
Oded Gabbay1251f232019-02-16 00:39:18 +02004563
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004564static void goya_print_razwi_info(struct hl_device *hdev)
4565{
4566 if (RREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD)) {
4567 dev_err(hdev->dev, "Illegal write to LBW\n");
4568 WREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD, 0);
4569 }
Oded Gabbay1251f232019-02-16 00:39:18 +02004570
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004571 if (RREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD)) {
4572 dev_err(hdev->dev, "Illegal read from LBW\n");
4573 WREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD, 0);
4574 }
4575
4576 if (RREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD)) {
4577 dev_err(hdev->dev, "Illegal write to HBW\n");
4578 WREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD, 0);
4579 }
4580
4581 if (RREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD)) {
4582 dev_err(hdev->dev, "Illegal read from HBW\n");
4583 WREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD, 0);
4584 }
4585}
4586
4587static void goya_print_mmu_error_info(struct hl_device *hdev)
4588{
4589 struct goya_device *goya = hdev->asic_specific;
4590 u64 addr;
4591 u32 val;
4592
4593 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4594 return;
4595
4596 val = RREG32(mmMMU_PAGE_ERROR_CAPTURE);
4597 if (val & MMU_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK) {
4598 addr = val & MMU_PAGE_ERROR_CAPTURE_VA_49_32_MASK;
4599 addr <<= 32;
4600 addr |= RREG32(mmMMU_PAGE_ERROR_CAPTURE_VA);
4601
4602 dev_err(hdev->dev, "MMU page fault on va 0x%llx\n", addr);
4603
4604 WREG32(mmMMU_PAGE_ERROR_CAPTURE, 0);
Oded Gabbay1251f232019-02-16 00:39:18 +02004605 }
4606}
4607
4608static void goya_print_irq_info(struct hl_device *hdev, u16 event_type)
4609{
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004610 char desc[20] = "";
Oded Gabbay1251f232019-02-16 00:39:18 +02004611
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004612 goya_get_event_desc(event_type, desc, sizeof(desc));
4613 dev_err(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
4614 event_type, desc);
Oded Gabbay1251f232019-02-16 00:39:18 +02004615
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004616 goya_print_razwi_info(hdev);
4617 goya_print_mmu_error_info(hdev);
Oded Gabbay1251f232019-02-16 00:39:18 +02004618}
4619
Oded Gabbayf8c8c7d2019-02-16 00:39:20 +02004620static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
4621 size_t irq_arr_size)
4622{
4623 struct armcp_unmask_irq_arr_packet *pkt;
4624 size_t total_pkt_size;
4625 long result;
4626 int rc;
4627
4628 total_pkt_size = sizeof(struct armcp_unmask_irq_arr_packet) +
4629 irq_arr_size;
4630
4631 	/* data should be aligned to 8 bytes so that ArmCP can copy it */
4632 total_pkt_size = (total_pkt_size + 0x7) & ~0x7;
4633
4634 	/* total_pkt_size is cast to u16 later on */
4635 if (total_pkt_size > USHRT_MAX) {
4636 dev_err(hdev->dev, "too many elements in IRQ array\n");
4637 return -EINVAL;
4638 }
4639
4640 pkt = kzalloc(total_pkt_size, GFP_KERNEL);
4641 if (!pkt)
4642 return -ENOMEM;
4643
4644 pkt->length = irq_arr_size / sizeof(irq_arr[0]);
4645 memcpy(&pkt->irqs, irq_arr, irq_arr_size);
4646
4647 pkt->armcp_pkt.ctl = ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
4648 ARMCP_PKT_CTL_OPCODE_SHIFT;
4649
4650 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
4651 total_pkt_size, HL_DEVICE_TIMEOUT_USEC, &result);
4652
4653 if (rc)
4654 dev_err(hdev->dev, "failed to unmask IRQ array\n");
4655
4656 kfree(pkt);
4657
4658 return rc;
4659}
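
/*
 * Illustrative sizing (assumed 5-entry array): the packet is the fixed
 * header plus the raw IRQ array, rounded up to the 8-byte granularity that
 * ArmCP copies with:
 *
 *	size_t irq_arr_size = 5 * sizeof(u32);			// 20 bytes
 *	size_t total = sizeof(struct armcp_unmask_irq_arr_packet) +
 *			irq_arr_size;
 *	total = (total + 0x7) & ~0x7;	// round up to a multiple of 8
 *
 * Note that pkt->length holds the element count (5 here), not the byte
 * count.
 */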
4660
4661static int goya_soft_reset_late_init(struct hl_device *hdev)
4662{
4663 /*
4664 * Unmask all IRQs since some could have been received
4665 * during the soft reset
4666 */
4667 return goya_unmask_irq_arr(hdev, goya_non_fatal_events,
4668 sizeof(goya_non_fatal_events));
4669}
4670
Oded Gabbay1251f232019-02-16 00:39:18 +02004671static int goya_unmask_irq(struct hl_device *hdev, u16 event_type)
4672{
4673 struct armcp_packet pkt;
4674 long result;
4675 int rc;
4676
4677 memset(&pkt, 0, sizeof(pkt));
4678
4679 pkt.ctl = ARMCP_PACKET_UNMASK_RAZWI_IRQ << ARMCP_PKT_CTL_OPCODE_SHIFT;
4680 pkt.value = event_type;
4681
4682 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
4683 HL_DEVICE_TIMEOUT_USEC, &result);
4684
4685 if (rc)
4686 dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);
4687
4688 return rc;
4689}
4690
4691void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
4692{
4693 u16 event_type = ((eq_entry->hdr.ctl & EQ_CTL_EVENT_TYPE_MASK)
4694 >> EQ_CTL_EVENT_TYPE_SHIFT);
4695 struct goya_device *goya = hdev->asic_specific;
4696
4697 goya->events_stat[event_type]++;
4698
4699 switch (event_type) {
4700 case GOYA_ASYNC_EVENT_ID_PCIE_IF:
4701 case GOYA_ASYNC_EVENT_ID_TPC0_ECC:
4702 case GOYA_ASYNC_EVENT_ID_TPC1_ECC:
4703 case GOYA_ASYNC_EVENT_ID_TPC2_ECC:
4704 case GOYA_ASYNC_EVENT_ID_TPC3_ECC:
4705 case GOYA_ASYNC_EVENT_ID_TPC4_ECC:
4706 case GOYA_ASYNC_EVENT_ID_TPC5_ECC:
4707 case GOYA_ASYNC_EVENT_ID_TPC6_ECC:
4708 case GOYA_ASYNC_EVENT_ID_TPC7_ECC:
4709 case GOYA_ASYNC_EVENT_ID_MME_ECC:
4710 case GOYA_ASYNC_EVENT_ID_MME_ECC_EXT:
4711 case GOYA_ASYNC_EVENT_ID_MMU_ECC:
4712 case GOYA_ASYNC_EVENT_ID_DMA_MACRO:
4713 case GOYA_ASYNC_EVENT_ID_DMA_ECC:
4714 case GOYA_ASYNC_EVENT_ID_CPU_IF_ECC:
4715 case GOYA_ASYNC_EVENT_ID_PSOC_MEM:
4716 case GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT:
4717 case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
4718 case GOYA_ASYNC_EVENT_ID_GIC500:
4719 case GOYA_ASYNC_EVENT_ID_PLL0:
4720 case GOYA_ASYNC_EVENT_ID_PLL1:
4721 case GOYA_ASYNC_EVENT_ID_PLL3:
4722 case GOYA_ASYNC_EVENT_ID_PLL4:
4723 case GOYA_ASYNC_EVENT_ID_PLL5:
4724 case GOYA_ASYNC_EVENT_ID_PLL6:
4725 case GOYA_ASYNC_EVENT_ID_AXI_ECC:
4726 case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC:
4727 case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
4728 case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT:
4729 dev_err(hdev->dev,
4730 "Received H/W interrupt %d, reset the chip\n",
4731 event_type);
Oded Gabbayf8c8c7d2019-02-16 00:39:20 +02004732 hl_device_reset(hdev, true, false);
Oded Gabbay1251f232019-02-16 00:39:18 +02004733 break;
4734
4735 case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
4736 case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
4737 case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
4738 case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
4739 case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
4740 case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
4741 case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
4742 case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
4743 case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
4744 case GOYA_ASYNC_EVENT_ID_MME_WACS:
4745 case GOYA_ASYNC_EVENT_ID_MME_WACSD:
4746 case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER:
4747 case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC:
4748 case GOYA_ASYNC_EVENT_ID_PSOC:
4749 case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
4750 case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
4751 case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
4752 case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
4753 case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
4754 case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
4755 case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
4756 case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
4757 case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
4758 case GOYA_ASYNC_EVENT_ID_MME_QM:
4759 case GOYA_ASYNC_EVENT_ID_MME_CMDQ:
4760 case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
4761 case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
4762 goya_print_irq_info(hdev, event_type);
4763 goya_unmask_irq(hdev, event_type);
4764 break;
4765
4766 case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
4767 case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
4768 case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
4769 case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU:
4770 case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU:
4771 case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
4772 case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
4773 case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
4774 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0:
4775 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH1:
4776 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH2:
4777 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH3:
4778 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
4779 dev_info(hdev->dev, "Received H/W interrupt %d\n", event_type);
4780 break;
4781
4782 default:
4783 dev_err(hdev->dev, "Received invalid H/W interrupt %d\n",
4784 event_type);
4785 break;
4786 }
4787}
4788
4789void *goya_get_events_stat(struct hl_device *hdev, u32 *size)
4790{
4791 struct goya_device *goya = hdev->asic_specific;
4792
4793 *size = (u32) sizeof(goya->events_stat);
4794
4795 return goya->events_stat;
4796}
4797
Omer Shpigelman27ca384c2019-02-28 10:46:11 +02004798static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u32 size,
4799 u64 val, bool is_dram)
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004800{
Omer Shpigelman27ca384c2019-02-28 10:46:11 +02004801 struct packet_lin_dma *lin_dma_pkt;
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004802 struct hl_cs_parser parser;
4803 struct hl_cs_job *job;
4804 u32 cb_size;
4805 struct hl_cb *cb;
4806 int rc;
4807
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004808 cb = hl_cb_kernel_create(hdev, PAGE_SIZE);
4809 if (!cb)
4810 return -EFAULT;
4811
Omer Shpigelman27ca384c2019-02-28 10:46:11 +02004812 lin_dma_pkt = (struct packet_lin_dma *) (uintptr_t) cb->kernel_address;
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004813
Omer Shpigelman27ca384c2019-02-28 10:46:11 +02004814 memset(lin_dma_pkt, 0, sizeof(*lin_dma_pkt));
4815 cb_size = sizeof(*lin_dma_pkt);
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004816
Omer Shpigelman27ca384c2019-02-28 10:46:11 +02004817 lin_dma_pkt->ctl = ((PACKET_LIN_DMA << GOYA_PKT_CTL_OPCODE_SHIFT) |
4818 (1 << GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT) |
4819 (1 << GOYA_PKT_LIN_DMA_CTL_WO_SHIFT) |
4820 (1 << GOYA_PKT_CTL_RB_SHIFT) |
4821 (1 << GOYA_PKT_CTL_MB_SHIFT));
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004822
Omer Shpigelman27ca384c2019-02-28 10:46:11 +02004823 lin_dma_pkt->ctl |= (is_dram ? DMA_HOST_TO_DRAM : DMA_HOST_TO_SRAM) <<
4824 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
4825
4826 lin_dma_pkt->src_addr = val;
4827 lin_dma_pkt->dst_addr = addr;
4828 lin_dma_pkt->tsize = size;
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004829
4830 job = hl_cs_allocate_job(hdev, true);
4831 if (!job) {
4832 dev_err(hdev->dev, "Failed to allocate a new job\n");
4833 rc = -ENOMEM;
4834 goto release_cb;
4835 }
4836
4837 job->id = 0;
4838 job->user_cb = cb;
4839 job->user_cb->cs_cnt++;
4840 job->user_cb_size = cb_size;
4841 job->hw_queue_id = GOYA_QUEUE_ID_DMA_0;
4842
Oded Gabbayc2164772019-02-16 00:39:24 +02004843 hl_debugfs_add_job(hdev, job);
4844
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004845 parser.ctx_id = HL_KERNEL_ASID_ID;
4846 parser.cs_sequence = 0;
4847 parser.job_id = job->id;
4848 parser.hw_queue_id = job->hw_queue_id;
4849 parser.job_userptr_list = &job->userptr_list;
4850 parser.user_cb = job->user_cb;
4851 parser.user_cb_size = job->user_cb_size;
4852 parser.ext_queue = job->ext_queue;
4853 parser.use_virt_addr = hdev->mmu_enable;
4854
4855 rc = hdev->asic_funcs->cs_parser(hdev, &parser);
4856 if (rc) {
Omer Shpigelman27ca384c2019-02-28 10:46:11 +02004857 dev_err(hdev->dev, "Failed to parse kernel CB\n");
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004858 goto free_job;
4859 }
4860
4861 job->patched_cb = parser.patched_cb;
4862 job->job_cb_size = parser.patched_cb_size;
4863 job->patched_cb->cs_cnt++;
4864
4865 rc = goya_send_job_on_qman0(hdev, job);
4866
4867 job->patched_cb->cs_cnt--;
4868 hl_cb_put(job->patched_cb);
4869
4870free_job:
4871 hl_userptr_delete_list(hdev, &job->userptr_list);
Oded Gabbayc2164772019-02-16 00:39:24 +02004872 hl_debugfs_remove_job(hdev, job);
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004873 kfree(job);
4874 cb->cs_cnt--;
4875
4876release_cb:
4877 hl_cb_put(cb);
4878 hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
4879
4880 return rc;
4881}
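
/*
 * Illustrative ctl breakdown (field meanings inferred from the shift names
 * above): the kernel-built LIN_DMA packet sets the MEMSET bit so that
 * src_addr is treated as a 64-bit fill pattern rather than a source
 * pointer:
 *
 *	u32 ctl = (PACKET_LIN_DMA << GOYA_PKT_CTL_OPCODE_SHIFT) |
 *		  (1 << GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT) |
 *		  (1 << GOYA_PKT_LIN_DMA_CTL_WO_SHIFT) |
 *		  (1 << GOYA_PKT_CTL_RB_SHIFT) |
 *		  (1 << GOYA_PKT_CTL_MB_SHIFT);
 *
 * The direction is HOST_TO_DRAM or HOST_TO_SRAM depending on is_dram, even
 * though no host memory is read when MEMSET is set.
 */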
4882
Omer Shpigelman27ca384c2019-02-28 10:46:11 +02004883static int goya_context_switch(struct hl_device *hdev, u32 asid)
4884{
4885 struct asic_fixed_properties *prop = &hdev->asic_prop;
4886 u64 addr = prop->sram_base_address;
4887 u32 size = hdev->pldm ? 0x10000 : prop->sram_size;
4888 u64 val = 0x7777777777777777ull;
4889 int rc;
4890
4891 rc = goya_memset_device_memory(hdev, addr, size, val, false);
4892 if (rc) {
4893 dev_err(hdev->dev, "Failed to clear SRAM in context switch\n");
4894 return rc;
4895 }
4896
4897 goya_mmu_prepare(hdev, asid);
4898
4899 return 0;
4900}
4901
4902static int goya_mmu_clear_pgt_range(struct hl_device *hdev)
4903{
4904 struct asic_fixed_properties *prop = &hdev->asic_prop;
4905 struct goya_device *goya = hdev->asic_specific;
4906 u64 addr = prop->mmu_pgt_addr;
4907 u32 size = prop->mmu_pgt_size + MMU_DRAM_DEFAULT_PAGE_SIZE +
4908 MMU_CACHE_MNG_SIZE;
4909
4910 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4911 return 0;
4912
4913 return goya_memset_device_memory(hdev, addr, size, 0, true);
4914}
4915
4916static int goya_mmu_set_dram_default_page(struct hl_device *hdev)
4917{
4918 struct goya_device *goya = hdev->asic_specific;
4919 u64 addr = hdev->asic_prop.mmu_dram_default_page_addr;
4920 u32 size = MMU_DRAM_DEFAULT_PAGE_SIZE;
4921 u64 val = 0x9999999999999999ull;
4922
4923 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4924 return 0;
4925
4926 return goya_memset_device_memory(hdev, addr, size, val, true);
4927}
4928
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004929static void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
4930{
4931 struct goya_device *goya = hdev->asic_specific;
4932 int i;
4933
4934 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4935 return;
4936
4937 if (asid & ~MME_QM_GLBL_SECURE_PROPS_ASID_MASK) {
4938 WARN(1, "asid %u is too big\n", asid);
4939 return;
4940 }
4941
4942 /* zero the MMBP and ASID bits and then set the ASID */
4943 for (i = 0 ; i < GOYA_MMU_REGS_NUM ; i++) {
4944 WREG32_AND(goya_mmu_regs[i], ~0x7FF);
4945 WREG32_OR(goya_mmu_regs[i], asid);
4946 }
4947}
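
/*
 * Net effect of the WREG32_AND/WREG32_OR pair above (11-bit MMBP + ASID
 * field assumed from the 0x7FF mask):
 *
 *	u32 v = RREG32(goya_mmu_regs[i]);
 *	v &= ~0x7FF;	// clear MMBP and ASID bits
 *	v |= asid;	// MMBP left at 0, i.e. no MMU bypass
 *	WREG32(goya_mmu_regs[i], v);
 *
 * Every secured QMAN register in goya_mmu_regs[] ends up tagged with the
 * context's ASID.
 */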
4948
4949static void goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard)
4950{
4951 struct goya_device *goya = hdev->asic_specific;
4952 u32 status, timeout_usec;
4953 int rc;
4954
4955 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4956 return;
4957
4958 	/* no need for L1-only invalidation in Goya */
4959 if (!is_hard)
4960 return;
4961
4962 if (hdev->pldm)
4963 timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
4964 else
4965 timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
4966
4967 mutex_lock(&hdev->mmu_cache_lock);
4968
4969 /* L0 & L1 invalidation */
4970 WREG32(mmSTLB_INV_ALL_START, 1);
4971
4972 rc = hl_poll_timeout(
4973 hdev,
4974 mmSTLB_INV_ALL_START,
4975 status,
4976 !status,
4977 1000,
4978 timeout_usec);
4979
4980 mutex_unlock(&hdev->mmu_cache_lock);
4981
4982 if (rc)
4983 dev_notice_ratelimited(hdev->dev,
4984 "Timeout when waiting for MMU cache invalidation\n");
4985}
4986
4987static void goya_mmu_invalidate_cache_range(struct hl_device *hdev,
4988 bool is_hard, u32 asid, u64 va, u64 size)
4989{
4990 struct goya_device *goya = hdev->asic_specific;
4991 u32 status, timeout_usec, inv_data, pi;
4992 int rc;
4993
4994 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4995 return;
4996
4997 	/* no need for L1-only invalidation in Goya */
4998 if (!is_hard)
4999 return;
5000
5001 if (hdev->pldm)
5002 timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
5003 else
5004 timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
5005
5006 mutex_lock(&hdev->mmu_cache_lock);
5007
5008 /*
5009 * TODO: currently invalidate entire L0 & L1 as in regular hard
5010 * invalidation. Need to apply invalidation of specific cache lines with
5011 * mask of ASID & VA & size.
5012 	 * Note that L1 will be flushed entirely in any case.
5013 */
5014
5015 /* L0 & L1 invalidation */
5016 inv_data = RREG32(mmSTLB_CACHE_INV);
5017 /* PI is 8 bit */
5018 pi = ((inv_data & STLB_CACHE_INV_PRODUCER_INDEX_MASK) + 1) & 0xFF;
5019 WREG32(mmSTLB_CACHE_INV,
5020 (inv_data & STLB_CACHE_INV_INDEX_MASK_MASK) | pi);
5021
5022 rc = hl_poll_timeout(
5023 hdev,
5024 mmSTLB_INV_CONSUMER_INDEX,
5025 status,
5026 status == pi,
5027 1000,
5028 timeout_usec);
5029
5030 mutex_unlock(&hdev->mmu_cache_lock);
5031
5032 if (rc)
5033 dev_notice_ratelimited(hdev->dev,
5034 "Timeout when waiting for MMU cache invalidation\n");
5035}
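
/*
 * Illustrative PI update (8-bit producer index assumed from the 0xFF
 * mask): the new producer index is the current one plus one, wrapping at
 * 256, and the poll waits for the consumer index to catch up:
 *
 *	u32 inv_data = 0x000000ff;	// hypothetical register value
 *	u32 pi = ((inv_data & STLB_CACHE_INV_PRODUCER_INDEX_MASK) + 1) & 0xFF;
 *	// with an 0xFF producer mask, 255 + 1 wraps to pi == 0
 *
 * Completion is detected when mmSTLB_INV_CONSUMER_INDEX reads back as pi.
 */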
5036
5037static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
5038 u64 phys_addr)
5039{
5040 u32 status, timeout_usec;
5041 int rc;
5042
5043 if (hdev->pldm)
5044 timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
5045 else
5046 timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
5047
5048 WREG32(MMU_HOP0_PA43_12, phys_addr >> MMU_HOP0_PA43_12_SHIFT);
5049 WREG32(MMU_HOP0_PA49_44, phys_addr >> MMU_HOP0_PA49_44_SHIFT);
5050 WREG32(MMU_ASID_BUSY, 0x80000000 | asid);
5051
5052 rc = hl_poll_timeout(
5053 hdev,
5054 MMU_ASID_BUSY,
5055 status,
5056 !(status & 0x80000000),
5057 1000,
5058 timeout_usec);
5059
5060 if (rc) {
5061 dev_err(hdev->dev,
5062 "Timeout during MMU hop0 config of asid %d\n", asid);
5063 return rc;
5064 }
5065
5066 return 0;
5067}
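
/*
 * Illustrative register split (field widths and shifts inferred from the
 * register names): the hop-0 physical address is programmed in two slices,
 * bits [43:12] and [49:44], and bit 31 of MMU_ASID_BUSY is the busy bit
 * the poll waits to see cleared:
 *
 *	WREG32(MMU_HOP0_PA43_12, phys_addr >> 12);	// assumed shift value
 *	WREG32(MMU_HOP0_PA49_44, phys_addr >> 44);	// assumed shift value
 *	WREG32(MMU_ASID_BUSY, 0x80000000 | asid);
 *
 * The low bits [11:0] are implicitly zero, i.e. hop-0 tables are assumed
 * to be 4KB-aligned.
 */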
5068
Oded Gabbayf8c8c7d2019-02-16 00:39:20 +02005069int goya_send_heartbeat(struct hl_device *hdev)
5070{
5071 struct goya_device *goya = hdev->asic_specific;
5072 struct armcp_packet hb_pkt;
5073 long result;
5074 int rc;
5075
5076 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
5077 return 0;
5078
5079 memset(&hb_pkt, 0, sizeof(hb_pkt));
5080
5081 hb_pkt.ctl = ARMCP_PACKET_TEST << ARMCP_PKT_CTL_OPCODE_SHIFT;
5082 hb_pkt.value = ARMCP_PACKET_FENCE_VAL;
5083
5084 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt,
5085 sizeof(hb_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
5086
5087 if ((rc) || (result != ARMCP_PACKET_FENCE_VAL))
5088 rc = -EIO;
5089
5090 return rc;
5091}
5092
Oded Gabbayd91389b2019-02-16 00:39:19 +02005093static int goya_armcp_info_get(struct hl_device *hdev)
5094{
5095 struct goya_device *goya = hdev->asic_specific;
5096 struct asic_fixed_properties *prop = &hdev->asic_prop;
5097 struct armcp_packet pkt;
5098 void *armcp_info_cpu_addr;
5099 dma_addr_t armcp_info_dma_addr;
5100 u64 dram_size;
5101 long result;
5102 int rc;
5103
5104 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
5105 return 0;
5106
5107 armcp_info_cpu_addr =
5108 hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
5109 sizeof(struct armcp_info), &armcp_info_dma_addr);
5110 if (!armcp_info_cpu_addr) {
5111 dev_err(hdev->dev,
5112 "Failed to allocate DMA memory for ArmCP info packet\n");
5113 return -ENOMEM;
5114 }
5115
5116 memset(armcp_info_cpu_addr, 0, sizeof(struct armcp_info));
5117
5118 memset(&pkt, 0, sizeof(pkt));
5119
5120 pkt.ctl = ARMCP_PACKET_INFO_GET << ARMCP_PKT_CTL_OPCODE_SHIFT;
5121 pkt.addr = armcp_info_dma_addr + prop->host_phys_base_address;
5122 pkt.data_max_size = sizeof(struct armcp_info);
5123
5124 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
5125 GOYA_ARMCP_INFO_TIMEOUT, &result);
5126
5127 if (rc) {
5128 dev_err(hdev->dev,
5129 "Failed to send armcp info pkt, error %d\n", rc);
5130 goto out;
5131 }
5132
5133 memcpy(&prop->armcp_info, armcp_info_cpu_addr,
5134 sizeof(prop->armcp_info));
5135
5136 dram_size = prop->armcp_info.dram_size;
5137 if (dram_size) {
5138 if ((!is_power_of_2(dram_size)) ||
5139 (dram_size < DRAM_PHYS_DEFAULT_SIZE)) {
5140 dev_err(hdev->dev,
5141 "F/W reported invalid DRAM size %llu. Trying to use default size\n",
5142 dram_size);
5143 dram_size = DRAM_PHYS_DEFAULT_SIZE;
5144 }
5145
5146 prop->dram_size = dram_size;
5147 prop->dram_end_address = prop->dram_base_address + dram_size;
5148 }
5149
5150 rc = hl_build_hwmon_channel_info(hdev, prop->armcp_info.sensors);
5151 if (rc) {
5152 dev_err(hdev->dev,
5153 "Failed to build hwmon channel info, error %d\n", rc);
5154 rc = -EFAULT;
5155 goto out;
5156 }
5157
5158out:
5159 hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
5160 sizeof(struct armcp_info), armcp_info_cpu_addr);
5161
5162 return rc;
5163}
5164
5165static void goya_init_clock_gating(struct hl_device *hdev)
5166{
5167
5168}
5169
5170static void goya_disable_clock_gating(struct hl_device *hdev)
5171{
5172
5173}
Oded Gabbay9494a8d2019-02-16 00:39:17 +02005174
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02005175static bool goya_is_device_idle(struct hl_device *hdev)
5176{
5177 u64 offset, dma_qm_reg, tpc_qm_reg, tpc_cmdq_reg, tpc_cfg_reg;
5178 int i;
5179
5180 offset = mmDMA_QM_1_GLBL_STS0 - mmDMA_QM_0_GLBL_STS0;
5181
5182 for (i = 0 ; i < DMA_MAX_NUM ; i++) {
5183 dma_qm_reg = mmDMA_QM_0_GLBL_STS0 + i * offset;
5184
5185 if ((RREG32(dma_qm_reg) & DMA_QM_IDLE_MASK) !=
5186 DMA_QM_IDLE_MASK)
5187 return false;
5188 }
5189
5190 offset = mmTPC1_QM_GLBL_STS0 - mmTPC0_QM_GLBL_STS0;
5191
5192 for (i = 0 ; i < TPC_MAX_NUM ; i++) {
5193 tpc_qm_reg = mmTPC0_QM_GLBL_STS0 + i * offset;
5194 tpc_cmdq_reg = mmTPC0_CMDQ_GLBL_STS0 + i * offset;
5195 tpc_cfg_reg = mmTPC0_CFG_STATUS + i * offset;
5196
5197 if ((RREG32(tpc_qm_reg) & TPC_QM_IDLE_MASK) !=
5198 TPC_QM_IDLE_MASK)
5199 return false;
5200
5201 if ((RREG32(tpc_cmdq_reg) & TPC_CMDQ_IDLE_MASK) !=
5202 TPC_CMDQ_IDLE_MASK)
5203 return false;
5204
5205 if ((RREG32(tpc_cfg_reg) & TPC_CFG_IDLE_MASK) !=
5206 TPC_CFG_IDLE_MASK)
5207 return false;
5208 }
5209
5210 if ((RREG32(mmMME_QM_GLBL_STS0) & MME_QM_IDLE_MASK) !=
5211 MME_QM_IDLE_MASK)
5212 return false;
5213
5214 if ((RREG32(mmMME_CMDQ_GLBL_STS0) & MME_CMDQ_IDLE_MASK) !=
5215 MME_CMDQ_IDLE_MASK)
5216 return false;
5217
5218 if ((RREG32(mmMME_ARCH_STATUS) & MME_ARCH_IDLE_MASK) !=
5219 MME_ARCH_IDLE_MASK)
5220 return false;
5221
5222 if (RREG32(mmMME_SHADOW_0_STATUS) & MME_SHADOW_IDLE_MASK)
5223 return false;
5224
5225 return true;
5226}
5227
Oded Gabbay9494a8d2019-02-16 00:39:17 +02005228static void goya_hw_queues_lock(struct hl_device *hdev)
5229{
5230 struct goya_device *goya = hdev->asic_specific;
5231
5232 spin_lock(&goya->hw_queues_lock);
5233}
5234
5235static void goya_hw_queues_unlock(struct hl_device *hdev)
5236{
5237 struct goya_device *goya = hdev->asic_specific;
5238
5239 spin_unlock(&goya->hw_queues_lock);
5240}
5241
Oded Gabbayd8dd7b02019-02-16 00:39:23 +02005242static u32 goya_get_pci_id(struct hl_device *hdev)
5243{
5244 return hdev->pdev->device;
5245}
5246
Oded Gabbay5e6e0232019-02-27 12:15:16 +02005247static int goya_get_eeprom_data(struct hl_device *hdev, void *data,
5248 size_t max_size)
Oded Gabbayd91389b2019-02-16 00:39:19 +02005249{
5250 struct goya_device *goya = hdev->asic_specific;
5251 struct asic_fixed_properties *prop = &hdev->asic_prop;
5252 struct armcp_packet pkt;
5253 void *eeprom_info_cpu_addr;
5254 dma_addr_t eeprom_info_dma_addr;
5255 long result;
5256 int rc;
5257
5258 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
5259 return 0;
5260
5261 eeprom_info_cpu_addr =
5262 hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
5263 max_size, &eeprom_info_dma_addr);
5264 if (!eeprom_info_cpu_addr) {
5265 dev_err(hdev->dev,
5266 "Failed to allocate DMA memory for EEPROM info packet\n");
5267 return -ENOMEM;
5268 }
5269
5270 memset(eeprom_info_cpu_addr, 0, max_size);
5271
5272 memset(&pkt, 0, sizeof(pkt));
5273
5274 pkt.ctl = ARMCP_PACKET_EEPROM_DATA_GET << ARMCP_PKT_CTL_OPCODE_SHIFT;
5275 pkt.addr = eeprom_info_dma_addr + prop->host_phys_base_address;
5276 pkt.data_max_size = max_size;
5277
5278 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
5279 GOYA_ARMCP_EEPROM_TIMEOUT, &result);
5280
5281 if (rc) {
5282 dev_err(hdev->dev,
5283 "Failed to send armcp EEPROM pkt, error %d\n", rc);
5284 goto out;
5285 }
5286
5287 /* result contains the actual size */
5288 memcpy(data, eeprom_info_cpu_addr, min((size_t)result, max_size));
5289
5290out:
5291 hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, max_size,
5292 eeprom_info_cpu_addr);
5293
5294 return rc;
5295}
5296
static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev)
{
	return RREG32(mmPSOC_GLOBAL_CONF_APP_STATUS);
}

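/*
 * goya_funcs - the Goya implementation of the ASIC-specific function table
 * that the common habanalabs code calls through (see goya_set_asic_funcs()
 * below).
 */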
static const struct hl_asic_funcs goya_funcs = {
	.early_init = goya_early_init,
	.early_fini = goya_early_fini,
	.late_init = goya_late_init,
	.late_fini = goya_late_fini,
	.sw_init = goya_sw_init,
	.sw_fini = goya_sw_fini,
	.hw_init = goya_hw_init,
	.hw_fini = goya_hw_fini,
	.halt_engines = goya_halt_engines,
	.suspend = goya_suspend,
	.resume = goya_resume,
	.cb_mmap = goya_cb_mmap,
	.ring_doorbell = goya_ring_doorbell,
	.flush_pq_write = goya_flush_pq_write,
	.dma_alloc_coherent = goya_dma_alloc_coherent,
	.dma_free_coherent = goya_dma_free_coherent,
	.get_int_queue_base = goya_get_int_queue_base,
	.test_queues = goya_test_queues,
	.dma_pool_zalloc = goya_dma_pool_zalloc,
	.dma_pool_free = goya_dma_pool_free,
	.cpu_accessible_dma_pool_alloc = goya_cpu_accessible_dma_pool_alloc,
	.cpu_accessible_dma_pool_free = goya_cpu_accessible_dma_pool_free,
	.hl_dma_unmap_sg = goya_dma_unmap_sg,
	.cs_parser = goya_cs_parser,
	.asic_dma_map_sg = goya_dma_map_sg,
	.get_dma_desc_list_size = goya_get_dma_desc_list_size,
	.add_end_of_cb_packets = goya_add_end_of_cb_packets,
	.update_eq_ci = goya_update_eq_ci,
	.context_switch = goya_context_switch,
	.restore_phase_topology = goya_restore_phase_topology,
	.debugfs_read32 = goya_debugfs_read32,
	.debugfs_write32 = goya_debugfs_write32,
	.add_device_attr = goya_add_device_attr,
	.handle_eqe = goya_handle_eqe,
	.set_pll_profile = goya_set_pll_profile,
	.get_events_stat = goya_get_events_stat,
	.read_pte = goya_read_pte,
	.write_pte = goya_write_pte,
	.mmu_invalidate_cache = goya_mmu_invalidate_cache,
	.mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range,
	.send_heartbeat = goya_send_heartbeat,
	.enable_clock_gating = goya_init_clock_gating,
	.disable_clock_gating = goya_disable_clock_gating,
	.is_device_idle = goya_is_device_idle,
	.soft_reset_late_init = goya_soft_reset_late_init,
	.hw_queues_lock = goya_hw_queues_lock,
	.hw_queues_unlock = goya_hw_queues_unlock,
	.get_pci_id = goya_get_pci_id,
	.get_eeprom_data = goya_get_eeprom_data,
	.send_cpu_message = goya_send_cpu_message,
	.get_hw_state = goya_get_hw_state
};

/*
 * goya_set_asic_funcs - set Goya function pointers
 *
 * @hdev: pointer to hl_device structure
 *
 */
void goya_set_asic_funcs(struct hl_device *hdev)
{
	hdev->asic_funcs = &goya_funcs;
}
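
/*
 * Dispatch sketch (illustrative only; actual call sites are in the common
 * driver code): once goya_set_asic_funcs() has run during initialization,
 * ASIC-agnostic code reaches the Goya implementations through the table,
 * along the lines of:
 *
 *	if (hdev->asic_funcs->is_device_idle(hdev))
 *		dev_info(hdev->dev, "device is idle\n");
 *
 * where dev_info() is just an example consumer of the result.
 */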