// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "goyaP.h"
#include "include/hw_ip/mmu/mmu_general.h"
#include "include/hw_ip/mmu/mmu_v1_0.h"
#include "include/goya/asic_reg/goya_masks.h"

#include <linux/pci.h>
#include <linux/genalloc.h>
#include <linux/hwmon.h>
#include <linux/io-64-nonatomic-lo-hi.h>

/*
 * GOYA security scheme:
 *
 * 1. Host is protected by:
 *        - Range registers (When MMU is enabled, DMA RR does NOT protect host)
 *        - MMU
 *
 * 2. DRAM is protected by:
 *        - Range registers (protect the first 512MB)
 *        - MMU (isolation between users)
 *
 * 3. Configuration is protected by:
 *        - Range registers
 *        - Protection bits
 *
 * When MMU is disabled:
 *
 * QMAN DMA: PQ, CQ, CP, DMA are secured.
 * PQ, CB and the data are on the host.
 *
 * QMAN TPC/MME:
 * PQ, CQ and CP are not secured.
 * PQ, CB and the data are on the SRAM/DRAM.
 *
 * Since QMAN DMA is secured, KMD is parsing the DMA CB:
 *     - KMD checks DMA pointer
 *     - WREG, MSG_PROT are not allowed.
 *     - MSG_LONG/SHORT are allowed.
 *
 * A read/write transaction by the QMAN to a protected area will succeed if
 * and only if the QMAN's CP is secured and MSG_PROT is used
 *
 *
 * When MMU is enabled:
 *
 * QMAN DMA: PQ, CQ and CP are secured.
 * MMU is set to bypass on the Secure props register of the QMAN.
 * The reasons we don't enable MMU for PQ, CQ and CP are:
 *     - PQ entry is in kernel address space and KMD doesn't map it.
 *     - CP writes to MSIX register and to kernel address space (completion
 *       queue).
 *
 * DMA is not secured but because CP is secured, KMD still needs to parse the
 * CB, but doesn't need to check the DMA addresses.
 *
 * For QMAN DMA 0, DMA is also secured because only KMD uses this DMA and KMD
 * doesn't map memory in MMU.
 *
 * QMAN TPC/MME: PQ, CQ and CP aren't secured (no change from MMU disabled mode)
 *
 * DMA RR does NOT protect host because DMA is not secured
 *
 */

#define GOYA_MMU_REGS_NUM		61

#define GOYA_DMA_POOL_BLK_SIZE		0x100		/* 256 bytes */

#define GOYA_RESET_TIMEOUT_MSEC		500		/* 500ms */
#define GOYA_PLDM_RESET_TIMEOUT_MSEC	20000		/* 20s */
#define GOYA_RESET_WAIT_MSEC		1		/* 1ms */
#define GOYA_CPU_RESET_WAIT_MSEC	100		/* 100ms */
#define GOYA_PLDM_RESET_WAIT_MSEC	1000		/* 1s */
#define GOYA_CPU_TIMEOUT_USEC		10000000	/* 10s */
#define GOYA_TEST_QUEUE_WAIT_USEC	100000		/* 100ms */
#define GOYA_PLDM_MMU_TIMEOUT_USEC	(MMU_CONFIG_TIMEOUT_USEC * 100)
#define GOYA_PLDM_QMAN0_TIMEOUT_USEC	(HL_DEVICE_TIMEOUT_USEC * 30)

#define GOYA_QMAN0_FENCE_VAL		0xD169B243

#define GOYA_MAX_INITIATORS		20

#define GOYA_MAX_STRING_LEN		20

#define GOYA_CB_POOL_CB_CNT		512
#define GOYA_CB_POOL_CB_SIZE		0x20000		/* 128KB */

static const char goya_irq_name[GOYA_MSIX_ENTRIES][GOYA_MAX_STRING_LEN] = {
		"goya cq 0", "goya cq 1", "goya cq 2", "goya cq 3",
		"goya cq 4", "goya cpu eq"
};

static u16 goya_packet_sizes[MAX_PACKET_ID] = {
	[PACKET_WREG_32]	= sizeof(struct packet_wreg32),
	[PACKET_WREG_BULK]	= sizeof(struct packet_wreg_bulk),
	[PACKET_MSG_LONG]	= sizeof(struct packet_msg_long),
	[PACKET_MSG_SHORT]	= sizeof(struct packet_msg_short),
	[PACKET_CP_DMA]		= sizeof(struct packet_cp_dma),
	[PACKET_MSG_PROT]	= sizeof(struct packet_msg_prot),
	[PACKET_FENCE]		= sizeof(struct packet_fence),
	[PACKET_LIN_DMA]	= sizeof(struct packet_lin_dma),
	[PACKET_NOP]		= sizeof(struct packet_nop),
	[PACKET_STOP]		= sizeof(struct packet_stop)
};

static u64 goya_mmu_regs[GOYA_MMU_REGS_NUM] = {
	mmDMA_QM_0_GLBL_NON_SECURE_PROPS,
	mmDMA_QM_1_GLBL_NON_SECURE_PROPS,
	mmDMA_QM_2_GLBL_NON_SECURE_PROPS,
	mmDMA_QM_3_GLBL_NON_SECURE_PROPS,
	mmDMA_QM_4_GLBL_NON_SECURE_PROPS,
	mmTPC0_QM_GLBL_SECURE_PROPS,
	mmTPC0_QM_GLBL_NON_SECURE_PROPS,
	mmTPC0_CMDQ_GLBL_SECURE_PROPS,
	mmTPC0_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC0_CFG_ARUSER,
	mmTPC0_CFG_AWUSER,
	mmTPC1_QM_GLBL_SECURE_PROPS,
	mmTPC1_QM_GLBL_NON_SECURE_PROPS,
	mmTPC1_CMDQ_GLBL_SECURE_PROPS,
	mmTPC1_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC1_CFG_ARUSER,
	mmTPC1_CFG_AWUSER,
	mmTPC2_QM_GLBL_SECURE_PROPS,
	mmTPC2_QM_GLBL_NON_SECURE_PROPS,
	mmTPC2_CMDQ_GLBL_SECURE_PROPS,
	mmTPC2_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC2_CFG_ARUSER,
	mmTPC2_CFG_AWUSER,
	mmTPC3_QM_GLBL_SECURE_PROPS,
	mmTPC3_QM_GLBL_NON_SECURE_PROPS,
	mmTPC3_CMDQ_GLBL_SECURE_PROPS,
	mmTPC3_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC3_CFG_ARUSER,
	mmTPC3_CFG_AWUSER,
	mmTPC4_QM_GLBL_SECURE_PROPS,
	mmTPC4_QM_GLBL_NON_SECURE_PROPS,
	mmTPC4_CMDQ_GLBL_SECURE_PROPS,
	mmTPC4_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC4_CFG_ARUSER,
	mmTPC4_CFG_AWUSER,
	mmTPC5_QM_GLBL_SECURE_PROPS,
	mmTPC5_QM_GLBL_NON_SECURE_PROPS,
	mmTPC5_CMDQ_GLBL_SECURE_PROPS,
	mmTPC5_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC5_CFG_ARUSER,
	mmTPC5_CFG_AWUSER,
	mmTPC6_QM_GLBL_SECURE_PROPS,
	mmTPC6_QM_GLBL_NON_SECURE_PROPS,
	mmTPC6_CMDQ_GLBL_SECURE_PROPS,
	mmTPC6_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC6_CFG_ARUSER,
	mmTPC6_CFG_AWUSER,
	mmTPC7_QM_GLBL_SECURE_PROPS,
	mmTPC7_QM_GLBL_NON_SECURE_PROPS,
	mmTPC7_CMDQ_GLBL_SECURE_PROPS,
	mmTPC7_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC7_CFG_ARUSER,
	mmTPC7_CFG_AWUSER,
	mmMME_QM_GLBL_SECURE_PROPS,
	mmMME_QM_GLBL_NON_SECURE_PROPS,
	mmMME_CMDQ_GLBL_SECURE_PROPS,
	mmMME_CMDQ_GLBL_NON_SECURE_PROPS,
	mmMME_SBA_CONTROL_DATA,
	mmMME_SBB_CONTROL_DATA,
	mmMME_SBC_CONTROL_DATA,
	mmMME_WBC_CONTROL_DATA
};

static u32 goya_all_events[] = {
	GOYA_ASYNC_EVENT_ID_PCIE_IF,
	GOYA_ASYNC_EVENT_ID_TPC0_ECC,
	GOYA_ASYNC_EVENT_ID_TPC1_ECC,
	GOYA_ASYNC_EVENT_ID_TPC2_ECC,
	GOYA_ASYNC_EVENT_ID_TPC3_ECC,
	GOYA_ASYNC_EVENT_ID_TPC4_ECC,
	GOYA_ASYNC_EVENT_ID_TPC5_ECC,
	GOYA_ASYNC_EVENT_ID_TPC6_ECC,
	GOYA_ASYNC_EVENT_ID_TPC7_ECC,
	GOYA_ASYNC_EVENT_ID_MME_ECC,
	GOYA_ASYNC_EVENT_ID_MME_ECC_EXT,
	GOYA_ASYNC_EVENT_ID_MMU_ECC,
	GOYA_ASYNC_EVENT_ID_DMA_MACRO,
	GOYA_ASYNC_EVENT_ID_DMA_ECC,
	GOYA_ASYNC_EVENT_ID_CPU_IF_ECC,
	GOYA_ASYNC_EVENT_ID_PSOC_MEM,
	GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT,
	GOYA_ASYNC_EVENT_ID_SRAM0,
	GOYA_ASYNC_EVENT_ID_SRAM1,
	GOYA_ASYNC_EVENT_ID_SRAM2,
	GOYA_ASYNC_EVENT_ID_SRAM3,
	GOYA_ASYNC_EVENT_ID_SRAM4,
	GOYA_ASYNC_EVENT_ID_SRAM5,
	GOYA_ASYNC_EVENT_ID_SRAM6,
	GOYA_ASYNC_EVENT_ID_SRAM7,
	GOYA_ASYNC_EVENT_ID_SRAM8,
	GOYA_ASYNC_EVENT_ID_SRAM9,
	GOYA_ASYNC_EVENT_ID_SRAM10,
	GOYA_ASYNC_EVENT_ID_SRAM11,
	GOYA_ASYNC_EVENT_ID_SRAM12,
	GOYA_ASYNC_EVENT_ID_SRAM13,
	GOYA_ASYNC_EVENT_ID_SRAM14,
	GOYA_ASYNC_EVENT_ID_SRAM15,
	GOYA_ASYNC_EVENT_ID_SRAM16,
	GOYA_ASYNC_EVENT_ID_SRAM17,
	GOYA_ASYNC_EVENT_ID_SRAM18,
	GOYA_ASYNC_EVENT_ID_SRAM19,
	GOYA_ASYNC_EVENT_ID_SRAM20,
	GOYA_ASYNC_EVENT_ID_SRAM21,
	GOYA_ASYNC_EVENT_ID_SRAM22,
	GOYA_ASYNC_EVENT_ID_SRAM23,
	GOYA_ASYNC_EVENT_ID_SRAM24,
	GOYA_ASYNC_EVENT_ID_SRAM25,
	GOYA_ASYNC_EVENT_ID_SRAM26,
	GOYA_ASYNC_EVENT_ID_SRAM27,
	GOYA_ASYNC_EVENT_ID_SRAM28,
	GOYA_ASYNC_EVENT_ID_SRAM29,
	GOYA_ASYNC_EVENT_ID_GIC500,
	GOYA_ASYNC_EVENT_ID_PLL0,
	GOYA_ASYNC_EVENT_ID_PLL1,
	GOYA_ASYNC_EVENT_ID_PLL3,
	GOYA_ASYNC_EVENT_ID_PLL4,
	GOYA_ASYNC_EVENT_ID_PLL5,
	GOYA_ASYNC_EVENT_ID_PLL6,
	GOYA_ASYNC_EVENT_ID_AXI_ECC,
	GOYA_ASYNC_EVENT_ID_L2_RAM_ECC,
	GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET,
	GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT,
	GOYA_ASYNC_EVENT_ID_PCIE_DEC,
	GOYA_ASYNC_EVENT_ID_TPC0_DEC,
	GOYA_ASYNC_EVENT_ID_TPC1_DEC,
	GOYA_ASYNC_EVENT_ID_TPC2_DEC,
	GOYA_ASYNC_EVENT_ID_TPC3_DEC,
	GOYA_ASYNC_EVENT_ID_TPC4_DEC,
	GOYA_ASYNC_EVENT_ID_TPC5_DEC,
	GOYA_ASYNC_EVENT_ID_TPC6_DEC,
	GOYA_ASYNC_EVENT_ID_TPC7_DEC,
	GOYA_ASYNC_EVENT_ID_MME_WACS,
	GOYA_ASYNC_EVENT_ID_MME_WACSD,
	GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER,
	GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC,
	GOYA_ASYNC_EVENT_ID_PSOC,
	GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC0_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC1_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC2_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC3_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC4_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC5_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC6_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC7_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC0_QM,
	GOYA_ASYNC_EVENT_ID_TPC1_QM,
	GOYA_ASYNC_EVENT_ID_TPC2_QM,
	GOYA_ASYNC_EVENT_ID_TPC3_QM,
	GOYA_ASYNC_EVENT_ID_TPC4_QM,
	GOYA_ASYNC_EVENT_ID_TPC5_QM,
	GOYA_ASYNC_EVENT_ID_TPC6_QM,
	GOYA_ASYNC_EVENT_ID_TPC7_QM,
	GOYA_ASYNC_EVENT_ID_MME_QM,
	GOYA_ASYNC_EVENT_ID_MME_CMDQ,
	GOYA_ASYNC_EVENT_ID_DMA0_QM,
	GOYA_ASYNC_EVENT_ID_DMA1_QM,
	GOYA_ASYNC_EVENT_ID_DMA2_QM,
	GOYA_ASYNC_EVENT_ID_DMA3_QM,
	GOYA_ASYNC_EVENT_ID_DMA4_QM,
	GOYA_ASYNC_EVENT_ID_DMA0_CH,
	GOYA_ASYNC_EVENT_ID_DMA1_CH,
	GOYA_ASYNC_EVENT_ID_DMA2_CH,
	GOYA_ASYNC_EVENT_ID_DMA3_CH,
	GOYA_ASYNC_EVENT_ID_DMA4_CH,
	GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH0,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH1,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH2,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH3,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH4
};

static int goya_armcp_info_get(struct hl_device *hdev);
static void goya_mmu_prepare(struct hl_device *hdev, u32 asid);
static int goya_mmu_clear_pgt_range(struct hl_device *hdev);
static int goya_mmu_set_dram_default_page(struct hl_device *hdev);
static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
					u64 phys_addr);

static void goya_get_fixed_properties(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int i;

	for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
		prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
		prop->hw_queues_props[i].kmd_only = 0;
	}

	for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES ; i++) {
		prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
		prop->hw_queues_props[i].kmd_only = 1;
	}

	for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES +
			NUMBER_OF_INT_HW_QUEUES; i++) {
		prop->hw_queues_props[i].type = QUEUE_TYPE_INT;
		prop->hw_queues_props[i].kmd_only = 0;
	}

	for (; i < HL_MAX_QUEUES; i++)
		prop->hw_queues_props[i].type = QUEUE_TYPE_NA;

	prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;

	prop->dram_base_address = DRAM_PHYS_BASE;
	prop->dram_size = DRAM_PHYS_DEFAULT_SIZE;
	prop->dram_end_address = prop->dram_base_address + prop->dram_size;
	prop->dram_user_base_address = DRAM_BASE_ADDR_USER;

	prop->sram_base_address = SRAM_BASE_ADDR;
	prop->sram_size = SRAM_SIZE;
	prop->sram_end_address = prop->sram_base_address + prop->sram_size;
	prop->sram_user_base_address = prop->sram_base_address +
						SRAM_USER_BASE_OFFSET;

	prop->mmu_pgt_addr = MMU_PAGE_TABLES_ADDR;
	prop->mmu_dram_default_page_addr = MMU_DRAM_DEFAULT_PAGE_ADDR;
	if (hdev->pldm)
		prop->mmu_pgt_size = 0x800000; /* 8MB */
	else
		prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE;
	prop->mmu_pte_size = HL_PTE_SIZE;
	prop->mmu_hop_table_size = HOP_TABLE_SIZE;
	prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE;
	prop->dram_page_size = PAGE_SIZE_2MB;

	prop->host_phys_base_address = HOST_PHYS_BASE;
	prop->va_space_host_start_address = VA_HOST_SPACE_START;
	prop->va_space_host_end_address = VA_HOST_SPACE_END;
	prop->va_space_dram_start_address = VA_DDR_SPACE_START;
	prop->va_space_dram_end_address = VA_DDR_SPACE_END;
	prop->dram_size_for_default_page_mapping =
			prop->va_space_dram_end_address;
	prop->cfg_size = CFG_SIZE;
	prop->max_asid = MAX_ASID;
	prop->num_of_events = GOYA_ASYNC_EVENT_ID_SIZE;
	prop->cb_pool_cb_cnt = GOYA_CB_POOL_CB_CNT;
	prop->cb_pool_cb_size = GOYA_CB_POOL_CB_SIZE;
	prop->max_power_default = MAX_POWER_DEFAULT;
	prop->tpc_enabled_mask = TPC_ENABLED_MASK;

	prop->high_pll = PLL_HIGH_DEFAULT;
}
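
/*
 * Explanatory note (not part of the original source): with the queue counts
 * that Goya's headers define at this point in the driver's history, the loops
 * above lay out the external (host-submitted) DMA queues first, then the
 * single kernel-only CPU queue, then the internal TPC/MME queues, and mark
 * every remaining slot up to HL_MAX_QUEUES as QUEUE_TYPE_NA. The exact counts
 * come from the NUMBER_OF_*_HW_QUEUES macros in the headers, not from this
 * file.
 */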

/*
 * goya_pci_bars_map - Map PCI BARS of Goya device
 *
 * @hdev: pointer to hl_device structure
 *
 * Request PCI regions and map them to kernel virtual addresses.
 * Returns 0 on success
 *
 */
static int goya_pci_bars_map(struct hl_device *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int rc;

	rc = pci_request_regions(pdev, HL_NAME);
	if (rc) {
		dev_err(hdev->dev, "Cannot obtain PCI resources\n");
		return rc;
	}

	hdev->pcie_bar[SRAM_CFG_BAR_ID] =
			pci_ioremap_bar(pdev, SRAM_CFG_BAR_ID);
	if (!hdev->pcie_bar[SRAM_CFG_BAR_ID]) {
		dev_err(hdev->dev, "pci_ioremap_bar failed for CFG\n");
		rc = -ENODEV;
		goto err_release_regions;
	}

	hdev->pcie_bar[MSIX_BAR_ID] = pci_ioremap_bar(pdev, MSIX_BAR_ID);
	if (!hdev->pcie_bar[MSIX_BAR_ID]) {
		dev_err(hdev->dev, "pci_ioremap_bar failed for MSIX\n");
		rc = -ENODEV;
		goto err_unmap_sram_cfg;
	}

	hdev->pcie_bar[DDR_BAR_ID] = pci_ioremap_wc_bar(pdev, DDR_BAR_ID);
	if (!hdev->pcie_bar[DDR_BAR_ID]) {
		dev_err(hdev->dev, "pci_ioremap_bar failed for DDR\n");
		rc = -ENODEV;
		goto err_unmap_msix;
	}

	hdev->rmmio = hdev->pcie_bar[SRAM_CFG_BAR_ID] +
			(CFG_BASE - SRAM_BASE_ADDR);

	return 0;

err_unmap_msix:
	iounmap(hdev->pcie_bar[MSIX_BAR_ID]);
err_unmap_sram_cfg:
	iounmap(hdev->pcie_bar[SRAM_CFG_BAR_ID]);
err_release_regions:
	pci_release_regions(pdev);

	return rc;
}

/*
 * goya_pci_bars_unmap - Unmap PCI BARS of Goya device
 *
 * @hdev: pointer to hl_device structure
 *
 * Release all PCI BARS and unmap their virtual addresses
 *
 */
static void goya_pci_bars_unmap(struct hl_device *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	iounmap(hdev->pcie_bar[DDR_BAR_ID]);
	iounmap(hdev->pcie_bar[MSIX_BAR_ID]);
	iounmap(hdev->pcie_bar[SRAM_CFG_BAR_ID]);
	pci_release_regions(pdev);
}

/*
 * goya_elbi_write - Write through the ELBI interface
 *
 * @hdev: pointer to hl_device structure
 *
 * return 0 on success, -EIO on failure
 *
 */
static int goya_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
{
	struct pci_dev *pdev = hdev->pdev;
	ktime_t timeout;
	u32 val;

	/* Clear previous status */
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, 0);

	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_ADDR, (u32) addr);
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data);
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_CTRL,
				PCI_CONFIG_ELBI_CTRL_WRITE);

	timeout = ktime_add_ms(ktime_get(), 10);
	for (;;) {
		pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, &val);
		if (val & PCI_CONFIG_ELBI_STS_MASK)
			break;
		if (ktime_compare(ktime_get(), timeout) > 0) {
			pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS,
						&val);
			break;
		}
		usleep_range(300, 500);
	}

	if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE)
		return 0;

	if (val & PCI_CONFIG_ELBI_STS_ERR) {
		dev_err(hdev->dev, "Error writing to ELBI\n");
		return -EIO;
	}

	if (!(val & PCI_CONFIG_ELBI_STS_MASK)) {
		dev_err(hdev->dev, "ELBI write didn't finish in time\n");
		return -EIO;
	}

	dev_err(hdev->dev, "ELBI write has undefined bits in status\n");
	return -EIO;
}
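
/*
 * Summary of the ELBI handshake implemented above (explanatory, derived only
 * from the code): the caller programs address, data and a WRITE command
 * through PCI config space, then polls ELBI_STS for up to 10ms. A status of
 * STS_DONE means success, STS_ERR means the device rejected the access, and
 * an empty status after the timeout means the controller never completed the
 * transaction; all failure paths return -EIO.
 */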

/*
 * goya_iatu_write - iatu write routine
 *
 * @hdev: pointer to hl_device structure
 *
 */
static int goya_iatu_write(struct hl_device *hdev, u32 addr, u32 data)
{
	u32 dbi_offset;
	int rc;

	dbi_offset = addr & 0xFFF;

	rc = goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0x00300000);
	rc |= goya_elbi_write(hdev, mmPCIE_DBI_BASE + dbi_offset, data);

	if (rc)
		return -EIO;

	return 0;
}
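
/*
 * Explanatory note (an assumption based on common DesignWare PCIe controller
 * layouts, not stated in this file): the first ELBI write appears to move the
 * DBI window so that it exposes the iATU register region, and the second
 * write then hits the iATU register at the 4KB-aligned offset (addr & 0xFFF).
 * Callers such as goya_init_iatu() and goya_set_ddr_bar_base() restore the
 * DBI window to its default location when they are done.
 */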

static void goya_reset_link_through_bridge(struct hl_device *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct pci_dev *parent_port;
	u16 val;

	parent_port = pdev->bus->self;
	pci_read_config_word(parent_port, PCI_BRIDGE_CONTROL, &val);
	val |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
	ssleep(1);

	val &= ~(PCI_BRIDGE_CTL_BUS_RESET);
	pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
	ssleep(3);
}

/*
 * goya_set_ddr_bar_base - set DDR bar to map specific device address
 *
 * @hdev: pointer to hl_device structure
 * @addr: address in DDR. Must be aligned to DDR bar size
 *
 * This function configures the iATU so that the DDR bar will start at the
 * specified addr.
 *
 */
static int goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr)
{
	struct goya_device *goya = hdev->asic_specific;
	int rc;

	if ((goya) && (goya->ddr_bar_cur_addr == addr))
		return 0;

	/* Inbound Region 1 - Bar 4 - Point to DDR */
	rc = goya_iatu_write(hdev, 0x314, lower_32_bits(addr));
	rc |= goya_iatu_write(hdev, 0x318, upper_32_bits(addr));
	rc |= goya_iatu_write(hdev, 0x300, 0);
	/* Enable + Bar match + match enable + Bar 4 */
	rc |= goya_iatu_write(hdev, 0x304, 0xC0080400);

	/* Return the DBI window to the default location */
	rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0);
	rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI_32, 0);

	if (rc) {
		dev_err(hdev->dev, "failed to map DDR bar to 0x%08llx\n", addr);
		return -EIO;
	}

	if (goya)
		goya->ddr_bar_cur_addr = addr;

	return 0;
}
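
/*
 * Usage note (explanatory, not from the original source): the DDR BAR exposed
 * to the host is smaller than the device DRAM, so code that needs CPU access
 * to an arbitrary DRAM address is expected to re-point the BAR with this
 * function first, passing an address aligned down to the BAR size; the
 * alignment is the caller's responsibility, as the kernel-doc above states.
 * The cached ddr_bar_cur_addr makes repeated calls for the same window cheap.
 */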

/*
 * goya_init_iatu - Initialize the iATU unit inside the PCI controller
 *
 * @hdev: pointer to hl_device structure
 *
 * This is needed in case the firmware doesn't initialize the iATU
 *
 */
static int goya_init_iatu(struct hl_device *hdev)
{
	int rc;

	/* Inbound Region 0 - Bar 0 - Point to SRAM_BASE_ADDR */
	rc = goya_iatu_write(hdev, 0x114, lower_32_bits(SRAM_BASE_ADDR));
	rc |= goya_iatu_write(hdev, 0x118, upper_32_bits(SRAM_BASE_ADDR));
	rc |= goya_iatu_write(hdev, 0x100, 0);
	/* Enable + Bar match + match enable */
	rc |= goya_iatu_write(hdev, 0x104, 0xC0080000);

	/* Inbound Region 1 - Bar 4 - Point to DDR */
	rc |= goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);

	/* Outbound Region 0 - Point to Host */
	rc |= goya_iatu_write(hdev, 0x008, lower_32_bits(HOST_PHYS_BASE));
	rc |= goya_iatu_write(hdev, 0x00C, upper_32_bits(HOST_PHYS_BASE));
	rc |= goya_iatu_write(hdev, 0x010,
			lower_32_bits(HOST_PHYS_BASE + HOST_PHYS_SIZE - 1));
	rc |= goya_iatu_write(hdev, 0x014, 0);
	rc |= goya_iatu_write(hdev, 0x018, 0);
	rc |= goya_iatu_write(hdev, 0x020,
			upper_32_bits(HOST_PHYS_BASE + HOST_PHYS_SIZE - 1));
	/* Increase region size */
	rc |= goya_iatu_write(hdev, 0x000, 0x00002000);
	/* Enable */
	rc |= goya_iatu_write(hdev, 0x004, 0x80000000);

	/* Return the DBI window to the default location */
	rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0);
	rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI_32, 0);

	if (rc)
		return -EIO;

	return 0;
}

/*
 * goya_early_init - GOYA early initialization code
 *
 * @hdev: pointer to hl_device structure
 *
 * Verify PCI bars
 * Set DMA masks
 * PCI controller initialization
 * Map PCI bars
 *
 */
static int goya_early_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct pci_dev *pdev = hdev->pdev;
	u32 val;
	int rc;

	goya_get_fixed_properties(hdev);

	/* Check BAR sizes */
	if (pci_resource_len(pdev, SRAM_CFG_BAR_ID) != CFG_BAR_SIZE) {
		dev_err(hdev->dev,
			"Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
			SRAM_CFG_BAR_ID,
			(unsigned long long) pci_resource_len(pdev,
							SRAM_CFG_BAR_ID),
			CFG_BAR_SIZE);
		return -ENODEV;
	}

	if (pci_resource_len(pdev, MSIX_BAR_ID) != MSIX_BAR_SIZE) {
		dev_err(hdev->dev,
			"Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
			MSIX_BAR_ID,
			(unsigned long long) pci_resource_len(pdev,
								MSIX_BAR_ID),
			MSIX_BAR_SIZE);
		return -ENODEV;
	}

	prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID);

	/* set DMA mask for GOYA */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
	if (rc) {
		dev_warn(hdev->dev, "Unable to set pci dma mask to 39 bits\n");
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(hdev->dev,
				"Unable to set pci dma mask to 32 bits\n");
			return rc;
		}
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
	if (rc) {
		dev_warn(hdev->dev,
			"Unable to set pci consistent dma mask to 39 bits\n");
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(hdev->dev,
				"Unable to set pci consistent dma mask to 32 bits\n");
			return rc;
		}
	}

	if (hdev->reset_pcilink)
		goya_reset_link_through_bridge(hdev);

	rc = pci_enable_device_mem(pdev);
	if (rc) {
		dev_err(hdev->dev, "can't enable PCI device\n");
		return rc;
	}

	pci_set_master(pdev);

	rc = goya_init_iatu(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to initialize iATU\n");
		goto disable_device;
	}

	rc = goya_pci_bars_map(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to initialize PCI BARS\n");
		goto disable_device;
	}

	if (!hdev->pldm) {
		val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
		if (val & PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK)
			dev_warn(hdev->dev,
				"PCI strap is not configured correctly, PCI bus errors may occur\n");
	}

	return 0;

disable_device:
	pci_clear_master(pdev);
	pci_disable_device(pdev);

	return rc;
}

/*
 * goya_early_fini - GOYA early finalization code
 *
 * @hdev: pointer to hl_device structure
 *
 * Unmap PCI bars
 *
 */
static int goya_early_fini(struct hl_device *hdev)
{
	goya_pci_bars_unmap(hdev);

	pci_clear_master(hdev->pdev);
	pci_disable_device(hdev->pdev);

	return 0;
}

/*
 * goya_fetch_psoc_frequency - Fetch PSOC frequency values
 *
 * @hdev: pointer to hl_device structure
 *
 */
static void goya_fetch_psoc_frequency(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	prop->psoc_pci_pll_nr = RREG32(mmPSOC_PCI_PLL_NR);
	prop->psoc_pci_pll_nf = RREG32(mmPSOC_PCI_PLL_NF);
	prop->psoc_pci_pll_od = RREG32(mmPSOC_PCI_PLL_OD);
	prop->psoc_pci_pll_div_factor = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
}

/*
 * goya_late_init - GOYA late initialization code
 *
 * @hdev: pointer to hl_device structure
 *
 * Get ArmCP info and send message to CPU to enable PCI access
 */
static int goya_late_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct goya_device *goya = hdev->asic_specific;
	int rc;

	rc = goya->armcp_info_get(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to get armcp info\n");
		return rc;
	}

	/* Now that we have the DRAM size in ASIC prop, we need to check
	 * its size and configure the DMA_IF DDR wrap protection (which is in
	 * the MMU block) accordingly. The value is the log2 of the DRAM size
	 */
	WREG32(mmMMU_LOG2_DDR_SIZE, ilog2(prop->dram_size));

	rc = hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
	if (rc) {
		dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
		return rc;
	}

	WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
			GOYA_ASYNC_EVENT_ID_INTS_REGISTER);

	goya_fetch_psoc_frequency(hdev);

	rc = goya_mmu_clear_pgt_range(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to clear MMU page tables range\n");
		goto disable_pci_access;
	}

	rc = goya_mmu_set_dram_default_page(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to set DRAM default page\n");
		goto disable_pci_access;
	}

	return 0;

disable_pci_access:
	hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);

	return rc;
}

/*
 * goya_late_fini - GOYA late tear-down code
 *
 * @hdev: pointer to hl_device structure
 *
 * Free sensors allocated structures
 */
void goya_late_fini(struct hl_device *hdev)
{
	const struct hwmon_channel_info **channel_info_arr;
	int i = 0;

	if (!hdev->hl_chip_info->info)
		return;

	channel_info_arr = hdev->hl_chip_info->info;

	while (channel_info_arr[i]) {
		kfree(channel_info_arr[i]->config);
		kfree(channel_info_arr[i]);
		i++;
	}

	kfree(channel_info_arr);

	hdev->hl_chip_info->info = NULL;
}

/*
 * goya_sw_init - Goya software initialization code
 *
 * @hdev: pointer to hl_device structure
 *
 */
static int goya_sw_init(struct hl_device *hdev)
{
	struct goya_device *goya;
	int rc;

	/* Allocate device structure */
	goya = kzalloc(sizeof(*goya), GFP_KERNEL);
	if (!goya)
		return -ENOMEM;

	goya->test_cpu_queue = goya_test_cpu_queue;
	goya->armcp_info_get = goya_armcp_info_get;

	/* according to goya_init_iatu */
	goya->ddr_bar_cur_addr = DRAM_PHYS_BASE;

	goya->mme_clk = GOYA_PLL_FREQ_LOW;
	goya->tpc_clk = GOYA_PLL_FREQ_LOW;
	goya->ic_clk = GOYA_PLL_FREQ_LOW;

	hdev->asic_specific = goya;

	/* Create DMA pool for small allocations */
	hdev->dma_pool = dma_pool_create(dev_name(hdev->dev),
			&hdev->pdev->dev, GOYA_DMA_POOL_BLK_SIZE, 8, 0);
	if (!hdev->dma_pool) {
		dev_err(hdev->dev, "failed to create DMA pool\n");
		rc = -ENOMEM;
		goto free_goya_device;
	}

	hdev->cpu_accessible_dma_mem =
			hdev->asic_funcs->dma_alloc_coherent(hdev,
					HL_CPU_ACCESSIBLE_MEM_SIZE,
					&hdev->cpu_accessible_dma_address,
					GFP_KERNEL | __GFP_ZERO);

	if (!hdev->cpu_accessible_dma_mem) {
		rc = -ENOMEM;
		goto free_dma_pool;
	}

	hdev->cpu_accessible_dma_pool = gen_pool_create(HL_CPU_PKT_SHIFT, -1);
	if (!hdev->cpu_accessible_dma_pool) {
		dev_err(hdev->dev,
			"Failed to create CPU accessible DMA pool\n");
		rc = -ENOMEM;
		goto free_cpu_pq_dma_mem;
	}

	rc = gen_pool_add(hdev->cpu_accessible_dma_pool,
				(uintptr_t) hdev->cpu_accessible_dma_mem,
				HL_CPU_ACCESSIBLE_MEM_SIZE, -1);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to add memory to CPU accessible DMA pool\n");
		rc = -EFAULT;
		goto free_cpu_pq_pool;
	}

	spin_lock_init(&goya->hw_queues_lock);

	return 0;

free_cpu_pq_pool:
	gen_pool_destroy(hdev->cpu_accessible_dma_pool);
free_cpu_pq_dma_mem:
	hdev->asic_funcs->dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE,
			hdev->cpu_accessible_dma_mem,
			hdev->cpu_accessible_dma_address);
free_dma_pool:
	dma_pool_destroy(hdev->dma_pool);
free_goya_device:
	kfree(goya);

	return rc;
}

/*
 * goya_sw_fini - Goya software tear-down code
 *
 * @hdev: pointer to hl_device structure
 *
 */
static int goya_sw_fini(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;

	gen_pool_destroy(hdev->cpu_accessible_dma_pool);

	hdev->asic_funcs->dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE,
			hdev->cpu_accessible_dma_mem,
			hdev->cpu_accessible_dma_address);

	dma_pool_destroy(hdev->dma_pool);

	kfree(goya);

	return 0;
}

static void goya_init_dma_qman(struct hl_device *hdev, int dma_id,
				dma_addr_t bus_address)
{
	struct goya_device *goya = hdev->asic_specific;
	u32 mtr_base_lo, mtr_base_hi;
	u32 so_base_lo, so_base_hi;
	u32 gic_base_lo, gic_base_hi;
	u32 reg_off = dma_id * (mmDMA_QM_1_PQ_PI - mmDMA_QM_0_PQ_PI);

	mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
	mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
	so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
	so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);

	gic_base_lo =
		lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
	gic_base_hi =
		upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);

	WREG32(mmDMA_QM_0_PQ_BASE_LO + reg_off, lower_32_bits(bus_address));
	WREG32(mmDMA_QM_0_PQ_BASE_HI + reg_off, upper_32_bits(bus_address));

	WREG32(mmDMA_QM_0_PQ_SIZE + reg_off, ilog2(HL_QUEUE_LENGTH));
	WREG32(mmDMA_QM_0_PQ_PI + reg_off, 0);
	WREG32(mmDMA_QM_0_PQ_CI + reg_off, 0);

	WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
	WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
	WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
	WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
	WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
	WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
	WREG32(mmDMA_QM_0_GLBL_ERR_WDATA + reg_off,
			GOYA_ASYNC_EVENT_ID_DMA0_QM + dma_id);

	/* PQ has buffer of 2 cache lines, while CQ has 8 lines */
	WREG32(mmDMA_QM_0_PQ_CFG1 + reg_off, 0x00020002);
	WREG32(mmDMA_QM_0_CQ_CFG1 + reg_off, 0x00080008);

	if (goya->hw_cap_initialized & HW_CAP_MMU)
		WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_PARTLY_TRUSTED);
	else
		WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_FULLY_TRUSTED);

	WREG32(mmDMA_QM_0_GLBL_ERR_CFG + reg_off, QMAN_DMA_ERR_MSG_EN);
	WREG32(mmDMA_QM_0_GLBL_CFG0 + reg_off, QMAN_DMA_ENABLE);
}
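
/*
 * Explanatory note (a reading of the code above, not from the original
 * source): each DMA QMAN is given its host PQ base and size, the sync-manager
 * monitor and sync-object base addresses for its two message ports, and the
 * GIC address plus event ID to write on an error. The trust level chosen at
 * the end matches the security scheme described at the top of this file:
 * with the MMU on, the QMAN is only partly trusted because the DMA engine
 * itself is no longer secured.
 */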

static void goya_init_dma_ch(struct hl_device *hdev, int dma_id)
{
	u32 gic_base_lo, gic_base_hi;
	u64 sob_addr;
	u32 reg_off = dma_id * (mmDMA_CH_1_CFG1 - mmDMA_CH_0_CFG1);

	gic_base_lo =
		lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
	gic_base_hi =
		upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);

	WREG32(mmDMA_CH_0_ERRMSG_ADDR_LO + reg_off, gic_base_lo);
	WREG32(mmDMA_CH_0_ERRMSG_ADDR_HI + reg_off, gic_base_hi);
	WREG32(mmDMA_CH_0_ERRMSG_WDATA + reg_off,
			GOYA_ASYNC_EVENT_ID_DMA0_CH + dma_id);

	if (dma_id)
		sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1000 +
				(dma_id - 1) * 4;
	else
		sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1007;

	WREG32(mmDMA_CH_0_WR_COMP_ADDR_LO + reg_off, lower_32_bits(sob_addr));
	WREG32(mmDMA_CH_0_WR_COMP_ADDR_HI + reg_off, upper_32_bits(sob_addr));
	WREG32(mmDMA_CH_0_WR_COMP_WDATA + reg_off, 0x80000001);
}

/*
 * goya_init_dma_qmans - Initialize QMAN DMA registers
 *
 * @hdev: pointer to hl_device structure
 *
 * Initialize the H/W registers of the QMAN DMA channels
 *
 */
static void goya_init_dma_qmans(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	struct hl_hw_queue *q;
	dma_addr_t bus_address;
	int i;

	if (goya->hw_cap_initialized & HW_CAP_DMA)
		return;

	q = &hdev->kernel_queues[0];

	for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++, q++) {
		bus_address = q->bus_address +
				hdev->asic_prop.host_phys_base_address;

		goya_init_dma_qman(hdev, i, bus_address);
		goya_init_dma_ch(hdev, i);
	}

	goya->hw_cap_initialized |= HW_CAP_DMA;
}

/*
 * goya_disable_external_queues - Disable external queues
 *
 * @hdev: pointer to hl_device structure
 *
 */
static void goya_disable_external_queues(struct hl_device *hdev)
{
	WREG32(mmDMA_QM_0_GLBL_CFG0, 0);
	WREG32(mmDMA_QM_1_GLBL_CFG0, 0);
	WREG32(mmDMA_QM_2_GLBL_CFG0, 0);
	WREG32(mmDMA_QM_3_GLBL_CFG0, 0);
	WREG32(mmDMA_QM_4_GLBL_CFG0, 0);
}

static int goya_stop_queue(struct hl_device *hdev, u32 cfg_reg,
				u32 cp_sts_reg, u32 glbl_sts0_reg)
{
	int rc;
	u32 status;

	/* use the values of TPC0 as they are all the same */

	WREG32(cfg_reg, 1 << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);

	status = RREG32(cp_sts_reg);
	if (status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK) {
		rc = hl_poll_timeout(
			hdev,
			cp_sts_reg,
			status,
			!(status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK),
			1000,
			QMAN_FENCE_TIMEOUT_USEC);

		/* if QMAN is stuck in fence no need to check for stop */
		if (rc)
			return 0;
	}

	rc = hl_poll_timeout(
		hdev,
		glbl_sts0_reg,
		status,
		(status & TPC0_QM_GLBL_STS0_CP_IS_STOP_MASK),
		1000,
		QMAN_STOP_TIMEOUT_USEC);

	if (rc) {
		dev_err(hdev->dev,
			"Timeout while waiting for QMAN to stop\n");
		return -EINVAL;
	}

	return 0;
}
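
/*
 * Stop protocol implemented above (explanatory summary): assert CP_STOP in
 * the queue's CFG1 register, and if the CP is currently blocked on a FENCE
 * packet, first wait for the fence to drop; a queue that stays stuck on a
 * fence is treated as already stopped. Otherwise poll GLBL_STS0 until the CP
 * reports CP_IS_STOP, and fail with -EINVAL on timeout.
 */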

/*
 * goya_stop_external_queues - Stop external queues
 *
 * @hdev: pointer to hl_device structure
 *
 * Returns 0 on success
 *
 */
static int goya_stop_external_queues(struct hl_device *hdev)
{
	int rc, retval = 0;

	rc = goya_stop_queue(hdev,
			mmDMA_QM_0_GLBL_CFG1,
			mmDMA_QM_0_CP_STS,
			mmDMA_QM_0_GLBL_STS0);

	if (rc) {
		dev_err(hdev->dev, "failed to stop DMA QMAN 0\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev,
			mmDMA_QM_1_GLBL_CFG1,
			mmDMA_QM_1_CP_STS,
			mmDMA_QM_1_GLBL_STS0);

	if (rc) {
		dev_err(hdev->dev, "failed to stop DMA QMAN 1\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev,
			mmDMA_QM_2_GLBL_CFG1,
			mmDMA_QM_2_CP_STS,
			mmDMA_QM_2_GLBL_STS0);

	if (rc) {
		dev_err(hdev->dev, "failed to stop DMA QMAN 2\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev,
			mmDMA_QM_3_GLBL_CFG1,
			mmDMA_QM_3_CP_STS,
			mmDMA_QM_3_GLBL_STS0);

	if (rc) {
		dev_err(hdev->dev, "failed to stop DMA QMAN 3\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev,
			mmDMA_QM_4_GLBL_CFG1,
			mmDMA_QM_4_CP_STS,
			mmDMA_QM_4_GLBL_STS0);

	if (rc) {
		dev_err(hdev->dev, "failed to stop DMA QMAN 4\n");
		retval = -EIO;
	}

	return retval;
}

/*
 * goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU
 *
 * @hdev: pointer to hl_device structure
 *
 * Returns 0 on success
 *
 */
static int goya_init_cpu_queues(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	struct hl_eq *eq;
	dma_addr_t bus_address;
	u32 status;
	struct hl_hw_queue *cpu_pq = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ];
	int err;

	if (!hdev->cpu_queues_enable)
		return 0;

	if (goya->hw_cap_initialized & HW_CAP_CPU_Q)
		return 0;

	eq = &hdev->event_queue;

	bus_address = cpu_pq->bus_address +
			hdev->asic_prop.host_phys_base_address;
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_0, lower_32_bits(bus_address));
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_1, upper_32_bits(bus_address));

	bus_address = eq->bus_address + hdev->asic_prop.host_phys_base_address;
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_2, lower_32_bits(bus_address));
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_3, upper_32_bits(bus_address));

	bus_address = hdev->cpu_accessible_dma_address +
			hdev->asic_prop.host_phys_base_address;
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_8, lower_32_bits(bus_address));
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_9, upper_32_bits(bus_address));

	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_5, HL_QUEUE_SIZE_IN_BYTES);
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_4, HL_EQ_SIZE_IN_BYTES);
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_10, HL_CPU_ACCESSIBLE_MEM_SIZE);

	/* Used for EQ CI */
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, 0);

	WREG32(mmCPU_IF_PF_PQ_PI, 0);

	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_7, PQ_INIT_STATUS_READY_FOR_CP);

	WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
			GOYA_ASYNC_EVENT_ID_PI_UPDATE);

	err = hl_poll_timeout(
		hdev,
		mmPSOC_GLOBAL_CONF_SCRATCHPAD_7,
		status,
		(status == PQ_INIT_STATUS_READY_FOR_HOST),
		1000,
		GOYA_CPU_TIMEOUT_USEC);

	if (err) {
		dev_err(hdev->dev,
			"Failed to communicate with ARM CPU (ArmCP timeout)\n");
		return -EIO;
	}

	goya->hw_cap_initialized |= HW_CAP_CPU_Q;
	return 0;
}
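
/*
 * Explanatory note (a summary of the code above): the scratchpad registers
 * form an informal mailbox to the embedded CPU. Registers 0/1 carry the PQ
 * address, 2/3 the event queue address, 8/9 the CPU-accessible memory region,
 * 5/4/10 the corresponding sizes, 6 the EQ consumer index, and 7 the
 * handshake word. The driver writes READY_FOR_CP, kicks the CPU via the GIC,
 * then polls until ArmCP flips the word to READY_FOR_HOST, which is the only
 * confirmation that the firmware consumed the addresses.
 */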

static void goya_set_pll_refclk(struct hl_device *hdev)
{
	WREG32(mmCPU_PLL_DIV_SEL_0, 0x0);
	WREG32(mmCPU_PLL_DIV_SEL_1, 0x0);
	WREG32(mmCPU_PLL_DIV_SEL_2, 0x0);
	WREG32(mmCPU_PLL_DIV_SEL_3, 0x0);

	WREG32(mmIC_PLL_DIV_SEL_0, 0x0);
	WREG32(mmIC_PLL_DIV_SEL_1, 0x0);
	WREG32(mmIC_PLL_DIV_SEL_2, 0x0);
	WREG32(mmIC_PLL_DIV_SEL_3, 0x0);

	WREG32(mmMC_PLL_DIV_SEL_0, 0x0);
	WREG32(mmMC_PLL_DIV_SEL_1, 0x0);
	WREG32(mmMC_PLL_DIV_SEL_2, 0x0);
	WREG32(mmMC_PLL_DIV_SEL_3, 0x0);

	WREG32(mmPSOC_MME_PLL_DIV_SEL_0, 0x0);
	WREG32(mmPSOC_MME_PLL_DIV_SEL_1, 0x0);
	WREG32(mmPSOC_MME_PLL_DIV_SEL_2, 0x0);
	WREG32(mmPSOC_MME_PLL_DIV_SEL_3, 0x0);

	WREG32(mmPSOC_PCI_PLL_DIV_SEL_0, 0x0);
	WREG32(mmPSOC_PCI_PLL_DIV_SEL_1, 0x0);
	WREG32(mmPSOC_PCI_PLL_DIV_SEL_2, 0x0);
	WREG32(mmPSOC_PCI_PLL_DIV_SEL_3, 0x0);

	WREG32(mmPSOC_EMMC_PLL_DIV_SEL_0, 0x0);
	WREG32(mmPSOC_EMMC_PLL_DIV_SEL_1, 0x0);
	WREG32(mmPSOC_EMMC_PLL_DIV_SEL_2, 0x0);
	WREG32(mmPSOC_EMMC_PLL_DIV_SEL_3, 0x0);

	WREG32(mmTPC_PLL_DIV_SEL_0, 0x0);
	WREG32(mmTPC_PLL_DIV_SEL_1, 0x0);
	WREG32(mmTPC_PLL_DIV_SEL_2, 0x0);
	WREG32(mmTPC_PLL_DIV_SEL_3, 0x0);
}

static void goya_disable_clk_rlx(struct hl_device *hdev)
{
	WREG32(mmPSOC_MME_PLL_CLK_RLX_0, 0x100010);
	WREG32(mmIC_PLL_CLK_RLX_0, 0x100010);
}

static void _goya_tpc_mbist_workaround(struct hl_device *hdev, u8 tpc_id)
{
	u64 tpc_eml_address;
	u32 val, tpc_offset, tpc_eml_offset, tpc_slm_offset;
	int err, slm_index;

	tpc_offset = tpc_id * 0x40000;
	tpc_eml_offset = tpc_id * 0x200000;
	tpc_eml_address = (mmTPC0_EML_CFG_BASE + tpc_eml_offset - CFG_BASE);
	tpc_slm_offset = tpc_eml_address + 0x100000;

	/*
	 * Workaround for Bug H2 #2443 :
	 * "TPC SB is not initialized on chip reset"
	 */

	val = RREG32(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset);
	if (val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_ACTIVE_MASK)
		dev_warn(hdev->dev, "TPC%d MBIST ACTIVE is not cleared\n",
				tpc_id);

	WREG32(mmTPC0_CFG_FUNC_MBIST_PAT + tpc_offset, val & 0xFFFFF000);

	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_0 + tpc_offset, 0x37FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_1 + tpc_offset, 0x303F);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_2 + tpc_offset, 0x71FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_3 + tpc_offset, 0x71FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_4 + tpc_offset, 0x70FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_5 + tpc_offset, 0x70FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_6 + tpc_offset, 0x70FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_7 + tpc_offset, 0x70FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_8 + tpc_offset, 0x70FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_9 + tpc_offset, 0x70FF);

	WREG32_OR(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset,
			1 << TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_START_SHIFT);

	err = hl_poll_timeout(
		hdev,
		mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset,
		val,
		(val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_DONE_MASK),
		1000,
		HL_DEVICE_TIMEOUT_USEC);

	if (err)
		dev_err(hdev->dev,
			"Timeout while waiting for TPC%d MBIST DONE\n", tpc_id);

	WREG32_OR(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset,
			1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT);

	msleep(GOYA_RESET_WAIT_MSEC);

	WREG32_AND(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset,
			~(1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT));

	msleep(GOYA_RESET_WAIT_MSEC);

	for (slm_index = 0 ; slm_index < 256 ; slm_index++)
		WREG32(tpc_slm_offset + (slm_index << 2), 0);

	val = RREG32(tpc_slm_offset);
}
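
/*
 * Explanatory summary of the workaround above (derived from the code, not
 * from the original comments): force the memory BIST through a full pass
 * (pattern plus per-memory configuration, then MBIST_START), wait for
 * MBIST_DONE, pulse the TPC core reset around two short delays, and finally
 * zero the first 256 words of the TPC's SLM so the store buffer starts from
 * known contents. The trailing read presumably just flushes the posted
 * writes.
 */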
1364
1365static void goya_tpc_mbist_workaround(struct hl_device *hdev)
1366{
1367 struct goya_device *goya = hdev->asic_specific;
1368 int i;
1369
1370 if (hdev->pldm)
1371 return;
1372
1373 if (goya->hw_cap_initialized & HW_CAP_TPC_MBIST)
1374 return;
1375
1376 /* Workaround for H2 #2443 */
1377
1378 for (i = 0 ; i < TPC_MAX_NUM ; i++)
1379 _goya_tpc_mbist_workaround(hdev, i);
1380
1381 goya->hw_cap_initialized |= HW_CAP_TPC_MBIST;
1382}
1383
1384/*
1385 * goya_init_golden_registers - Initialize golden registers
1386 *
1387 * @hdev: pointer to hl_device structure
1388 *
1389 * Initialize the H/W registers of the device
1390 *
1391 */
1392static void goya_init_golden_registers(struct hl_device *hdev)
1393{
1394 struct goya_device *goya = hdev->asic_specific;
1395 u32 polynom[10], tpc_intr_mask, offset;
1396 int i;
1397
1398 if (goya->hw_cap_initialized & HW_CAP_GOLDEN)
1399 return;
1400
1401 polynom[0] = 0x00020080;
1402 polynom[1] = 0x00401000;
1403 polynom[2] = 0x00200800;
1404 polynom[3] = 0x00002000;
1405 polynom[4] = 0x00080200;
1406 polynom[5] = 0x00040100;
1407 polynom[6] = 0x00100400;
1408 polynom[7] = 0x00004000;
1409 polynom[8] = 0x00010000;
1410 polynom[9] = 0x00008000;
1411
1412 /* Mask all arithmetic interrupts from TPC */
1413 tpc_intr_mask = 0x7FFF;
1414
1415 for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x20000) {
1416 WREG32(mmSRAM_Y0_X0_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1417 WREG32(mmSRAM_Y0_X1_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1418 WREG32(mmSRAM_Y0_X2_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1419 WREG32(mmSRAM_Y0_X3_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1420 WREG32(mmSRAM_Y0_X4_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1421
1422 WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_L_ARB + offset, 0x204);
1423 WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_L_ARB + offset, 0x204);
1424 WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_L_ARB + offset, 0x204);
1425 WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_L_ARB + offset, 0x204);
1426 WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_L_ARB + offset, 0x204);
1427
1428
1429 WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_E_ARB + offset, 0x206);
1430 WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_E_ARB + offset, 0x206);
1431 WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_E_ARB + offset, 0x206);
1432 WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_E_ARB + offset, 0x207);
1433 WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_E_ARB + offset, 0x207);
1434
1435 WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_W_ARB + offset, 0x207);
1436 WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_W_ARB + offset, 0x207);
1437 WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_W_ARB + offset, 0x206);
1438 WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_W_ARB + offset, 0x206);
1439 WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_W_ARB + offset, 0x206);
1440
1441 WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_E_ARB + offset, 0x101);
1442 WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_E_ARB + offset, 0x102);
1443 WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_E_ARB + offset, 0x103);
1444 WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_E_ARB + offset, 0x104);
1445 WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_E_ARB + offset, 0x105);
1446
1447 WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_W_ARB + offset, 0x105);
1448 WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_W_ARB + offset, 0x104);
1449 WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_W_ARB + offset, 0x103);
1450 WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_W_ARB + offset, 0x102);
1451 WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_W_ARB + offset, 0x101);
1452 }
1453
1454 WREG32(mmMME_STORE_MAX_CREDIT, 0x21);
1455 WREG32(mmMME_AGU, 0x0f0f0f10);
1456 WREG32(mmMME_SEI_MASK, ~0x0);
1457
1458 WREG32(mmMME6_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1459 WREG32(mmMME5_RTR_HBW_RD_RQ_N_ARB, 0x01040101);
1460 WREG32(mmMME4_RTR_HBW_RD_RQ_N_ARB, 0x01030101);
1461 WREG32(mmMME3_RTR_HBW_RD_RQ_N_ARB, 0x01020101);
1462 WREG32(mmMME2_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1463 WREG32(mmMME1_RTR_HBW_RD_RQ_N_ARB, 0x07010701);
1464 WREG32(mmMME6_RTR_HBW_RD_RQ_S_ARB, 0x04010401);
1465 WREG32(mmMME5_RTR_HBW_RD_RQ_S_ARB, 0x04050401);
1466 WREG32(mmMME4_RTR_HBW_RD_RQ_S_ARB, 0x03070301);
1467 WREG32(mmMME3_RTR_HBW_RD_RQ_S_ARB, 0x01030101);
1468 WREG32(mmMME2_RTR_HBW_RD_RQ_S_ARB, 0x01040101);
1469 WREG32(mmMME1_RTR_HBW_RD_RQ_S_ARB, 0x01050105);
1470 WREG32(mmMME6_RTR_HBW_RD_RQ_W_ARB, 0x01010501);
1471 WREG32(mmMME5_RTR_HBW_RD_RQ_W_ARB, 0x01010501);
1472 WREG32(mmMME4_RTR_HBW_RD_RQ_W_ARB, 0x01040301);
1473 WREG32(mmMME3_RTR_HBW_RD_RQ_W_ARB, 0x01030401);
1474 WREG32(mmMME2_RTR_HBW_RD_RQ_W_ARB, 0x01040101);
1475 WREG32(mmMME1_RTR_HBW_RD_RQ_W_ARB, 0x01050101);
1476 WREG32(mmMME6_RTR_HBW_WR_RQ_N_ARB, 0x02020202);
1477 WREG32(mmMME5_RTR_HBW_WR_RQ_N_ARB, 0x01070101);
1478 WREG32(mmMME4_RTR_HBW_WR_RQ_N_ARB, 0x02020201);
1479 WREG32(mmMME3_RTR_HBW_WR_RQ_N_ARB, 0x07020701);
1480 WREG32(mmMME2_RTR_HBW_WR_RQ_N_ARB, 0x01020101);
1481 WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
1482 WREG32(mmMME6_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
1483 WREG32(mmMME5_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
1484 WREG32(mmMME4_RTR_HBW_WR_RQ_S_ARB, 0x07020701);
1485 WREG32(mmMME3_RTR_HBW_WR_RQ_S_ARB, 0x02020201);
1486 WREG32(mmMME2_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
1487 WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01020102);
1488 WREG32(mmMME6_RTR_HBW_WR_RQ_W_ARB, 0x01020701);
1489 WREG32(mmMME5_RTR_HBW_WR_RQ_W_ARB, 0x01020701);
1490 WREG32(mmMME4_RTR_HBW_WR_RQ_W_ARB, 0x07020707);
1491 WREG32(mmMME3_RTR_HBW_WR_RQ_W_ARB, 0x01020201);
1492 WREG32(mmMME2_RTR_HBW_WR_RQ_W_ARB, 0x01070201);
1493 WREG32(mmMME1_RTR_HBW_WR_RQ_W_ARB, 0x01070201);
1494 WREG32(mmMME6_RTR_HBW_RD_RS_N_ARB, 0x01070102);
1495 WREG32(mmMME5_RTR_HBW_RD_RS_N_ARB, 0x01070102);
1496 WREG32(mmMME4_RTR_HBW_RD_RS_N_ARB, 0x01060102);
1497 WREG32(mmMME3_RTR_HBW_RD_RS_N_ARB, 0x01040102);
1498 WREG32(mmMME2_RTR_HBW_RD_RS_N_ARB, 0x01020102);
1499 WREG32(mmMME1_RTR_HBW_RD_RS_N_ARB, 0x01020107);
1500 WREG32(mmMME6_RTR_HBW_RD_RS_S_ARB, 0x01020106);
1501 WREG32(mmMME5_RTR_HBW_RD_RS_S_ARB, 0x01020102);
1502 WREG32(mmMME4_RTR_HBW_RD_RS_S_ARB, 0x01040102);
1503 WREG32(mmMME3_RTR_HBW_RD_RS_S_ARB, 0x01060102);
1504 WREG32(mmMME2_RTR_HBW_RD_RS_S_ARB, 0x01070102);
1505 WREG32(mmMME1_RTR_HBW_RD_RS_S_ARB, 0x01070102);
1506 WREG32(mmMME6_RTR_HBW_RD_RS_E_ARB, 0x01020702);
1507 WREG32(mmMME5_RTR_HBW_RD_RS_E_ARB, 0x01020702);
1508 WREG32(mmMME4_RTR_HBW_RD_RS_E_ARB, 0x01040602);
1509 WREG32(mmMME3_RTR_HBW_RD_RS_E_ARB, 0x01060402);
1510 WREG32(mmMME2_RTR_HBW_RD_RS_E_ARB, 0x01070202);
1511 WREG32(mmMME1_RTR_HBW_RD_RS_E_ARB, 0x01070102);
1512 WREG32(mmMME6_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1513 WREG32(mmMME5_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1514 WREG32(mmMME4_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1515 WREG32(mmMME3_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1516 WREG32(mmMME2_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1517 WREG32(mmMME1_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1518 WREG32(mmMME6_RTR_HBW_WR_RS_N_ARB, 0x01050101);
1519 WREG32(mmMME5_RTR_HBW_WR_RS_N_ARB, 0x01040101);
1520 WREG32(mmMME4_RTR_HBW_WR_RS_N_ARB, 0x01030101);
1521 WREG32(mmMME3_RTR_HBW_WR_RS_N_ARB, 0x01020101);
1522 WREG32(mmMME2_RTR_HBW_WR_RS_N_ARB, 0x01010101);
1523 WREG32(mmMME1_RTR_HBW_WR_RS_N_ARB, 0x01010107);
1524 WREG32(mmMME6_RTR_HBW_WR_RS_S_ARB, 0x01010107);
1525 WREG32(mmMME5_RTR_HBW_WR_RS_S_ARB, 0x01010101);
1526 WREG32(mmMME4_RTR_HBW_WR_RS_S_ARB, 0x01020101);
1527 WREG32(mmMME3_RTR_HBW_WR_RS_S_ARB, 0x01030101);
1528 WREG32(mmMME2_RTR_HBW_WR_RS_S_ARB, 0x01040101);
1529 WREG32(mmMME1_RTR_HBW_WR_RS_S_ARB, 0x01050101);
1530 WREG32(mmMME6_RTR_HBW_WR_RS_E_ARB, 0x01010501);
1531 WREG32(mmMME5_RTR_HBW_WR_RS_E_ARB, 0x01010501);
1532 WREG32(mmMME4_RTR_HBW_WR_RS_E_ARB, 0x01040301);
1533 WREG32(mmMME3_RTR_HBW_WR_RS_E_ARB, 0x01030401);
1534 WREG32(mmMME2_RTR_HBW_WR_RS_E_ARB, 0x01040101);
1535 WREG32(mmMME1_RTR_HBW_WR_RS_E_ARB, 0x01050101);
1536 WREG32(mmMME6_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1537 WREG32(mmMME5_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1538 WREG32(mmMME4_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1539 WREG32(mmMME3_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1540 WREG32(mmMME2_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1541 WREG32(mmMME1_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1542
1543 WREG32(mmTPC1_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1544 WREG32(mmTPC1_RTR_HBW_RD_RQ_S_ARB, 0x01010101);
1545 WREG32(mmTPC1_RTR_HBW_RD_RQ_E_ARB, 0x01060101);
1546 WREG32(mmTPC1_RTR_HBW_WR_RQ_N_ARB, 0x02020102);
1547 WREG32(mmTPC1_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
1548 WREG32(mmTPC1_RTR_HBW_WR_RQ_E_ARB, 0x02070202);
1549 WREG32(mmTPC1_RTR_HBW_RD_RS_N_ARB, 0x01020201);
1550 WREG32(mmTPC1_RTR_HBW_RD_RS_S_ARB, 0x01070201);
1551 WREG32(mmTPC1_RTR_HBW_RD_RS_W_ARB, 0x01070202);
1552 WREG32(mmTPC1_RTR_HBW_WR_RS_N_ARB, 0x01010101);
1553 WREG32(mmTPC1_RTR_HBW_WR_RS_S_ARB, 0x01050101);
1554 WREG32(mmTPC1_RTR_HBW_WR_RS_W_ARB, 0x01050101);
1555
1556 WREG32(mmTPC2_RTR_HBW_RD_RQ_N_ARB, 0x01020101);
1557 WREG32(mmTPC2_RTR_HBW_RD_RQ_S_ARB, 0x01050101);
1558 WREG32(mmTPC2_RTR_HBW_RD_RQ_E_ARB, 0x01010201);
1559 WREG32(mmTPC2_RTR_HBW_WR_RQ_N_ARB, 0x02040102);
1560 WREG32(mmTPC2_RTR_HBW_WR_RQ_S_ARB, 0x01050101);
1561 WREG32(mmTPC2_RTR_HBW_WR_RQ_E_ARB, 0x02060202);
1562 WREG32(mmTPC2_RTR_HBW_RD_RS_N_ARB, 0x01020201);
1563 WREG32(mmTPC2_RTR_HBW_RD_RS_S_ARB, 0x01070201);
1564 WREG32(mmTPC2_RTR_HBW_RD_RS_W_ARB, 0x01070202);
1565 WREG32(mmTPC2_RTR_HBW_WR_RS_N_ARB, 0x01010101);
1566 WREG32(mmTPC2_RTR_HBW_WR_RS_S_ARB, 0x01040101);
1567 WREG32(mmTPC2_RTR_HBW_WR_RS_W_ARB, 0x01040101);
1568
1569 WREG32(mmTPC3_RTR_HBW_RD_RQ_N_ARB, 0x01030101);
1570 WREG32(mmTPC3_RTR_HBW_RD_RQ_S_ARB, 0x01040101);
1571 WREG32(mmTPC3_RTR_HBW_RD_RQ_E_ARB, 0x01040301);
1572 WREG32(mmTPC3_RTR_HBW_WR_RQ_N_ARB, 0x02060102);
1573 WREG32(mmTPC3_RTR_HBW_WR_RQ_S_ARB, 0x01040101);
1574 WREG32(mmTPC3_RTR_HBW_WR_RQ_E_ARB, 0x01040301);
1575 WREG32(mmTPC3_RTR_HBW_RD_RS_N_ARB, 0x01040201);
1576 WREG32(mmTPC3_RTR_HBW_RD_RS_S_ARB, 0x01060201);
1577 WREG32(mmTPC3_RTR_HBW_RD_RS_W_ARB, 0x01060402);
1578 WREG32(mmTPC3_RTR_HBW_WR_RS_N_ARB, 0x01020101);
1579 WREG32(mmTPC3_RTR_HBW_WR_RS_S_ARB, 0x01030101);
1580 WREG32(mmTPC3_RTR_HBW_WR_RS_W_ARB, 0x01030401);
1581
1582 WREG32(mmTPC4_RTR_HBW_RD_RQ_N_ARB, 0x01040101);
1583 WREG32(mmTPC4_RTR_HBW_RD_RQ_S_ARB, 0x01030101);
1584 WREG32(mmTPC4_RTR_HBW_RD_RQ_E_ARB, 0x01030401);
1585 WREG32(mmTPC4_RTR_HBW_WR_RQ_N_ARB, 0x02070102);
1586 WREG32(mmTPC4_RTR_HBW_WR_RQ_S_ARB, 0x01030101);
1587 WREG32(mmTPC4_RTR_HBW_WR_RQ_E_ARB, 0x02060702);
1588 WREG32(mmTPC4_RTR_HBW_RD_RS_N_ARB, 0x01060201);
1589 WREG32(mmTPC4_RTR_HBW_RD_RS_S_ARB, 0x01040201);
1590 WREG32(mmTPC4_RTR_HBW_RD_RS_W_ARB, 0x01040602);
1591 WREG32(mmTPC4_RTR_HBW_WR_RS_N_ARB, 0x01030101);
1592 WREG32(mmTPC4_RTR_HBW_WR_RS_S_ARB, 0x01020101);
1593 WREG32(mmTPC4_RTR_HBW_WR_RS_W_ARB, 0x01040301);
1594
1595 WREG32(mmTPC5_RTR_HBW_RD_RQ_N_ARB, 0x01050101);
1596 WREG32(mmTPC5_RTR_HBW_RD_RQ_S_ARB, 0x01020101);
1597 WREG32(mmTPC5_RTR_HBW_RD_RQ_E_ARB, 0x01200501);
1598 WREG32(mmTPC5_RTR_HBW_WR_RQ_N_ARB, 0x02070102);
1599 WREG32(mmTPC5_RTR_HBW_WR_RQ_S_ARB, 0x01020101);
1600 WREG32(mmTPC5_RTR_HBW_WR_RQ_E_ARB, 0x02020602);
1601 WREG32(mmTPC5_RTR_HBW_RD_RS_N_ARB, 0x01070201);
1602 WREG32(mmTPC5_RTR_HBW_RD_RS_S_ARB, 0x01020201);
1603 WREG32(mmTPC5_RTR_HBW_RD_RS_W_ARB, 0x01020702);
1604 WREG32(mmTPC5_RTR_HBW_WR_RS_N_ARB, 0x01040101);
1605 WREG32(mmTPC5_RTR_HBW_WR_RS_S_ARB, 0x01010101);
1606 WREG32(mmTPC5_RTR_HBW_WR_RS_W_ARB, 0x01010501);
1607
1608 WREG32(mmTPC6_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1609 WREG32(mmTPC6_RTR_HBW_RD_RQ_S_ARB, 0x01010101);
1610 WREG32(mmTPC6_RTR_HBW_RD_RQ_E_ARB, 0x01010601);
1611 WREG32(mmTPC6_RTR_HBW_WR_RQ_N_ARB, 0x01010101);
1612 WREG32(mmTPC6_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
1613 WREG32(mmTPC6_RTR_HBW_WR_RQ_E_ARB, 0x02020702);
1614 WREG32(mmTPC6_RTR_HBW_RD_RS_N_ARB, 0x01010101);
1615 WREG32(mmTPC6_RTR_HBW_RD_RS_S_ARB, 0x01010101);
1616 WREG32(mmTPC6_RTR_HBW_RD_RS_W_ARB, 0x01020702);
1617 WREG32(mmTPC6_RTR_HBW_WR_RS_N_ARB, 0x01050101);
1618 WREG32(mmTPC6_RTR_HBW_WR_RS_S_ARB, 0x01010101);
1619 WREG32(mmTPC6_RTR_HBW_WR_RS_W_ARB, 0x01010501);
1620
1621 for (i = 0, offset = 0 ; i < 10 ; i++, offset += 4) {
1622 WREG32(mmMME1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1623 WREG32(mmMME2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1624 WREG32(mmMME3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1625 WREG32(mmMME4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1626 WREG32(mmMME5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1627 WREG32(mmMME6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1628
1629 WREG32(mmTPC0_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1630 WREG32(mmTPC1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1631 WREG32(mmTPC2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1632 WREG32(mmTPC3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1633 WREG32(mmTPC4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1634 WREG32(mmTPC5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1635 WREG32(mmTPC6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1636 WREG32(mmTPC7_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1637
1638 WREG32(mmPCI_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1639 WREG32(mmDMA_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1640 }
1641
1642 for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x40000) {
1643 WREG32(mmMME1_RTR_SCRAMB_EN + offset,
1644 1 << MME1_RTR_SCRAMB_EN_VAL_SHIFT);
1645 WREG32(mmMME1_RTR_NON_LIN_SCRAMB + offset,
1646 1 << MME1_RTR_NON_LIN_SCRAMB_EN_SHIFT);
1647 }
1648
1649 for (i = 0, offset = 0 ; i < 8 ; i++, offset += 0x40000) {
1650 /*
1651		 * Workaround for Bug H2 #2441:
1652 * "ST.NOP set trace event illegal opcode"
1653 */
1654 WREG32(mmTPC0_CFG_TPC_INTR_MASK + offset, tpc_intr_mask);
1655
1656 WREG32(mmTPC0_NRTR_SCRAMB_EN + offset,
1657 1 << TPC0_NRTR_SCRAMB_EN_VAL_SHIFT);
1658 WREG32(mmTPC0_NRTR_NON_LIN_SCRAMB + offset,
1659 1 << TPC0_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
1660 }
1661
1662 WREG32(mmDMA_NRTR_SCRAMB_EN, 1 << DMA_NRTR_SCRAMB_EN_VAL_SHIFT);
1663 WREG32(mmDMA_NRTR_NON_LIN_SCRAMB,
1664 1 << DMA_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
1665
1666 WREG32(mmPCI_NRTR_SCRAMB_EN, 1 << PCI_NRTR_SCRAMB_EN_VAL_SHIFT);
1667 WREG32(mmPCI_NRTR_NON_LIN_SCRAMB,
1668 1 << PCI_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
1669
1670 /*
1671 * Workaround for H2 #HW-23 bug
1672	 * Set the DMA max outstanding read requests to 240 on DMA CH 1 and to
1673	 * 16 on the KMD DMA channel.
1674	 * We need to limit only these DMAs because the user can only read
1675	 * from host using DMA CH 1
1676 */
1677 WREG32(mmDMA_CH_0_CFG0, 0x0fff0010);
1678 WREG32(mmDMA_CH_1_CFG0, 0x0fff00F0);
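	/*
	 * Illustrative note (assumption, not taken from the H/W spec): the
	 * two values above differ only in their low byte, which matches the
	 * limits in the comment - 0x10 == 16 for the KMD channel and
	 * 0xF0 == 240 for DMA CH 1 - so the low byte of DMA_CH_n_CFG0 is
	 * presumably the max-outstanding-reads field.
	 */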
1679
1680 goya->hw_cap_initialized |= HW_CAP_GOLDEN;
1681}
1682
Oded Gabbay9494a8d2019-02-16 00:39:17 +02001683static void goya_init_mme_qman(struct hl_device *hdev)
1684{
1685 u32 mtr_base_lo, mtr_base_hi;
1686 u32 so_base_lo, so_base_hi;
1687 u32 gic_base_lo, gic_base_hi;
1688 u64 qman_base_addr;
1689
1690 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1691 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1692 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1693 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1694
1695 gic_base_lo =
1696 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1697 gic_base_hi =
1698 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1699
1700 qman_base_addr = hdev->asic_prop.sram_base_address +
1701 MME_QMAN_BASE_OFFSET;
1702
1703 WREG32(mmMME_QM_PQ_BASE_LO, lower_32_bits(qman_base_addr));
1704 WREG32(mmMME_QM_PQ_BASE_HI, upper_32_bits(qman_base_addr));
1705 WREG32(mmMME_QM_PQ_SIZE, ilog2(MME_QMAN_LENGTH));
1706 WREG32(mmMME_QM_PQ_PI, 0);
1707 WREG32(mmMME_QM_PQ_CI, 0);
1708 WREG32(mmMME_QM_CP_LDMA_SRC_BASE_LO_OFFSET, 0x10C0);
1709 WREG32(mmMME_QM_CP_LDMA_SRC_BASE_HI_OFFSET, 0x10C4);
1710 WREG32(mmMME_QM_CP_LDMA_TSIZE_OFFSET, 0x10C8);
1711 WREG32(mmMME_QM_CP_LDMA_COMMIT_OFFSET, 0x10CC);
1712
1713 WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
1714 WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
1715 WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_LO, so_base_lo);
1716 WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_HI, so_base_hi);
1717
1718 /* QMAN CQ has 8 cache lines */
1719 WREG32(mmMME_QM_CQ_CFG1, 0x00080008);
1720
1721 WREG32(mmMME_QM_GLBL_ERR_ADDR_LO, gic_base_lo);
1722 WREG32(mmMME_QM_GLBL_ERR_ADDR_HI, gic_base_hi);
1723
1724 WREG32(mmMME_QM_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_QM);
1725
1726 WREG32(mmMME_QM_GLBL_ERR_CFG, QMAN_MME_ERR_MSG_EN);
1727
1728 WREG32(mmMME_QM_GLBL_PROT, QMAN_MME_ERR_PROT);
1729
1730 WREG32(mmMME_QM_GLBL_CFG0, QMAN_MME_ENABLE);
1731}
1732
1733static void goya_init_mme_cmdq(struct hl_device *hdev)
1734{
1735 u32 mtr_base_lo, mtr_base_hi;
1736 u32 so_base_lo, so_base_hi;
1737 u32 gic_base_lo, gic_base_hi;
1738 u64 qman_base_addr;
1739
1740 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1741 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1742 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1743 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1744
1745 gic_base_lo =
1746 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1747 gic_base_hi =
1748 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1749
1750 qman_base_addr = hdev->asic_prop.sram_base_address +
1751 MME_QMAN_BASE_OFFSET;
1752
1753 WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
1754 WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
1755 WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_LO, so_base_lo);
1756 WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_HI, so_base_hi);
1757
1758 /* CMDQ CQ has 20 cache lines */
1759 WREG32(mmMME_CMDQ_CQ_CFG1, 0x00140014);
1760
1761 WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_LO, gic_base_lo);
1762 WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_HI, gic_base_hi);
1763
1764 WREG32(mmMME_CMDQ_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_CMDQ);
1765
1766 WREG32(mmMME_CMDQ_GLBL_ERR_CFG, CMDQ_MME_ERR_MSG_EN);
1767
1768 WREG32(mmMME_CMDQ_GLBL_PROT, CMDQ_MME_ERR_PROT);
1769
1770 WREG32(mmMME_CMDQ_GLBL_CFG0, CMDQ_MME_ENABLE);
1771}
1772
1773static void goya_init_mme_qmans(struct hl_device *hdev)
1774{
1775 struct goya_device *goya = hdev->asic_specific;
1776 u32 so_base_lo, so_base_hi;
1777
1778 if (goya->hw_cap_initialized & HW_CAP_MME)
1779 return;
1780
1781 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1782 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1783
1784 WREG32(mmMME_SM_BASE_ADDRESS_LOW, so_base_lo);
1785 WREG32(mmMME_SM_BASE_ADDRESS_HIGH, so_base_hi);
1786
1787 goya_init_mme_qman(hdev);
1788 goya_init_mme_cmdq(hdev);
1789
1790 goya->hw_cap_initialized |= HW_CAP_MME;
1791}
1792
1793static void goya_init_tpc_qman(struct hl_device *hdev, u32 base_off, int tpc_id)
1794{
1795 u32 mtr_base_lo, mtr_base_hi;
1796 u32 so_base_lo, so_base_hi;
1797 u32 gic_base_lo, gic_base_hi;
1798 u64 qman_base_addr;
1799 u32 reg_off = tpc_id * (mmTPC1_QM_PQ_PI - mmTPC0_QM_PQ_PI);
1800
1801 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1802 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1803 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1804 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1805
1806 gic_base_lo =
1807 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1808 gic_base_hi =
1809 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1810
1811 qman_base_addr = hdev->asic_prop.sram_base_address + base_off;
1812
1813 WREG32(mmTPC0_QM_PQ_BASE_LO + reg_off, lower_32_bits(qman_base_addr));
1814 WREG32(mmTPC0_QM_PQ_BASE_HI + reg_off, upper_32_bits(qman_base_addr));
1815 WREG32(mmTPC0_QM_PQ_SIZE + reg_off, ilog2(TPC_QMAN_LENGTH));
1816 WREG32(mmTPC0_QM_PQ_PI + reg_off, 0);
1817 WREG32(mmTPC0_QM_PQ_CI + reg_off, 0);
1818 WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET + reg_off, 0x10C0);
1819 WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET + reg_off, 0x10C4);
1820 WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET + reg_off, 0x10C8);
1821 WREG32(mmTPC0_QM_CP_LDMA_COMMIT_OFFSET + reg_off, 0x10CC);
1822
1823 WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
1824 WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
1825 WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
1826 WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
1827
1828 WREG32(mmTPC0_QM_CQ_CFG1 + reg_off, 0x00080008);
1829
1830 WREG32(mmTPC0_QM_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
1831 WREG32(mmTPC0_QM_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
1832
1833 WREG32(mmTPC0_QM_GLBL_ERR_WDATA + reg_off,
1834 GOYA_ASYNC_EVENT_ID_TPC0_QM + tpc_id);
1835
1836 WREG32(mmTPC0_QM_GLBL_ERR_CFG + reg_off, QMAN_TPC_ERR_MSG_EN);
1837
1838 WREG32(mmTPC0_QM_GLBL_PROT + reg_off, QMAN_TPC_ERR_PROT);
1839
1840 WREG32(mmTPC0_QM_GLBL_CFG0 + reg_off, QMAN_TPC_ENABLE);
1841}
1842
1843static void goya_init_tpc_cmdq(struct hl_device *hdev, int tpc_id)
1844{
1845 u32 mtr_base_lo, mtr_base_hi;
1846 u32 so_base_lo, so_base_hi;
1847 u32 gic_base_lo, gic_base_hi;
1848 u32 reg_off = tpc_id * (mmTPC1_CMDQ_CQ_CFG1 - mmTPC0_CMDQ_CQ_CFG1);
1849
1850 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1851 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1852 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1853 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1854
1855 gic_base_lo =
1856 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1857 gic_base_hi =
1858 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1859
1860 WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
1861 WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
1862 WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
1863 WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
1864
1865 WREG32(mmTPC0_CMDQ_CQ_CFG1 + reg_off, 0x00140014);
1866
1867 WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
1868 WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
1869
1870 WREG32(mmTPC0_CMDQ_GLBL_ERR_WDATA + reg_off,
1871 GOYA_ASYNC_EVENT_ID_TPC0_CMDQ + tpc_id);
1872
1873 WREG32(mmTPC0_CMDQ_GLBL_ERR_CFG + reg_off, CMDQ_TPC_ERR_MSG_EN);
1874
1875 WREG32(mmTPC0_CMDQ_GLBL_PROT + reg_off, CMDQ_TPC_ERR_PROT);
1876
1877 WREG32(mmTPC0_CMDQ_GLBL_CFG0 + reg_off, CMDQ_TPC_ENABLE);
1878}
1879
1880static void goya_init_tpc_qmans(struct hl_device *hdev)
1881{
1882 struct goya_device *goya = hdev->asic_specific;
1883 u32 so_base_lo, so_base_hi;
1884 u32 cfg_off = mmTPC1_CFG_SM_BASE_ADDRESS_LOW -
1885 mmTPC0_CFG_SM_BASE_ADDRESS_LOW;
1886 int i;
1887
1888 if (goya->hw_cap_initialized & HW_CAP_TPC)
1889 return;
1890
1891 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1892 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1893
1894 for (i = 0 ; i < TPC_MAX_NUM ; i++) {
1895 WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_LOW + i * cfg_off,
1896 so_base_lo);
1897 WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_HIGH + i * cfg_off,
1898 so_base_hi);
1899 }
1900
1901 goya_init_tpc_qman(hdev, TPC0_QMAN_BASE_OFFSET, 0);
1902 goya_init_tpc_qman(hdev, TPC1_QMAN_BASE_OFFSET, 1);
1903 goya_init_tpc_qman(hdev, TPC2_QMAN_BASE_OFFSET, 2);
1904 goya_init_tpc_qman(hdev, TPC3_QMAN_BASE_OFFSET, 3);
1905 goya_init_tpc_qman(hdev, TPC4_QMAN_BASE_OFFSET, 4);
1906 goya_init_tpc_qman(hdev, TPC5_QMAN_BASE_OFFSET, 5);
1907 goya_init_tpc_qman(hdev, TPC6_QMAN_BASE_OFFSET, 6);
1908 goya_init_tpc_qman(hdev, TPC7_QMAN_BASE_OFFSET, 7);
1909
1910 for (i = 0 ; i < TPC_MAX_NUM ; i++)
1911 goya_init_tpc_cmdq(hdev, i);
1912
1913 goya->hw_cap_initialized |= HW_CAP_TPC;
1914}
1915
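/*
 * A minimal sketch (illustrative, not part of the original driver): the
 * eight unrolled goya_init_tpc_qman() calls above could also be table-driven.
 * It assumes TPC_MAX_NUM == 8 and uses only offsets already named in this
 * file; the "_compact" helper name is hypothetical.
 */
static void __maybe_unused goya_init_tpc_qmans_compact(struct hl_device *hdev)
{
	static const u32 qman_base_offsets[TPC_MAX_NUM] = {
		TPC0_QMAN_BASE_OFFSET, TPC1_QMAN_BASE_OFFSET,
		TPC2_QMAN_BASE_OFFSET, TPC3_QMAN_BASE_OFFSET,
		TPC4_QMAN_BASE_OFFSET, TPC5_QMAN_BASE_OFFSET,
		TPC6_QMAN_BASE_OFFSET, TPC7_QMAN_BASE_OFFSET
	};
	int i;

	/* Equivalent to the unrolled calls in goya_init_tpc_qmans() */
	for (i = 0 ; i < TPC_MAX_NUM ; i++)
		goya_init_tpc_qman(hdev, qman_base_offsets[i], i);
}
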
1916/*
1917 * goya_disable_internal_queues - Disable internal queues
1918 *
1919 * @hdev: pointer to hl_device structure
1920 *
1921 */
1922static void goya_disable_internal_queues(struct hl_device *hdev)
1923{
1924 WREG32(mmMME_QM_GLBL_CFG0, 0);
1925 WREG32(mmMME_CMDQ_GLBL_CFG0, 0);
1926
1927 WREG32(mmTPC0_QM_GLBL_CFG0, 0);
1928 WREG32(mmTPC0_CMDQ_GLBL_CFG0, 0);
1929
1930 WREG32(mmTPC1_QM_GLBL_CFG0, 0);
1931 WREG32(mmTPC1_CMDQ_GLBL_CFG0, 0);
1932
1933 WREG32(mmTPC2_QM_GLBL_CFG0, 0);
1934 WREG32(mmTPC2_CMDQ_GLBL_CFG0, 0);
1935
1936 WREG32(mmTPC3_QM_GLBL_CFG0, 0);
1937 WREG32(mmTPC3_CMDQ_GLBL_CFG0, 0);
1938
1939 WREG32(mmTPC4_QM_GLBL_CFG0, 0);
1940 WREG32(mmTPC4_CMDQ_GLBL_CFG0, 0);
1941
1942 WREG32(mmTPC5_QM_GLBL_CFG0, 0);
1943 WREG32(mmTPC5_CMDQ_GLBL_CFG0, 0);
1944
1945 WREG32(mmTPC6_QM_GLBL_CFG0, 0);
1946 WREG32(mmTPC6_CMDQ_GLBL_CFG0, 0);
1947
1948 WREG32(mmTPC7_QM_GLBL_CFG0, 0);
1949 WREG32(mmTPC7_CMDQ_GLBL_CFG0, 0);
1950}
1951
1952/*
1953 * goya_stop_internal_queues - Stop internal queues
1954 *
1955 * @hdev: pointer to hl_device structure
1956 *
1957 * Returns 0 on success
1958 *
1959 */
1960static int goya_stop_internal_queues(struct hl_device *hdev)
1961{
1962 int rc, retval = 0;
1963
1964 /*
1965	 * Each queue (QMAN) is a separate H/W logic. That means each QMAN
1966	 * can be stopped independently, and failure to stop one does NOT
1967	 * prevent us from trying to stop the others
1968 */
1969
1970 rc = goya_stop_queue(hdev,
1971 mmMME_QM_GLBL_CFG1,
1972 mmMME_QM_CP_STS,
1973 mmMME_QM_GLBL_STS0);
1974
1975 if (rc) {
1976 dev_err(hdev->dev, "failed to stop MME QMAN\n");
1977 retval = -EIO;
1978 }
1979
1980 rc = goya_stop_queue(hdev,
1981 mmMME_CMDQ_GLBL_CFG1,
1982 mmMME_CMDQ_CP_STS,
1983 mmMME_CMDQ_GLBL_STS0);
1984
1985 if (rc) {
1986 dev_err(hdev->dev, "failed to stop MME CMDQ\n");
1987 retval = -EIO;
1988 }
1989
1990 rc = goya_stop_queue(hdev,
1991 mmTPC0_QM_GLBL_CFG1,
1992 mmTPC0_QM_CP_STS,
1993 mmTPC0_QM_GLBL_STS0);
1994
1995 if (rc) {
1996 dev_err(hdev->dev, "failed to stop TPC 0 QMAN\n");
1997 retval = -EIO;
1998 }
1999
2000 rc = goya_stop_queue(hdev,
2001 mmTPC0_CMDQ_GLBL_CFG1,
2002 mmTPC0_CMDQ_CP_STS,
2003 mmTPC0_CMDQ_GLBL_STS0);
2004
2005 if (rc) {
2006 dev_err(hdev->dev, "failed to stop TPC 0 CMDQ\n");
2007 retval = -EIO;
2008 }
2009
2010 rc = goya_stop_queue(hdev,
2011 mmTPC1_QM_GLBL_CFG1,
2012 mmTPC1_QM_CP_STS,
2013 mmTPC1_QM_GLBL_STS0);
2014
2015 if (rc) {
2016 dev_err(hdev->dev, "failed to stop TPC 1 QMAN\n");
2017 retval = -EIO;
2018 }
2019
2020 rc = goya_stop_queue(hdev,
2021 mmTPC1_CMDQ_GLBL_CFG1,
2022 mmTPC1_CMDQ_CP_STS,
2023 mmTPC1_CMDQ_GLBL_STS0);
2024
2025 if (rc) {
2026 dev_err(hdev->dev, "failed to stop TPC 1 CMDQ\n");
2027 retval = -EIO;
2028 }
2029
2030 rc = goya_stop_queue(hdev,
2031 mmTPC2_QM_GLBL_CFG1,
2032 mmTPC2_QM_CP_STS,
2033 mmTPC2_QM_GLBL_STS0);
2034
2035 if (rc) {
2036 dev_err(hdev->dev, "failed to stop TPC 2 QMAN\n");
2037 retval = -EIO;
2038 }
2039
2040 rc = goya_stop_queue(hdev,
2041 mmTPC2_CMDQ_GLBL_CFG1,
2042 mmTPC2_CMDQ_CP_STS,
2043 mmTPC2_CMDQ_GLBL_STS0);
2044
2045 if (rc) {
2046 dev_err(hdev->dev, "failed to stop TPC 2 CMDQ\n");
2047 retval = -EIO;
2048 }
2049
2050 rc = goya_stop_queue(hdev,
2051 mmTPC3_QM_GLBL_CFG1,
2052 mmTPC3_QM_CP_STS,
2053 mmTPC3_QM_GLBL_STS0);
2054
2055 if (rc) {
2056 dev_err(hdev->dev, "failed to stop TPC 3 QMAN\n");
2057 retval = -EIO;
2058 }
2059
2060 rc = goya_stop_queue(hdev,
2061 mmTPC3_CMDQ_GLBL_CFG1,
2062 mmTPC3_CMDQ_CP_STS,
2063 mmTPC3_CMDQ_GLBL_STS0);
2064
2065 if (rc) {
2066 dev_err(hdev->dev, "failed to stop TPC 3 CMDQ\n");
2067 retval = -EIO;
2068 }
2069
2070 rc = goya_stop_queue(hdev,
2071 mmTPC4_QM_GLBL_CFG1,
2072 mmTPC4_QM_CP_STS,
2073 mmTPC4_QM_GLBL_STS0);
2074
2075 if (rc) {
2076 dev_err(hdev->dev, "failed to stop TPC 4 QMAN\n");
2077 retval = -EIO;
2078 }
2079
2080 rc = goya_stop_queue(hdev,
2081 mmTPC4_CMDQ_GLBL_CFG1,
2082 mmTPC4_CMDQ_CP_STS,
2083 mmTPC4_CMDQ_GLBL_STS0);
2084
2085 if (rc) {
2086 dev_err(hdev->dev, "failed to stop TPC 4 CMDQ\n");
2087 retval = -EIO;
2088 }
2089
2090 rc = goya_stop_queue(hdev,
2091 mmTPC5_QM_GLBL_CFG1,
2092 mmTPC5_QM_CP_STS,
2093 mmTPC5_QM_GLBL_STS0);
2094
2095 if (rc) {
2096 dev_err(hdev->dev, "failed to stop TPC 5 QMAN\n");
2097 retval = -EIO;
2098 }
2099
2100 rc = goya_stop_queue(hdev,
2101 mmTPC5_CMDQ_GLBL_CFG1,
2102 mmTPC5_CMDQ_CP_STS,
2103 mmTPC5_CMDQ_GLBL_STS0);
2104
2105 if (rc) {
2106 dev_err(hdev->dev, "failed to stop TPC 5 CMDQ\n");
2107 retval = -EIO;
2108 }
2109
2110 rc = goya_stop_queue(hdev,
2111 mmTPC6_QM_GLBL_CFG1,
2112 mmTPC6_QM_CP_STS,
2113 mmTPC6_QM_GLBL_STS0);
2114
2115 if (rc) {
2116 dev_err(hdev->dev, "failed to stop TPC 6 QMAN\n");
2117 retval = -EIO;
2118 }
2119
2120 rc = goya_stop_queue(hdev,
2121 mmTPC6_CMDQ_GLBL_CFG1,
2122 mmTPC6_CMDQ_CP_STS,
2123 mmTPC6_CMDQ_GLBL_STS0);
2124
2125 if (rc) {
2126 dev_err(hdev->dev, "failed to stop TPC 6 CMDQ\n");
2127 retval = -EIO;
2128 }
2129
2130 rc = goya_stop_queue(hdev,
2131 mmTPC7_QM_GLBL_CFG1,
2132 mmTPC7_QM_CP_STS,
2133 mmTPC7_QM_GLBL_STS0);
2134
2135 if (rc) {
2136 dev_err(hdev->dev, "failed to stop TPC 7 QMAN\n");
2137 retval = -EIO;
2138 }
2139
2140 rc = goya_stop_queue(hdev,
2141 mmTPC7_CMDQ_GLBL_CFG1,
2142 mmTPC7_CMDQ_CP_STS,
2143 mmTPC7_CMDQ_GLBL_STS0);
2144
2145 if (rc) {
2146 dev_err(hdev->dev, "failed to stop TPC 7 CMDQ\n");
2147 retval = -EIO;
2148 }
2149
2150 return retval;
2151}
2152
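/*
 * A compact sketch (illustrative, not the driver's code) of the same stop
 * sequence as goya_stop_internal_queues() above, driven by a table of
 * register triplets. It relies only on the goya_stop_queue() signature and
 * register names used above; the macro and the "_compact" helper name are
 * hypothetical.
 */
#define GOYA_STOP_ENT(qm, qm_name) \
	{ mm##qm##_GLBL_CFG1, mm##qm##_CP_STS, mm##qm##_GLBL_STS0, qm_name }

static int __maybe_unused goya_stop_internal_queues_compact(
						struct hl_device *hdev)
{
	static const struct {
		u32 cfg1;
		u32 cp_sts;
		u32 glbl_sts0;
		const char *name;
	} qmans[] = {
		GOYA_STOP_ENT(MME_QM, "MME QMAN"),
		GOYA_STOP_ENT(MME_CMDQ, "MME CMDQ"),
		GOYA_STOP_ENT(TPC0_QM, "TPC 0 QMAN"),
		GOYA_STOP_ENT(TPC0_CMDQ, "TPC 0 CMDQ"),
		GOYA_STOP_ENT(TPC1_QM, "TPC 1 QMAN"),
		GOYA_STOP_ENT(TPC1_CMDQ, "TPC 1 CMDQ"),
		GOYA_STOP_ENT(TPC2_QM, "TPC 2 QMAN"),
		GOYA_STOP_ENT(TPC2_CMDQ, "TPC 2 CMDQ"),
		GOYA_STOP_ENT(TPC3_QM, "TPC 3 QMAN"),
		GOYA_STOP_ENT(TPC3_CMDQ, "TPC 3 CMDQ"),
		GOYA_STOP_ENT(TPC4_QM, "TPC 4 QMAN"),
		GOYA_STOP_ENT(TPC4_CMDQ, "TPC 4 CMDQ"),
		GOYA_STOP_ENT(TPC5_QM, "TPC 5 QMAN"),
		GOYA_STOP_ENT(TPC5_CMDQ, "TPC 5 CMDQ"),
		GOYA_STOP_ENT(TPC6_QM, "TPC 6 QMAN"),
		GOYA_STOP_ENT(TPC6_CMDQ, "TPC 6 CMDQ"),
		GOYA_STOP_ENT(TPC7_QM, "TPC 7 QMAN"),
		GOYA_STOP_ENT(TPC7_CMDQ, "TPC 7 CMDQ"),
	};
	int i, rc, retval = 0;

	/* Same semantics as above: try to stop every QMAN even on failure */
	for (i = 0 ; i < ARRAY_SIZE(qmans) ; i++) {
		rc = goya_stop_queue(hdev, qmans[i].cfg1, qmans[i].cp_sts,
					qmans[i].glbl_sts0);
		if (rc) {
			dev_err(hdev->dev, "failed to stop %s\n",
				qmans[i].name);
			retval = -EIO;
		}
	}

	return retval;
}
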
Oded Gabbay1251f232019-02-16 00:39:18 +02002153static void goya_dma_stall(struct hl_device *hdev)
2154{
2155 WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT);
2156 WREG32(mmDMA_QM_1_GLBL_CFG1, 1 << DMA_QM_1_GLBL_CFG1_DMA_STOP_SHIFT);
2157 WREG32(mmDMA_QM_2_GLBL_CFG1, 1 << DMA_QM_2_GLBL_CFG1_DMA_STOP_SHIFT);
2158 WREG32(mmDMA_QM_3_GLBL_CFG1, 1 << DMA_QM_3_GLBL_CFG1_DMA_STOP_SHIFT);
2159 WREG32(mmDMA_QM_4_GLBL_CFG1, 1 << DMA_QM_4_GLBL_CFG1_DMA_STOP_SHIFT);
2160}
2161
2162static void goya_tpc_stall(struct hl_device *hdev)
2163{
2164 WREG32(mmTPC0_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
2165 WREG32(mmTPC1_CFG_TPC_STALL, 1 << TPC1_CFG_TPC_STALL_V_SHIFT);
2166 WREG32(mmTPC2_CFG_TPC_STALL, 1 << TPC2_CFG_TPC_STALL_V_SHIFT);
2167 WREG32(mmTPC3_CFG_TPC_STALL, 1 << TPC3_CFG_TPC_STALL_V_SHIFT);
2168 WREG32(mmTPC4_CFG_TPC_STALL, 1 << TPC4_CFG_TPC_STALL_V_SHIFT);
2169 WREG32(mmTPC5_CFG_TPC_STALL, 1 << TPC5_CFG_TPC_STALL_V_SHIFT);
2170 WREG32(mmTPC6_CFG_TPC_STALL, 1 << TPC6_CFG_TPC_STALL_V_SHIFT);
2171 WREG32(mmTPC7_CFG_TPC_STALL, 1 << TPC7_CFG_TPC_STALL_V_SHIFT);
2172}
2173
2174static void goya_mme_stall(struct hl_device *hdev)
2175{
2176 WREG32(mmMME_STALL, 0xFFFFFFFF);
2177}
2178
2179static int goya_enable_msix(struct hl_device *hdev)
2180{
2181 struct goya_device *goya = hdev->asic_specific;
2182 int cq_cnt = hdev->asic_prop.completion_queues_count;
2183 int rc, i, irq_cnt_init, irq;
2184
2185 if (goya->hw_cap_initialized & HW_CAP_MSIX)
2186 return 0;
2187
2188 rc = pci_alloc_irq_vectors(hdev->pdev, GOYA_MSIX_ENTRIES,
2189 GOYA_MSIX_ENTRIES, PCI_IRQ_MSIX);
2190 if (rc < 0) {
2191 dev_err(hdev->dev,
2192 "MSI-X: Failed to enable support -- %d/%d\n",
2193 GOYA_MSIX_ENTRIES, rc);
2194 return rc;
2195 }
2196
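	/*
	 * Vector layout used here: vectors 0..cq_cnt-1 serve the completion
	 * queues, and the dedicated EVENT_QUEUE_MSIX_IDX vector (requested
	 * below) serves the event queue.
	 */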
2197 for (i = 0, irq_cnt_init = 0 ; i < cq_cnt ; i++, irq_cnt_init++) {
2198 irq = pci_irq_vector(hdev->pdev, i);
2199 rc = request_irq(irq, hl_irq_handler_cq, 0, goya_irq_name[i],
2200 &hdev->completion_queue[i]);
2201 if (rc) {
2202			dev_err(hdev->dev, "Failed to request IRQ %d\n", irq);
2203 goto free_irqs;
2204 }
2205 }
2206
2207 irq = pci_irq_vector(hdev->pdev, EVENT_QUEUE_MSIX_IDX);
2208
2209 rc = request_irq(irq, hl_irq_handler_eq, 0,
2210 goya_irq_name[EVENT_QUEUE_MSIX_IDX],
2211 &hdev->event_queue);
2212 if (rc) {
2213		dev_err(hdev->dev, "Failed to request IRQ %d\n", irq);
2214 goto free_irqs;
2215 }
2216
2217 goya->hw_cap_initialized |= HW_CAP_MSIX;
2218 return 0;
2219
2220free_irqs:
2221 for (i = 0 ; i < irq_cnt_init ; i++)
2222 free_irq(pci_irq_vector(hdev->pdev, i),
2223 &hdev->completion_queue[i]);
2224
2225 pci_free_irq_vectors(hdev->pdev);
2226 return rc;
2227}
2228
2229static void goya_sync_irqs(struct hl_device *hdev)
2230{
2231 struct goya_device *goya = hdev->asic_specific;
2232 int i;
2233
2234 if (!(goya->hw_cap_initialized & HW_CAP_MSIX))
2235 return;
2236
2237 /* Wait for all pending IRQs to be finished */
2238 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
2239 synchronize_irq(pci_irq_vector(hdev->pdev, i));
2240
2241 synchronize_irq(pci_irq_vector(hdev->pdev, EVENT_QUEUE_MSIX_IDX));
2242}
2243
2244static void goya_disable_msix(struct hl_device *hdev)
2245{
2246 struct goya_device *goya = hdev->asic_specific;
2247 int i, irq;
2248
2249 if (!(goya->hw_cap_initialized & HW_CAP_MSIX))
2250 return;
2251
2252 goya_sync_irqs(hdev);
2253
2254 irq = pci_irq_vector(hdev->pdev, EVENT_QUEUE_MSIX_IDX);
2255 free_irq(irq, &hdev->event_queue);
2256
2257 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
2258 irq = pci_irq_vector(hdev->pdev, i);
2259 free_irq(irq, &hdev->completion_queue[i]);
2260 }
2261
2262 pci_free_irq_vectors(hdev->pdev);
2263
2264 goya->hw_cap_initialized &= ~HW_CAP_MSIX;
2265}
2266
2267static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
2268{
2269 u32 wait_timeout_ms, cpu_timeout_ms;
2270
2271 dev_info(hdev->dev,
2272 "Halting compute engines and disabling interrupts\n");
2273
2274 if (hdev->pldm) {
2275 wait_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
2276 cpu_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
2277 } else {
2278 wait_timeout_ms = GOYA_RESET_WAIT_MSEC;
2279 cpu_timeout_ms = GOYA_CPU_RESET_WAIT_MSEC;
2280 }
2281
2282 if (hard_reset) {
2283 /*
2284		 * We don't know the state of the CPU, so make sure it is
2285		 * stopped by any means necessary
2286 */
2287 WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_GOTO_WFE);
2288 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
2289 GOYA_ASYNC_EVENT_ID_HALT_MACHINE);
2290 msleep(cpu_timeout_ms);
2291 }
2292
2293 goya_stop_external_queues(hdev);
2294 goya_stop_internal_queues(hdev);
2295
2296 msleep(wait_timeout_ms);
2297
2298 goya_dma_stall(hdev);
2299 goya_tpc_stall(hdev);
2300 goya_mme_stall(hdev);
2301
2302 msleep(wait_timeout_ms);
2303
2304 goya_disable_external_queues(hdev);
2305 goya_disable_internal_queues(hdev);
2306
2307 if (hard_reset)
2308 goya_disable_msix(hdev);
2309 else
2310 goya_sync_irqs(hdev);
2311}
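
/*
 * Teardown order used above: stop the external/internal queues first so no
 * new work is fetched, stall the DMA/TPC/MME engines, then disable the
 * queues. MSI-X is torn down only on hard reset; otherwise pending IRQs are
 * just synchronized.
 */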
Oded Gabbay839c4802019-02-16 00:39:16 +02002312
2313/*
Tomer Tayar3110c602019-03-04 10:22:09 +02002314 * goya_push_uboot_to_device() - Push u-boot FW code to device.
2315 * @hdev: Pointer to hl_device structure.
Oded Gabbay839c4802019-02-16 00:39:16 +02002316 *
Tomer Tayar3110c602019-03-04 10:22:09 +02002317 * Copy u-boot fw code from firmware file to SRAM BAR.
Oded Gabbay839c4802019-02-16 00:39:16 +02002318 *
Tomer Tayar3110c602019-03-04 10:22:09 +02002319 * Return: 0 on success, non-zero for failure.
Oded Gabbay839c4802019-02-16 00:39:16 +02002320 */
Tomer Tayar3110c602019-03-04 10:22:09 +02002321static int goya_push_uboot_to_device(struct hl_device *hdev)
Oded Gabbay839c4802019-02-16 00:39:16 +02002322{
Tomer Tayar3110c602019-03-04 10:22:09 +02002323 char fw_name[200];
2324 void __iomem *dst;
Oded Gabbay839c4802019-02-16 00:39:16 +02002325
Tomer Tayar3110c602019-03-04 10:22:09 +02002326 snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-u-boot.bin");
2327 dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + UBOOT_FW_OFFSET;
Oded Gabbay839c4802019-02-16 00:39:16 +02002328
Tomer Tayar3110c602019-03-04 10:22:09 +02002329 return hl_fw_push_fw_to_device(hdev, fw_name, dst);
2330}
Oded Gabbay839c4802019-02-16 00:39:16 +02002331
Tomer Tayar3110c602019-03-04 10:22:09 +02002332/*
2333 * goya_push_linux_to_device() - Push LINUX FW code to device.
2334 * @hdev: Pointer to hl_device structure.
2335 *
2336 * Copy LINUX fw code from firmware file to HBM BAR.
2337 *
2338 * Return: 0 on success, non-zero for failure.
2339 */
2340static int goya_push_linux_to_device(struct hl_device *hdev)
2341{
2342 char fw_name[200];
2343 void __iomem *dst;
Oded Gabbay839c4802019-02-16 00:39:16 +02002344
Tomer Tayar3110c602019-03-04 10:22:09 +02002345 snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-fit.itb");
2346 dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
Oded Gabbay839c4802019-02-16 00:39:16 +02002347
Tomer Tayar3110c602019-03-04 10:22:09 +02002348 return hl_fw_push_fw_to_device(hdev, fw_name, dst);
Oded Gabbay839c4802019-02-16 00:39:16 +02002349}
2350
2351static int goya_pldm_init_cpu(struct hl_device *hdev)
2352{
Oded Gabbay839c4802019-02-16 00:39:16 +02002353 u32 val, unit_rst_val;
2354 int rc;
2355
2356 /* Must initialize SRAM scrambler before pushing u-boot to SRAM */
2357 goya_init_golden_registers(hdev);
2358
2359 /* Put ARM cores into reset */
2360 WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL, CPU_RESET_ASSERT);
2361 val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);
2362
2363 /* Reset the CA53 MACRO */
2364 unit_rst_val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
2365 WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, CA53_RESET);
2366 val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
2367 WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, unit_rst_val);
2368 val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
2369
Tomer Tayar3110c602019-03-04 10:22:09 +02002370 rc = goya_push_uboot_to_device(hdev);
Oded Gabbay839c4802019-02-16 00:39:16 +02002371 if (rc)
2372 return rc;
2373
Tomer Tayar3110c602019-03-04 10:22:09 +02002374 rc = goya_push_linux_to_device(hdev);
Oded Gabbay839c4802019-02-16 00:39:16 +02002375 if (rc)
2376 return rc;
2377
2378 WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_FIT_RDY);
2379 WREG32(mmPSOC_GLOBAL_CONF_WARM_REBOOT, CPU_BOOT_STATUS_NA);
2380
2381 WREG32(mmCPU_CA53_CFG_RST_ADDR_LSB_0,
2382 lower_32_bits(SRAM_BASE_ADDR + UBOOT_FW_OFFSET));
2383 WREG32(mmCPU_CA53_CFG_RST_ADDR_MSB_0,
2384 upper_32_bits(SRAM_BASE_ADDR + UBOOT_FW_OFFSET));
2385
2386 /* Release ARM core 0 from reset */
2387 WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL,
2388 CPU_RESET_CORE0_DEASSERT);
2389 val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);
2390
2391 return 0;
2392}
2393
2394/*
2395 * FW component passes an offset from SRAM_BASE_ADDR in SCRATCHPAD_xx.
2396 * The version string is expected to be located at that offset.
2397 */
2398static void goya_read_device_fw_version(struct hl_device *hdev,
2399 enum goya_fw_component fwc)
2400{
2401 const char *name;
2402 u32 ver_off;
2403 char *dest;
2404
2405 switch (fwc) {
2406 case FW_COMP_UBOOT:
2407 ver_off = RREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_29);
2408 dest = hdev->asic_prop.uboot_ver;
2409 name = "U-Boot";
2410 break;
2411 case FW_COMP_PREBOOT:
2412 ver_off = RREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_28);
2413 dest = hdev->asic_prop.preboot_ver;
2414 name = "Preboot";
2415 break;
2416 default:
2417 dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
2418 return;
2419 }
2420
2421 ver_off &= ~((u32)SRAM_BASE_ADDR);
2422
2423 if (ver_off < SRAM_SIZE - VERSION_MAX_LEN) {
2424 memcpy_fromio(dest, hdev->pcie_bar[SRAM_CFG_BAR_ID] + ver_off,
2425 VERSION_MAX_LEN);
2426 } else {
2427 dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n",
2428 name, ver_off);
2429 strcpy(dest, "unavailable");
2430 }
2431}
2432
2433static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
2434{
2435 struct goya_device *goya = hdev->asic_specific;
Oded Gabbay839c4802019-02-16 00:39:16 +02002436 u32 status;
2437 int rc;
2438
2439 if (!hdev->cpu_enable)
2440 return 0;
2441
2442 if (goya->hw_cap_initialized & HW_CAP_CPU)
2443 return 0;
2444
2445 /*
2446	 * Before pushing U-Boot/Linux to the device, we need to set the DDR
2447	 * BAR to the base address of DRAM
2448 */
2449 rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
2450 if (rc) {
2451 dev_err(hdev->dev,
2452 "failed to map DDR bar to DRAM base address\n");
2453 return rc;
2454 }
2455
2456 if (hdev->pldm) {
2457 rc = goya_pldm_init_cpu(hdev);
2458 if (rc)
2459 return rc;
2460
2461 goto out;
2462 }
2463
2464 /* Make sure CPU boot-loader is running */
2465 rc = hl_poll_timeout(
2466 hdev,
2467 mmPSOC_GLOBAL_CONF_WARM_REBOOT,
2468 status,
2469 (status == CPU_BOOT_STATUS_DRAM_RDY) ||
2470 (status == CPU_BOOT_STATUS_SRAM_AVAIL),
2471 10000,
2472 cpu_timeout);
2473
2474 if (rc) {
2475		dev_err(hdev->dev, "Error in ARM u-boot!\n");
2476 switch (status) {
2477 case CPU_BOOT_STATUS_NA:
2478 dev_err(hdev->dev,
2479 "ARM status %d - BTL did NOT run\n", status);
2480 break;
2481 case CPU_BOOT_STATUS_IN_WFE:
2482 dev_err(hdev->dev,
2483 "ARM status %d - Inside WFE loop\n", status);
2484 break;
2485 case CPU_BOOT_STATUS_IN_BTL:
2486 dev_err(hdev->dev,
2487 "ARM status %d - Stuck in BTL\n", status);
2488 break;
2489 case CPU_BOOT_STATUS_IN_PREBOOT:
2490 dev_err(hdev->dev,
2491 "ARM status %d - Stuck in Preboot\n", status);
2492 break;
2493 case CPU_BOOT_STATUS_IN_SPL:
2494 dev_err(hdev->dev,
2495 "ARM status %d - Stuck in SPL\n", status);
2496 break;
2497 case CPU_BOOT_STATUS_IN_UBOOT:
2498 dev_err(hdev->dev,
2499 "ARM status %d - Stuck in u-boot\n", status);
2500 break;
2501 case CPU_BOOT_STATUS_DRAM_INIT_FAIL:
2502 dev_err(hdev->dev,
2503 "ARM status %d - DDR initialization failed\n",
2504 status);
2505 break;
Igor Grinberg0ca3b1b2019-02-24 11:20:02 +02002506 case CPU_BOOT_STATUS_UBOOT_NOT_READY:
2507 dev_err(hdev->dev,
2508 "ARM status %d - u-boot stopped by user\n",
2509 status);
2510 break;
Oded Gabbay839c4802019-02-16 00:39:16 +02002511 default:
2512 dev_err(hdev->dev,
2513 "ARM status %d - Invalid status code\n",
2514 status);
2515 break;
2516 }
2517 return -EIO;
2518 }
2519
2520 /* Read U-Boot version now in case we will later fail */
2521 goya_read_device_fw_version(hdev, FW_COMP_UBOOT);
2522 goya_read_device_fw_version(hdev, FW_COMP_PREBOOT);
2523
2524 if (status == CPU_BOOT_STATUS_SRAM_AVAIL)
2525 goto out;
2526
2527 if (!hdev->fw_loading) {
2528 dev_info(hdev->dev, "Skip loading FW\n");
2529 goto out;
2530 }
2531
Tomer Tayar3110c602019-03-04 10:22:09 +02002532 rc = goya_push_linux_to_device(hdev);
Oded Gabbay839c4802019-02-16 00:39:16 +02002533 if (rc)
2534 return rc;
2535
2536 WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_FIT_RDY);
2537
2538 rc = hl_poll_timeout(
2539 hdev,
2540 mmPSOC_GLOBAL_CONF_WARM_REBOOT,
2541 status,
2542 (status == CPU_BOOT_STATUS_SRAM_AVAIL),
2543 10000,
2544 cpu_timeout);
2545
2546 if (rc) {
2547 if (status == CPU_BOOT_STATUS_FIT_CORRUPTED)
2548 dev_err(hdev->dev,
2549 "ARM u-boot reports FIT image is corrupted\n");
2550 else
2551 dev_err(hdev->dev,
2552 "ARM Linux failed to load, %d\n", status);
2553 WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_NA);
2554 return -EIO;
2555 }
2556
2557 dev_info(hdev->dev, "Successfully loaded firmware to device\n");
2558
2559out:
2560 goya->hw_cap_initialized |= HW_CAP_CPU;
2561
2562 return 0;
2563}
2564
Omer Shpigelman0feaf862019-02-16 00:39:22 +02002565static int goya_mmu_init(struct hl_device *hdev)
2566{
2567 struct asic_fixed_properties *prop = &hdev->asic_prop;
2568 struct goya_device *goya = hdev->asic_specific;
2569 u64 hop0_addr;
2570 int rc, i;
2571
2572 if (!hdev->mmu_enable)
2573 return 0;
2574
2575 if (goya->hw_cap_initialized & HW_CAP_MMU)
2576 return 0;
2577
2578 hdev->dram_supports_virtual_memory = true;
Omer Shpigelman27ca384c2019-02-28 10:46:11 +02002579 hdev->dram_default_page_mapping = true;
Omer Shpigelman0feaf862019-02-16 00:39:22 +02002580
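	/*
	 * Each ASID gets its own hop-0 page table, laid out consecutively:
	 * ASID i's table starts at mmu_pgt_addr + i * mmu_hop_table_size
	 * (e.g. ASID 3 starts three table-sizes past the base).
	 */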
2581 for (i = 0 ; i < prop->max_asid ; i++) {
2582 hop0_addr = prop->mmu_pgt_addr +
2583 (i * prop->mmu_hop_table_size);
2584
2585 rc = goya_mmu_update_asid_hop0_addr(hdev, i, hop0_addr);
2586 if (rc) {
2587 dev_err(hdev->dev,
2588 "failed to set hop0 addr for asid %d\n", i);
2589 goto err;
2590 }
2591 }
2592
2593 goya->hw_cap_initialized |= HW_CAP_MMU;
2594
2595 /* init MMU cache manage page */
Oded Gabbay1e7c1ec2019-02-28 10:46:13 +02002596 WREG32(mmSTLB_CACHE_INV_BASE_39_8,
2597 lower_32_bits(MMU_CACHE_MNG_ADDR >> 8));
2598 WREG32(mmSTLB_CACHE_INV_BASE_49_40, MMU_CACHE_MNG_ADDR >> 40);
Omer Shpigelman0feaf862019-02-16 00:39:22 +02002599
2600 /* Remove follower feature due to performance bug */
2601 WREG32_AND(mmSTLB_STLB_FEATURE_EN,
2602 (~STLB_STLB_FEATURE_EN_FOLLOWER_EN_MASK));
2603
2604 hdev->asic_funcs->mmu_invalidate_cache(hdev, true);
2605
2606 WREG32(mmMMU_MMU_ENABLE, 1);
2607 WREG32(mmMMU_SPI_MASK, 0xF);
2608
2609 return 0;
2610
2611err:
2612 return rc;
2613}
2614
Oded Gabbay839c4802019-02-16 00:39:16 +02002615/*
2616 * goya_hw_init - Goya hardware initialization code
2617 *
2618 * @hdev: pointer to hl_device structure
2619 *
2620 * Returns 0 on success
2621 *
2622 */
2623static int goya_hw_init(struct hl_device *hdev)
2624{
2625 struct asic_fixed_properties *prop = &hdev->asic_prop;
2626 u32 val;
2627 int rc;
2628
2629 dev_info(hdev->dev, "Starting initialization of H/W\n");
2630
2631 /* Perform read from the device to make sure device is up */
2632 val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
2633
Oded Gabbayf8c8c7d2019-02-16 00:39:20 +02002634 /*
2635 * Let's mark in the H/W that we have reached this point. We check
2636 * this value in the reset_before_init function to understand whether
2637 * we need to reset the chip before doing H/W init. This register is
2638 * cleared by the H/W upon H/W reset
2639 */
2640 WREG32(mmPSOC_GLOBAL_CONF_APP_STATUS, HL_DEVICE_HW_STATE_DIRTY);
2641
Oded Gabbay839c4802019-02-16 00:39:16 +02002642 rc = goya_init_cpu(hdev, GOYA_CPU_TIMEOUT_USEC);
2643 if (rc) {
2644 dev_err(hdev->dev, "failed to initialize CPU\n");
2645 return rc;
2646 }
2647
2648 goya_tpc_mbist_workaround(hdev);
2649
2650 goya_init_golden_registers(hdev);
2651
2652 /*
2653 * After CPU initialization is finished, change DDR bar mapping inside
2654 * iATU to point to the start address of the MMU page tables
2655 */
2656 rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE +
2657 (MMU_PAGE_TABLES_ADDR & ~(prop->dram_pci_bar_size - 0x1ull)));
2658 if (rc) {
2659 dev_err(hdev->dev,
2660 "failed to map DDR bar to MMU page tables\n");
2661 return rc;
2662 }
2663
Omer Shpigelman0feaf862019-02-16 00:39:22 +02002664 rc = goya_mmu_init(hdev);
2665 if (rc)
2666 return rc;
2667
Oded Gabbay839c4802019-02-16 00:39:16 +02002668 goya_init_security(hdev);
2669
Oded Gabbay9494a8d2019-02-16 00:39:17 +02002670 goya_init_dma_qmans(hdev);
2671
2672 goya_init_mme_qmans(hdev);
2673
2674 goya_init_tpc_qmans(hdev);
2675
Oded Gabbay1251f232019-02-16 00:39:18 +02002676 /* MSI-X must be enabled before CPU queues are initialized */
2677 rc = goya_enable_msix(hdev);
2678 if (rc)
2679 goto disable_queues;
2680
Oded Gabbay9494a8d2019-02-16 00:39:17 +02002681 rc = goya_init_cpu_queues(hdev);
2682 if (rc) {
2683 dev_err(hdev->dev, "failed to initialize CPU H/W queues %d\n",
2684 rc);
Oded Gabbay1251f232019-02-16 00:39:18 +02002685 goto disable_msix;
Oded Gabbay9494a8d2019-02-16 00:39:17 +02002686 }
2687
Oded Gabbay839c4802019-02-16 00:39:16 +02002688 /* CPU initialization is finished, we can now move to 48 bit DMA mask */
2689 rc = pci_set_dma_mask(hdev->pdev, DMA_BIT_MASK(48));
2690 if (rc) {
2691 dev_warn(hdev->dev, "Unable to set pci dma mask to 48 bits\n");
2692 rc = pci_set_dma_mask(hdev->pdev, DMA_BIT_MASK(32));
2693 if (rc) {
2694 dev_err(hdev->dev,
2695 "Unable to set pci dma mask to 32 bits\n");
Oded Gabbay9494a8d2019-02-16 00:39:17 +02002696 goto disable_pci_access;
Oded Gabbay839c4802019-02-16 00:39:16 +02002697 }
2698 }
2699
2700 rc = pci_set_consistent_dma_mask(hdev->pdev, DMA_BIT_MASK(48));
2701 if (rc) {
2702 dev_warn(hdev->dev,
2703 "Unable to set pci consistent dma mask to 48 bits\n");
2704 rc = pci_set_consistent_dma_mask(hdev->pdev, DMA_BIT_MASK(32));
2705 if (rc) {
2706 dev_err(hdev->dev,
2707 "Unable to set pci consistent dma mask to 32 bits\n");
Oded Gabbay9494a8d2019-02-16 00:39:17 +02002708 goto disable_pci_access;
Oded Gabbay839c4802019-02-16 00:39:16 +02002709 }
2710 }
2711
2712 /* Perform read from the device to flush all MSI-X configuration */
2713 val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
2714
2715 return 0;
Oded Gabbay9494a8d2019-02-16 00:39:17 +02002716
2717disable_pci_access:
Tomer Tayar3110c602019-03-04 10:22:09 +02002718 hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
Oded Gabbay1251f232019-02-16 00:39:18 +02002719disable_msix:
2720 goya_disable_msix(hdev);
Oded Gabbay9494a8d2019-02-16 00:39:17 +02002721disable_queues:
2722 goya_disable_internal_queues(hdev);
2723 goya_disable_external_queues(hdev);
2724
2725 return rc;
Oded Gabbay839c4802019-02-16 00:39:16 +02002726}
2727
2728/*
2729 * goya_hw_fini - Goya hardware tear-down code
2730 *
2731 * @hdev: pointer to hl_device structure
2732 * @hard_reset: should we do hard reset to all engines or just reset the
2733 * compute/dma engines
2734 */
2735static void goya_hw_fini(struct hl_device *hdev, bool hard_reset)
2736{
2737 struct goya_device *goya = hdev->asic_specific;
2738 u32 reset_timeout_ms, status;
2739
2740 if (hdev->pldm)
2741 reset_timeout_ms = GOYA_PLDM_RESET_TIMEOUT_MSEC;
2742 else
2743 reset_timeout_ms = GOYA_RESET_TIMEOUT_MSEC;
2744
2745 if (hard_reset) {
2746 goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
2747 goya_disable_clk_rlx(hdev);
2748 goya_set_pll_refclk(hdev);
2749
2750 WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, RESET_ALL);
2751 dev_info(hdev->dev,
2752 "Issued HARD reset command, going to wait %dms\n",
2753 reset_timeout_ms);
2754 } else {
2755 WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, DMA_MME_TPC_RESET);
2756 dev_info(hdev->dev,
2757 "Issued SOFT reset command, going to wait %dms\n",
2758 reset_timeout_ms);
2759 }
2760
2761 /*
2762	 * After a hard reset we can't poll the BTM_FSM register because the
2763	 * PSOC itself is in reset. For either reset type we need to wait until
2764	 * the reset is deasserted
2765 */
2766 msleep(reset_timeout_ms);
2767
2768 status = RREG32(mmPSOC_GLOBAL_CONF_BTM_FSM);
2769 if (status & PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK)
2770 dev_err(hdev->dev,
2771 "Timeout while waiting for device to reset 0x%x\n",
2772 status);
2773
Oded Gabbayf8c8c7d2019-02-16 00:39:20 +02002774 if (!hard_reset) {
2775 goya->hw_cap_initialized &= ~(HW_CAP_DMA | HW_CAP_MME |
2776 HW_CAP_GOLDEN | HW_CAP_TPC);
2777 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
2778 GOYA_ASYNC_EVENT_ID_SOFT_RESET);
2779 return;
2780 }
2781
Oded Gabbay839c4802019-02-16 00:39:16 +02002782 /* Chicken bit to re-initiate boot sequencer flow */
2783 WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START,
2784 1 << PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_SHIFT);
2785 /* Move boot manager FSM to pre boot sequencer init state */
2786 WREG32(mmPSOC_GLOBAL_CONF_SW_BTM_FSM,
2787 0xA << PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_SHIFT);
2788
2789 goya->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q |
2790 HW_CAP_DDR_0 | HW_CAP_DDR_1 |
2791 HW_CAP_DMA | HW_CAP_MME |
2792 HW_CAP_MMU | HW_CAP_TPC_MBIST |
2793 HW_CAP_GOLDEN | HW_CAP_TPC);
Oded Gabbay1251f232019-02-16 00:39:18 +02002794 memset(goya->events_stat, 0, sizeof(goya->events_stat));
Oded Gabbay839c4802019-02-16 00:39:16 +02002795
2796 if (!hdev->pldm) {
2797 int rc;
2798		/* In case we are running inside a VM and the VM is
2799		 * shutting down, we need to make sure the CPU boot-loader
2800 * is running before we can continue the VM shutdown.
2801 * That is because the VM will send an FLR signal that
2802 * we must answer
2803 */
2804 dev_info(hdev->dev,
2805 "Going to wait up to %ds for CPU boot loader\n",
2806 GOYA_CPU_TIMEOUT_USEC / 1000 / 1000);
2807
2808 rc = hl_poll_timeout(
2809 hdev,
2810 mmPSOC_GLOBAL_CONF_WARM_REBOOT,
2811 status,
2812 (status == CPU_BOOT_STATUS_DRAM_RDY),
2813 10000,
2814 GOYA_CPU_TIMEOUT_USEC);
2815 if (rc)
2816 dev_err(hdev->dev,
2817 "failed to wait for CPU boot loader\n");
2818 }
2819}
2820
Oded Gabbay99b9d7b2019-02-16 00:39:13 +02002821int goya_suspend(struct hl_device *hdev)
2822{
Oded Gabbay9494a8d2019-02-16 00:39:17 +02002823 int rc;
2824
Tomer Tayar3110c602019-03-04 10:22:09 +02002825 rc = hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
Oded Gabbay9494a8d2019-02-16 00:39:17 +02002826 if (rc)
2827 dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
2828
2829 return rc;
Oded Gabbay99b9d7b2019-02-16 00:39:13 +02002830}
2831
2832int goya_resume(struct hl_device *hdev)
2833{
Oded Gabbay7cb51012019-03-03 22:29:20 +02002834 return goya_init_iatu(hdev);
Oded Gabbay99b9d7b2019-02-16 00:39:13 +02002835}
2836
Oded Gabbay5e6e0232019-02-27 12:15:16 +02002837static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
Oded Gabbaybe5d9262019-02-16 00:39:15 +02002838 u64 kaddress, phys_addr_t paddress, u32 size)
2839{
2840 int rc;
2841
2842 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
2843 VM_DONTCOPY | VM_NORESERVE;
2844
2845 rc = remap_pfn_range(vma, vma->vm_start, paddress >> PAGE_SHIFT,
2846 size, vma->vm_page_prot);
2847 if (rc)
2848		dev_err(hdev->dev, "remap_pfn_range error %d\n", rc);
2849
2850 return rc;
2851}
2852
Oded Gabbay5e6e0232019-02-27 12:15:16 +02002853static void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
Oded Gabbay9494a8d2019-02-16 00:39:17 +02002854{
2855 u32 db_reg_offset, db_value;
2856 bool invalid_queue = false;
2857
2858 switch (hw_queue_id) {
2859 case GOYA_QUEUE_ID_DMA_0:
2860 db_reg_offset = mmDMA_QM_0_PQ_PI;
2861 break;
2862
2863 case GOYA_QUEUE_ID_DMA_1:
2864 db_reg_offset = mmDMA_QM_1_PQ_PI;
2865 break;
2866
2867 case GOYA_QUEUE_ID_DMA_2:
2868 db_reg_offset = mmDMA_QM_2_PQ_PI;
2869 break;
2870
2871 case GOYA_QUEUE_ID_DMA_3:
2872 db_reg_offset = mmDMA_QM_3_PQ_PI;
2873 break;
2874
2875 case GOYA_QUEUE_ID_DMA_4:
2876 db_reg_offset = mmDMA_QM_4_PQ_PI;
2877 break;
2878
2879 case GOYA_QUEUE_ID_CPU_PQ:
2880 if (hdev->cpu_queues_enable)
2881 db_reg_offset = mmCPU_IF_PF_PQ_PI;
2882 else
2883 invalid_queue = true;
2884 break;
2885
2886 case GOYA_QUEUE_ID_MME:
2887 db_reg_offset = mmMME_QM_PQ_PI;
2888 break;
2889
2890 case GOYA_QUEUE_ID_TPC0:
2891 db_reg_offset = mmTPC0_QM_PQ_PI;
2892 break;
2893
2894 case GOYA_QUEUE_ID_TPC1:
2895 db_reg_offset = mmTPC1_QM_PQ_PI;
2896 break;
2897
2898 case GOYA_QUEUE_ID_TPC2:
2899 db_reg_offset = mmTPC2_QM_PQ_PI;
2900 break;
2901
2902 case GOYA_QUEUE_ID_TPC3:
2903 db_reg_offset = mmTPC3_QM_PQ_PI;
2904 break;
2905
2906 case GOYA_QUEUE_ID_TPC4:
2907 db_reg_offset = mmTPC4_QM_PQ_PI;
2908 break;
2909
2910 case GOYA_QUEUE_ID_TPC5:
2911 db_reg_offset = mmTPC5_QM_PQ_PI;
2912 break;
2913
2914 case GOYA_QUEUE_ID_TPC6:
2915 db_reg_offset = mmTPC6_QM_PQ_PI;
2916 break;
2917
2918 case GOYA_QUEUE_ID_TPC7:
2919 db_reg_offset = mmTPC7_QM_PQ_PI;
2920 break;
2921
2922 default:
2923 invalid_queue = true;
2924 }
2925
2926 if (invalid_queue) {
2927 /* Should never get here */
2928 dev_err(hdev->dev, "h/w queue %d is invalid. Can't set pi\n",
2929 hw_queue_id);
2930 return;
2931 }
2932
2933 db_value = pi;
2934
2935 /* ring the doorbell */
2936 WREG32(db_reg_offset, db_value);
2937
2938 if (hw_queue_id == GOYA_QUEUE_ID_CPU_PQ)
2939 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
2940 GOYA_ASYNC_EVENT_ID_PI_UPDATE);
2941}
2942
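/*
 * Usage note for goya_ring_doorbell() above: callers pass the new producer
 * index as pi; for the CPU queue, the extra GIC write is what actually
 * signals the embedded CPU that PI has advanced.
 */
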
2943void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val)
2944{
2945 /* Not needed in Goya */
2946}
2947
Oded Gabbay5e6e0232019-02-27 12:15:16 +02002948static void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size,
Oded Gabbay99b9d7b2019-02-16 00:39:13 +02002949 dma_addr_t *dma_handle, gfp_t flags)
2950{
2951 return dma_alloc_coherent(&hdev->pdev->dev, size, dma_handle, flags);
2952}
2953
Oded Gabbay5e6e0232019-02-27 12:15:16 +02002954static void goya_dma_free_coherent(struct hl_device *hdev, size_t size,
2955 void *cpu_addr, dma_addr_t dma_handle)
Oded Gabbay99b9d7b2019-02-16 00:39:13 +02002956{
2957 dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, dma_handle);
2958}
2959
Oded Gabbay9494a8d2019-02-16 00:39:17 +02002960void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
2961 dma_addr_t *dma_handle, u16 *queue_len)
2962{
2963 void *base;
2964 u32 offset;
2965
2966 *dma_handle = hdev->asic_prop.sram_base_address;
2967
Oded Gabbay7c222782019-03-03 10:23:29 +02002968 base = (void *) hdev->pcie_bar[SRAM_CFG_BAR_ID];
Oded Gabbay9494a8d2019-02-16 00:39:17 +02002969
2970 switch (queue_id) {
2971 case GOYA_QUEUE_ID_MME:
2972 offset = MME_QMAN_BASE_OFFSET;
2973 *queue_len = MME_QMAN_LENGTH;
2974 break;
2975 case GOYA_QUEUE_ID_TPC0:
2976 offset = TPC0_QMAN_BASE_OFFSET;
2977 *queue_len = TPC_QMAN_LENGTH;
2978 break;
2979 case GOYA_QUEUE_ID_TPC1:
2980 offset = TPC1_QMAN_BASE_OFFSET;
2981 *queue_len = TPC_QMAN_LENGTH;
2982 break;
2983 case GOYA_QUEUE_ID_TPC2:
2984 offset = TPC2_QMAN_BASE_OFFSET;
2985 *queue_len = TPC_QMAN_LENGTH;
2986 break;
2987 case GOYA_QUEUE_ID_TPC3:
2988 offset = TPC3_QMAN_BASE_OFFSET;
2989 *queue_len = TPC_QMAN_LENGTH;
2990 break;
2991 case GOYA_QUEUE_ID_TPC4:
2992 offset = TPC4_QMAN_BASE_OFFSET;
2993 *queue_len = TPC_QMAN_LENGTH;
2994 break;
2995 case GOYA_QUEUE_ID_TPC5:
2996 offset = TPC5_QMAN_BASE_OFFSET;
2997 *queue_len = TPC_QMAN_LENGTH;
2998 break;
2999 case GOYA_QUEUE_ID_TPC6:
3000 offset = TPC6_QMAN_BASE_OFFSET;
3001 *queue_len = TPC_QMAN_LENGTH;
3002 break;
3003 case GOYA_QUEUE_ID_TPC7:
3004 offset = TPC7_QMAN_BASE_OFFSET;
3005 *queue_len = TPC_QMAN_LENGTH;
3006 break;
3007 default:
3008 dev_err(hdev->dev, "Got invalid queue id %d\n", queue_id);
3009 return NULL;
3010 }
3011
3012 base += offset;
3013 *dma_handle += offset;
3014
3015 return base;
3016}
3017
Oded Gabbay5e6e0232019-02-27 12:15:16 +02003018static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003019{
3020 struct goya_device *goya = hdev->asic_specific;
3021 struct packet_msg_prot *fence_pkt;
3022 u32 *fence_ptr;
3023 dma_addr_t fence_dma_addr;
3024 struct hl_cb *cb;
Omer Shpigelman3dccd182019-02-28 10:46:16 +02003025 u32 tmp, timeout;
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003026 int rc;
3027
Omer Shpigelman3dccd182019-02-28 10:46:16 +02003028 if (hdev->pldm)
3029 timeout = GOYA_PLDM_QMAN0_TIMEOUT_USEC;
3030 else
3031 timeout = HL_DEVICE_TIMEOUT_USEC;
3032
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003033 if (!hdev->asic_funcs->is_device_idle(hdev)) {
3034 dev_err_ratelimited(hdev->dev,
3035 "Can't send KMD job on QMAN0 if device is not idle\n");
Oded Gabbayaf5f7ee2019-02-28 10:46:21 +02003036 return -EBUSY;
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003037 }
3038
3039 fence_ptr = hdev->asic_funcs->dma_pool_zalloc(hdev, 4, GFP_KERNEL,
3040 &fence_dma_addr);
3041 if (!fence_ptr) {
3042 dev_err(hdev->dev,
3043 "Failed to allocate fence memory for QMAN0\n");
3044 return -ENOMEM;
3045 }
3046
3047 *fence_ptr = 0;
3048
3049 if (goya->hw_cap_initialized & HW_CAP_MMU) {
3050 WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_FULLY_TRUSTED);
3051 RREG32(mmDMA_QM_0_GLBL_PROT);
3052 }
3053
3054 /*
3055	 * The goya CS parser saves space for 2x packet_msg_prot at the end of
3056	 * the CB; synchronized kernel jobs only need space for one
3057 */
3058 job->job_cb_size -= sizeof(struct packet_msg_prot);
3059
3060 cb = job->patched_cb;
3061
3062 fence_pkt = (struct packet_msg_prot *) (uintptr_t) (cb->kernel_address +
3063 job->job_cb_size - sizeof(struct packet_msg_prot));
3064
Tomer Tayardf697bc2019-02-28 10:46:22 +02003065 tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003066 (1 << GOYA_PKT_CTL_EB_SHIFT) |
3067 (1 << GOYA_PKT_CTL_MB_SHIFT);
Tomer Tayardf697bc2019-02-28 10:46:22 +02003068 fence_pkt->ctl = cpu_to_le32(tmp);
3069 fence_pkt->value = cpu_to_le32(GOYA_QMAN0_FENCE_VAL);
3070 fence_pkt->addr = cpu_to_le64(fence_dma_addr +
3071 hdev->asic_prop.host_phys_base_address);
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003072
3073 rc = hl_hw_queue_send_cb_no_cmpl(hdev, GOYA_QUEUE_ID_DMA_0,
3074 job->job_cb_size, cb->bus_address);
3075 if (rc) {
3076 dev_err(hdev->dev, "Failed to send CB on QMAN0, %d\n", rc);
3077 goto free_fence_ptr;
3078 }
3079
Omer Shpigelman3dccd182019-02-28 10:46:16 +02003080 rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) fence_ptr, timeout,
3081 &tmp);
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003082
3083 hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_DMA_0);
3084
3085 if ((rc) || (tmp != GOYA_QMAN0_FENCE_VAL)) {
3086 dev_err(hdev->dev, "QMAN0 Job hasn't finished in time\n");
3087 rc = -ETIMEDOUT;
3088 }
3089
3090free_fence_ptr:
3091 hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_ptr,
3092 fence_dma_addr);
3093
3094 if (goya->hw_cap_initialized & HW_CAP_MMU) {
3095 WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_PARTLY_TRUSTED);
3096 RREG32(mmDMA_QM_0_GLBL_PROT);
3097 }
3098
3099 return rc;
3100}
3101
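/*
 * Flow summary for goya_send_job_on_qman0() above: a 4-byte fence is
 * allocated from the DMA pool, a MSG_PROT packet that writes
 * GOYA_QMAN0_FENCE_VAL to the fence is patched into the CB's reserved tail,
 * the CB is sent on DMA queue 0, and the fence is then polled until it
 * changes or the timeout expires.
 */
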
Oded Gabbay9494a8d2019-02-16 00:39:17 +02003102int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
3103 u32 timeout, long *result)
3104{
3105 struct goya_device *goya = hdev->asic_specific;
Oded Gabbay9494a8d2019-02-16 00:39:17 +02003106
3107 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) {
3108 if (result)
3109 *result = 0;
3110 return 0;
3111 }
3112
Tomer Tayar3110c602019-03-04 10:22:09 +02003113 return hl_fw_send_cpu_message(hdev, GOYA_QUEUE_ID_CPU_PQ, msg, len,
3114 timeout, result);
Oded Gabbay9494a8d2019-02-16 00:39:17 +02003115}
3116
3117int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
3118{
3119 struct packet_msg_prot *fence_pkt;
3120 dma_addr_t pkt_dma_addr;
3121 u32 fence_val, tmp;
3122 dma_addr_t fence_dma_addr;
3123 u32 *fence_ptr;
3124 int rc;
3125
3126 fence_val = GOYA_QMAN0_FENCE_VAL;
3127
3128 fence_ptr = hdev->asic_funcs->dma_pool_zalloc(hdev, 4, GFP_KERNEL,
3129 &fence_dma_addr);
3130 if (!fence_ptr) {
3131 dev_err(hdev->dev,
3132 "Failed to allocate memory for queue testing\n");
3133 return -ENOMEM;
3134 }
3135
3136 *fence_ptr = 0;
3137
3138 fence_pkt = hdev->asic_funcs->dma_pool_zalloc(hdev,
3139 sizeof(struct packet_msg_prot),
3140 GFP_KERNEL, &pkt_dma_addr);
3141 if (!fence_pkt) {
3142 dev_err(hdev->dev,
3143 "Failed to allocate packet for queue testing\n");
3144 rc = -ENOMEM;
3145 goto free_fence_ptr;
3146 }
3147
Tomer Tayardf697bc2019-02-28 10:46:22 +02003148 tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
Oded Gabbay9494a8d2019-02-16 00:39:17 +02003149 (1 << GOYA_PKT_CTL_EB_SHIFT) |
3150 (1 << GOYA_PKT_CTL_MB_SHIFT);
Tomer Tayardf697bc2019-02-28 10:46:22 +02003151 fence_pkt->ctl = cpu_to_le32(tmp);
3152 fence_pkt->value = cpu_to_le32(fence_val);
3153 fence_pkt->addr = cpu_to_le64(fence_dma_addr +
3154 hdev->asic_prop.host_phys_base_address);
Oded Gabbay9494a8d2019-02-16 00:39:17 +02003155
3156 rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id,
3157 sizeof(struct packet_msg_prot),
3158 pkt_dma_addr);
3159 if (rc) {
3160 dev_err(hdev->dev,
3161 "Failed to send fence packet\n");
3162 goto free_pkt;
3163 }
3164
3165 rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) fence_ptr,
3166 GOYA_TEST_QUEUE_WAIT_USEC, &tmp);
3167
3168 hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
3169
3170 if ((!rc) && (tmp == fence_val)) {
3171 dev_info(hdev->dev,
3172 "queue test on H/W queue %d succeeded\n",
3173 hw_queue_id);
3174 } else {
3175 dev_err(hdev->dev,
3176 "H/W queue %d test failed (scratch(0x%08llX) == 0x%08X)\n",
3177 hw_queue_id, (unsigned long long) fence_dma_addr, tmp);
3178 rc = -EINVAL;
3179 }
3180
3181free_pkt:
3182 hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_pkt,
3183 pkt_dma_addr);
3184free_fence_ptr:
3185 hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_ptr,
3186 fence_dma_addr);
3187 return rc;
3188}
3189
3190int goya_test_cpu_queue(struct hl_device *hdev)
3191{
Tomer Tayar3110c602019-03-04 10:22:09 +02003192 struct goya_device *goya = hdev->asic_specific;
Oded Gabbay9494a8d2019-02-16 00:39:17 +02003193
Tomer Tayar3110c602019-03-04 10:22:09 +02003194 /*
3195	 * Check the capability here because send_cpu_message() won't update
3196	 * the result value if the capability isn't initialized
3197 */
3198 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
3199 return 0;
Oded Gabbay9494a8d2019-02-16 00:39:17 +02003200
Tomer Tayar3110c602019-03-04 10:22:09 +02003201 return hl_fw_test_cpu_queue(hdev);
Oded Gabbay9494a8d2019-02-16 00:39:17 +02003202}
3203
3204static int goya_test_queues(struct hl_device *hdev)
3205{
3206 struct goya_device *goya = hdev->asic_specific;
3207 int i, rc, ret_val = 0;
3208
3209 for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
3210 rc = goya_test_queue(hdev, i);
3211 if (rc)
3212 ret_val = -EINVAL;
3213 }
3214
3215 if (hdev->cpu_queues_enable) {
3216 rc = goya->test_cpu_queue(hdev);
3217 if (rc)
3218 ret_val = -EINVAL;
3219 }
3220
3221 return ret_val;
3222}
3223
Oded Gabbay5e6e0232019-02-27 12:15:16 +02003224static void *goya_dma_pool_zalloc(struct hl_device *hdev, size_t size,
3225 gfp_t mem_flags, dma_addr_t *dma_handle)
Oded Gabbay9494a8d2019-02-16 00:39:17 +02003226{
3227 if (size > GOYA_DMA_POOL_BLK_SIZE)
3228 return NULL;
3229
3230 return dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle);
3231}
3232
Oded Gabbay5e6e0232019-02-27 12:15:16 +02003233static void goya_dma_pool_free(struct hl_device *hdev, void *vaddr,
3234 dma_addr_t dma_addr)
Oded Gabbay9494a8d2019-02-16 00:39:17 +02003235{
3236 dma_pool_free(hdev->dma_pool, vaddr, dma_addr);
3237}
3238
Oded Gabbay5e6e0232019-02-27 12:15:16 +02003239static void *goya_cpu_accessible_dma_pool_alloc(struct hl_device *hdev,
3240 size_t size, dma_addr_t *dma_handle)
Oded Gabbay9494a8d2019-02-16 00:39:17 +02003241{
Tomer Tayar3110c602019-03-04 10:22:09 +02003242 return hl_fw_cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
Oded Gabbay9494a8d2019-02-16 00:39:17 +02003243}
3244
Oded Gabbay5e6e0232019-02-27 12:15:16 +02003245static void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev,
3246 size_t size, void *vaddr)
Oded Gabbay9494a8d2019-02-16 00:39:17 +02003247{
Tomer Tayar3110c602019-03-04 10:22:09 +02003248 hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr);
Oded Gabbay9494a8d2019-02-16 00:39:17 +02003249}
3250
Oded Gabbay5e6e0232019-02-27 12:15:16 +02003251static int goya_dma_map_sg(struct hl_device *hdev, struct scatterlist *sg,
3252 int nents, enum dma_data_direction dir)
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003253{
3254 if (!dma_map_sg(&hdev->pdev->dev, sg, nents, dir))
3255 return -ENOMEM;
3256
3257 return 0;
3258}
3259
Oded Gabbay5e6e0232019-02-27 12:15:16 +02003260static void goya_dma_unmap_sg(struct hl_device *hdev, struct scatterlist *sg,
3261 int nents, enum dma_data_direction dir)
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003262{
3263 dma_unmap_sg(&hdev->pdev->dev, sg, nents, dir);
3264}
3265
Oded Gabbay5e6e0232019-02-27 12:15:16 +02003266u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt)
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003267{
3268 struct scatterlist *sg, *sg_next_iter;
Oded Gabbaye99f16832019-02-24 11:55:26 +02003269 u32 count, dma_desc_cnt;
3270 u64 len, len_next;
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003271 dma_addr_t addr, addr_next;
3272
3273 dma_desc_cnt = 0;
3274
3275 for_each_sg(sgt->sgl, sg, sgt->nents, count) {
3276
3277 len = sg_dma_len(sg);
3278 addr = sg_dma_address(sg);
3279
3280 if (len == 0)
3281 break;
3282
3283 while ((count + 1) < sgt->nents) {
3284 sg_next_iter = sg_next(sg);
3285 len_next = sg_dma_len(sg_next_iter);
3286 addr_next = sg_dma_address(sg_next_iter);
3287
3288 if (len_next == 0)
3289 break;
3290
3291 if ((addr + len == addr_next) &&
3292 (len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
3293 len += len_next;
3294 count++;
3295 sg = sg_next_iter;
3296 } else {
3297 break;
3298 }
3299 }
3300
3301 dma_desc_cnt++;
3302 }
3303
3304 return dma_desc_cnt * sizeof(struct packet_lin_dma);
3305}
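
/*
 * Worked example for goya_get_dma_desc_list_size() above (illustrative
 * numbers): a mapped sg table with chunks {0x1000, 4KB}, {0x2000, 4KB},
 * {0x9000, 4KB} needs two descriptors - the first two chunks are contiguous
 * (0x1000 + 4KB == 0x2000) and merge into one, while the third starts a new
 * one - so the returned size is 2 * sizeof(struct packet_lin_dma).
 */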
3306
3307static int goya_pin_memory_before_cs(struct hl_device *hdev,
3308 struct hl_cs_parser *parser,
3309 struct packet_lin_dma *user_dma_pkt,
3310 u64 addr, enum dma_data_direction dir)
3311{
3312 struct hl_userptr *userptr;
3313 int rc;
3314
Tomer Tayardf697bc2019-02-28 10:46:22 +02003315 if (hl_userptr_is_pinned(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003316 parser->job_userptr_list, &userptr))
3317 goto already_pinned;
3318
3319 userptr = kzalloc(sizeof(*userptr), GFP_ATOMIC);
3320 if (!userptr)
3321 return -ENOMEM;
3322
Tomer Tayardf697bc2019-02-28 10:46:22 +02003323 rc = hl_pin_host_memory(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
3324 userptr);
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003325 if (rc)
3326 goto free_userptr;
3327
3328 list_add_tail(&userptr->job_node, parser->job_userptr_list);
3329
3330 rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
3331 userptr->sgt->nents, dir);
3332 if (rc) {
3333 dev_err(hdev->dev, "failed to map sgt with DMA region\n");
3334 goto unpin_memory;
3335 }
3336
3337 userptr->dma_mapped = true;
3338 userptr->dir = dir;
3339
3340already_pinned:
3341 parser->patched_cb_size +=
3342 goya_get_dma_desc_list_size(hdev, userptr->sgt);
3343
3344 return 0;
3345
3346unpin_memory:
3347 hl_unpin_host_memory(hdev, userptr);
3348free_userptr:
3349 kfree(userptr);
3350 return rc;
3351}
3352
3353static int goya_validate_dma_pkt_host(struct hl_device *hdev,
3354 struct hl_cs_parser *parser,
3355 struct packet_lin_dma *user_dma_pkt)
3356{
3357 u64 device_memory_addr, addr;
3358 enum dma_data_direction dir;
3359 enum goya_dma_direction user_dir;
3360 bool sram_addr = true;
3361 bool skip_host_mem_pin = false;
3362 bool user_memset;
Tomer Tayardf697bc2019-02-28 10:46:22 +02003363 u32 ctl;
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003364 int rc = 0;
3365
Tomer Tayardf697bc2019-02-28 10:46:22 +02003366 ctl = le32_to_cpu(user_dma_pkt->ctl);
3367
3368 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003369 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3370
Tomer Tayardf697bc2019-02-28 10:46:22 +02003371 user_memset = (ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003372 GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
3373
3374 switch (user_dir) {
3375 case DMA_HOST_TO_DRAM:
3376 dev_dbg(hdev->dev, "DMA direction is HOST --> DRAM\n");
3377 dir = DMA_TO_DEVICE;
3378 sram_addr = false;
Tomer Tayardf697bc2019-02-28 10:46:22 +02003379 addr = le64_to_cpu(user_dma_pkt->src_addr);
3380 device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003381 if (user_memset)
3382 skip_host_mem_pin = true;
3383 break;
3384
3385 case DMA_DRAM_TO_HOST:
3386 dev_dbg(hdev->dev, "DMA direction is DRAM --> HOST\n");
3387 dir = DMA_FROM_DEVICE;
3388 sram_addr = false;
Tomer Tayardf697bc2019-02-28 10:46:22 +02003389 addr = le64_to_cpu(user_dma_pkt->dst_addr);
3390 device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003391 break;
3392
3393 case DMA_HOST_TO_SRAM:
3394 dev_dbg(hdev->dev, "DMA direction is HOST --> SRAM\n");
3395 dir = DMA_TO_DEVICE;
Tomer Tayardf697bc2019-02-28 10:46:22 +02003396 addr = le64_to_cpu(user_dma_pkt->src_addr);
3397 device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003398 if (user_memset)
3399 skip_host_mem_pin = true;
3400 break;
3401
3402 case DMA_SRAM_TO_HOST:
3403 dev_dbg(hdev->dev, "DMA direction is SRAM --> HOST\n");
3404 dir = DMA_FROM_DEVICE;
Tomer Tayardf697bc2019-02-28 10:46:22 +02003405 addr = le64_to_cpu(user_dma_pkt->dst_addr);
3406 device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003407 break;
3408 default:
3409 dev_err(hdev->dev, "DMA direction is undefined\n");
3410 return -EFAULT;
3411 }
3412
3413 if (parser->ctx_id != HL_KERNEL_ASID_ID) {
3414 if (sram_addr) {
3415 if (!hl_mem_area_inside_range(device_memory_addr,
Tomer Tayardf697bc2019-02-28 10:46:22 +02003416 le32_to_cpu(user_dma_pkt->tsize),
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003417 hdev->asic_prop.sram_user_base_address,
3418 hdev->asic_prop.sram_end_address)) {
3419
3420 dev_err(hdev->dev,
3421 "SRAM address 0x%llx + 0x%x is invalid\n",
3422 device_memory_addr,
3423 user_dma_pkt->tsize);
3424 return -EFAULT;
3425 }
3426 } else {
3427 if (!hl_mem_area_inside_range(device_memory_addr,
Tomer Tayardf697bc2019-02-28 10:46:22 +02003428 le32_to_cpu(user_dma_pkt->tsize),
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003429 hdev->asic_prop.dram_user_base_address,
3430 hdev->asic_prop.dram_end_address)) {
3431
3432 dev_err(hdev->dev,
3433 "DRAM address 0x%llx + 0x%x is invalid\n",
3434 device_memory_addr,
3435 user_dma_pkt->tsize);
3436 return -EFAULT;
3437 }
3438 }
3439 }
3440
3441 if (skip_host_mem_pin)
3442 parser->patched_cb_size += sizeof(*user_dma_pkt);
3443 else {
3444 if ((dir == DMA_TO_DEVICE) &&
3445 (parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1)) {
3446 dev_err(hdev->dev,
3447 "Can't DMA from host on queue other then 1\n");
3448 return -EFAULT;
3449 }
3450
3451 rc = goya_pin_memory_before_cs(hdev, parser, user_dma_pkt,
3452 addr, dir);
3453 }
3454
3455 return rc;
3456}
3457
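/*
 * goya_validate_dma_pkt_no_host - validate a LIN_DMA packet that moves
 * data between SRAM and DRAM only. Both addresses must fall inside the
 * user-accessible ranges; no host memory is involved so nothing is pinned.
 */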
3458static int goya_validate_dma_pkt_no_host(struct hl_device *hdev,
3459 struct hl_cs_parser *parser,
3460 struct packet_lin_dma *user_dma_pkt)
3461{
3462 u64 sram_memory_addr, dram_memory_addr;
3463 enum goya_dma_direction user_dir;
Tomer Tayardf697bc2019-02-28 10:46:22 +02003464 u32 ctl;
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003465
Tomer Tayardf697bc2019-02-28 10:46:22 +02003466 ctl = le32_to_cpu(user_dma_pkt->ctl);
3467 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003468 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3469
3470 if (user_dir == DMA_DRAM_TO_SRAM) {
3471 dev_dbg(hdev->dev, "DMA direction is DRAM --> SRAM\n");
Tomer Tayardf697bc2019-02-28 10:46:22 +02003472 dram_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3473 sram_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003474 } else {
3475 dev_dbg(hdev->dev, "DMA direction is SRAM --> DRAM\n");
Tomer Tayardf697bc2019-02-28 10:46:22 +02003476 sram_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3477 dram_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003478 }
3479
Tomer Tayardf697bc2019-02-28 10:46:22 +02003480 if (!hl_mem_area_inside_range(sram_memory_addr,
3481 le32_to_cpu(user_dma_pkt->tsize),
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003482 hdev->asic_prop.sram_user_base_address,
3483 hdev->asic_prop.sram_end_address)) {
3484 dev_err(hdev->dev, "SRAM address 0x%llx + 0x%x is invalid\n",
3485 sram_memory_addr, user_dma_pkt->tsize);
3486 return -EFAULT;
3487 }
3488
Tomer Tayardf697bc2019-02-28 10:46:22 +02003489 if (!hl_mem_area_inside_range(dram_memory_addr,
3490 le32_to_cpu(user_dma_pkt->tsize),
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003491 hdev->asic_prop.dram_user_base_address,
3492 hdev->asic_prop.dram_end_address)) {
3493 dev_err(hdev->dev, "DRAM address 0x%llx + 0x%x is invalid\n",
3494 dram_memory_addr, user_dma_pkt->tsize);
3495 return -EFAULT;
3496 }
3497
3498 parser->patched_cb_size += sizeof(*user_dma_pkt);
3499
3500 return 0;
3501}
3502
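/*
 * goya_validate_dma_pkt_no_mmu - entry point for LIN_DMA validation when
 * the MMU is disabled. Rejects zero-sized transfers (H/W bug) and
 * dispatches to the host or no-host validator according to the DMA
 * direction.
 */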
3503static int goya_validate_dma_pkt_no_mmu(struct hl_device *hdev,
3504 struct hl_cs_parser *parser,
3505 struct packet_lin_dma *user_dma_pkt)
3506{
3507 enum goya_dma_direction user_dir;
Tomer Tayardf697bc2019-02-28 10:46:22 +02003508 u32 ctl;
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003509 int rc;
3510
3511 dev_dbg(hdev->dev, "DMA packet details:\n");
3512 dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr);
3513 dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr);
3514 dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize);
3515
Tomer Tayardf697bc2019-02-28 10:46:22 +02003516 ctl = le32_to_cpu(user_dma_pkt->ctl);
3517 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003518 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3519
3520 /*
3521 * Special handling for DMA with size 0. The H/W has a bug where
3522 * this can cause the QMAN DMA to get stuck, so block it here.
3523 */
3524 if (user_dma_pkt->tsize == 0) {
3525 dev_err(hdev->dev,
3526 "Got DMA with size 0, might reset the device\n");
3527 return -EINVAL;
3528 }
3529
3530 if ((user_dir == DMA_DRAM_TO_SRAM) || (user_dir == DMA_SRAM_TO_DRAM))
3531 rc = goya_validate_dma_pkt_no_host(hdev, parser, user_dma_pkt);
3532 else
3533 rc = goya_validate_dma_pkt_host(hdev, parser, user_dma_pkt);
3534
3535 return rc;
3536}
3537
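/*
 * goya_validate_dma_pkt_mmu - LIN_DMA validation when the MMU is enabled.
 * No patching is needed; only the HW-23 host-read restriction and the
 * zero-size check are enforced.
 */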
3538static int goya_validate_dma_pkt_mmu(struct hl_device *hdev,
3539 struct hl_cs_parser *parser,
3540 struct packet_lin_dma *user_dma_pkt)
3541{
3542 dev_dbg(hdev->dev, "DMA packet details:\n");
3543 dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr);
3544 dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr);
3545 dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize);
3546
3547 /*
3548 * WA for HW-23.
3549 * We can't allow user to read from Host using QMANs other than 1.
3550 */
3551 if (parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1 &&
Tomer Tayardf697bc2019-02-28 10:46:22 +02003552 hl_mem_area_inside_range(le64_to_cpu(user_dma_pkt->src_addr),
3553 le32_to_cpu(user_dma_pkt->tsize),
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003554 hdev->asic_prop.va_space_host_start_address,
3555 hdev->asic_prop.va_space_host_end_address)) {
3556 dev_err(hdev->dev,
3557 "Can't DMA from host on queue other then 1\n");
3558 return -EFAULT;
3559 }
3560
3561 if (user_dma_pkt->tsize == 0) {
3562 dev_err(hdev->dev,
3563 "Got DMA with size 0, might reset the device\n");
3564 return -EINVAL;
3565 }
3566
3567 parser->patched_cb_size += sizeof(*user_dma_pkt);
3568
3569 return 0;
3570}
3571
3572static int goya_validate_wreg32(struct hl_device *hdev,
3573 struct hl_cs_parser *parser,
3574 struct packet_wreg32 *wreg_pkt)
3575{
3576 struct goya_device *goya = hdev->asic_specific;
3577 u32 sob_start_addr, sob_end_addr;
3578 u16 reg_offset;
3579
Tomer Tayardf697bc2019-02-28 10:46:22 +02003580 reg_offset = le32_to_cpu(wreg_pkt->ctl) &
3581 GOYA_PKT_WREG32_CTL_REG_OFFSET_MASK;
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003582
3583 dev_dbg(hdev->dev, "WREG32 packet details:\n");
3584 dev_dbg(hdev->dev, "reg_offset == 0x%x\n", reg_offset);
3585 dev_dbg(hdev->dev, "value == 0x%x\n", wreg_pkt->value);
3586
Oded Gabbay6765fda2019-02-28 10:46:14 +02003587 if (reg_offset != (mmDMA_CH_0_WR_COMP_ADDR_LO & 0x1FFF)) {
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003588 dev_err(hdev->dev, "WREG32 packet with illegal address 0x%x\n",
3589 reg_offset);
3590 return -EPERM;
3591 }
3592
3593 /*
3594 * With MMU, DMA channels are not secured, so it doesn't matter where
3595 * the WR COMP will be written to because it will go out with
3596 * non-secured property
3597 */
3598 if (goya->hw_cap_initialized & HW_CAP_MMU)
3599 return 0;
3600
3601 sob_start_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
3602 sob_end_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1023);
3603
Tomer Tayardf697bc2019-02-28 10:46:22 +02003604 if ((le32_to_cpu(wreg_pkt->value) < sob_start_addr) ||
3605 (le32_to_cpu(wreg_pkt->value) > sob_end_addr)) {
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003606
3607 dev_err(hdev->dev, "WREG32 packet with illegal value 0x%x\n",
3608 wreg_pkt->value);
3609 return -EPERM;
3610 }
3611
3612 return 0;
3613}
3614
3615static int goya_validate_cb(struct hl_device *hdev,
3616 struct hl_cs_parser *parser, bool is_mmu)
3617{
3618 u32 cb_parsed_length = 0;
3619 int rc = 0;
3620
3621 parser->patched_cb_size = 0;
3622
3623	/* user_cb_size is more than 0 so the loop will always be executed */
3624 while (cb_parsed_length < parser->user_cb_size) {
3625 enum packet_id pkt_id;
3626 u16 pkt_size;
3627 void *user_pkt;
3628
3629 user_pkt = (void *) (uintptr_t)
3630 (parser->user_cb->kernel_address + cb_parsed_length);
3631
3632 pkt_id = (enum packet_id) (((*(u64 *) user_pkt) &
3633 PACKET_HEADER_PACKET_ID_MASK) >>
3634 PACKET_HEADER_PACKET_ID_SHIFT);
3635
3636 pkt_size = goya_packet_sizes[pkt_id];
3637 cb_parsed_length += pkt_size;
3638 if (cb_parsed_length > parser->user_cb_size) {
3639 dev_err(hdev->dev,
3640 "packet 0x%x is out of CB boundary\n", pkt_id);
3641 rc = -EINVAL;
3642 break;
3643 }
3644
3645 switch (pkt_id) {
3646 case PACKET_WREG_32:
3647 /*
3648 * Although it is validated after copy in patch_cb(),
3649 * need to validate here as well because patch_cb() is
3650 * not called in MMU path while this function is called
3651 */
3652 rc = goya_validate_wreg32(hdev, parser, user_pkt);
3653 break;
3654
3655 case PACKET_WREG_BULK:
3656 dev_err(hdev->dev,
3657 "User not allowed to use WREG_BULK\n");
3658 rc = -EPERM;
3659 break;
3660
3661 case PACKET_MSG_PROT:
3662 dev_err(hdev->dev,
3663 "User not allowed to use MSG_PROT\n");
3664 rc = -EPERM;
3665 break;
3666
3667 case PACKET_CP_DMA:
3668 dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
3669 rc = -EPERM;
3670 break;
3671
3672 case PACKET_STOP:
3673 dev_err(hdev->dev, "User not allowed to use STOP\n");
3674 rc = -EPERM;
3675 break;
3676
3677 case PACKET_LIN_DMA:
3678 if (is_mmu)
3679 rc = goya_validate_dma_pkt_mmu(hdev, parser,
3680 user_pkt);
3681 else
3682 rc = goya_validate_dma_pkt_no_mmu(hdev, parser,
3683 user_pkt);
3684 break;
3685
3686 case PACKET_MSG_LONG:
3687 case PACKET_MSG_SHORT:
3688 case PACKET_FENCE:
3689 case PACKET_NOP:
3690 parser->patched_cb_size += pkt_size;
3691 break;
3692
3693 default:
3694 dev_err(hdev->dev, "Invalid packet header 0x%x\n",
3695 pkt_id);
3696 rc = -EINVAL;
3697 break;
3698 }
3699
3700 if (rc)
3701 break;
3702 }
3703
3704 /*
3705 * The new CB should have space at the end for two MSG_PROT packets:
3706 * 1. A packet that will act as a completion packet
3707 * 2. A packet that will generate MSI-X interrupt
3708 */
3709 parser->patched_cb_size += sizeof(struct packet_msg_prot) * 2;
3710
3711 return rc;
3712}
3713
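/*
 * goya_patch_dma_packet - expand a single user LIN_DMA packet into one
 * packet per merged SG chunk. EB is cleared on all but the first packet
 * and the user's RDCOMP/WRCOMP bits are restored only on the last one, so
 * the completion semantics of the original packet are preserved.
 */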
3714static int goya_patch_dma_packet(struct hl_device *hdev,
3715 struct hl_cs_parser *parser,
3716 struct packet_lin_dma *user_dma_pkt,
3717 struct packet_lin_dma *new_dma_pkt,
3718 u32 *new_dma_pkt_size)
3719{
3720 struct hl_userptr *userptr;
3721 struct scatterlist *sg, *sg_next_iter;
Oded Gabbaye99f16832019-02-24 11:55:26 +02003722 u32 count, dma_desc_cnt;
3723 u64 len, len_next;
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003724 dma_addr_t dma_addr, dma_addr_next;
3725 enum goya_dma_direction user_dir;
3726 u64 device_memory_addr, addr;
3727 enum dma_data_direction dir;
3728 struct sg_table *sgt;
3729 bool skip_host_mem_pin = false;
3730 bool user_memset;
Tomer Tayardf697bc2019-02-28 10:46:22 +02003731 u32 user_rdcomp_mask, user_wrcomp_mask, ctl;
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003732
Tomer Tayardf697bc2019-02-28 10:46:22 +02003733 ctl = le32_to_cpu(user_dma_pkt->ctl);
3734
3735 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003736 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3737
Tomer Tayardf697bc2019-02-28 10:46:22 +02003738 user_memset = (ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003739 GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
3740
3741 if ((user_dir == DMA_DRAM_TO_SRAM) || (user_dir == DMA_SRAM_TO_DRAM) ||
3742 (user_dma_pkt->tsize == 0)) {
3743 memcpy(new_dma_pkt, user_dma_pkt, sizeof(*new_dma_pkt));
3744 *new_dma_pkt_size = sizeof(*new_dma_pkt);
3745 return 0;
3746 }
3747
3748 if ((user_dir == DMA_HOST_TO_DRAM) || (user_dir == DMA_HOST_TO_SRAM)) {
Tomer Tayardf697bc2019-02-28 10:46:22 +02003749 addr = le64_to_cpu(user_dma_pkt->src_addr);
3750 device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003751 dir = DMA_TO_DEVICE;
3752 if (user_memset)
3753 skip_host_mem_pin = true;
3754 } else {
Tomer Tayardf697bc2019-02-28 10:46:22 +02003755 addr = le64_to_cpu(user_dma_pkt->dst_addr);
3756 device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003757 dir = DMA_FROM_DEVICE;
3758 }
3759
3760 if ((!skip_host_mem_pin) &&
Tomer Tayardf697bc2019-02-28 10:46:22 +02003761 (hl_userptr_is_pinned(hdev, addr,
3762 le32_to_cpu(user_dma_pkt->tsize),
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003763 parser->job_userptr_list, &userptr) == false)) {
3764 dev_err(hdev->dev, "Userptr 0x%llx + 0x%x NOT mapped\n",
3765 addr, user_dma_pkt->tsize);
3766 return -EFAULT;
3767 }
3768
3769 if ((user_memset) && (dir == DMA_TO_DEVICE)) {
3770 memcpy(new_dma_pkt, user_dma_pkt, sizeof(*user_dma_pkt));
3771 *new_dma_pkt_size = sizeof(*user_dma_pkt);
3772 return 0;
3773 }
3774
Tomer Tayardf697bc2019-02-28 10:46:22 +02003775 user_rdcomp_mask = ctl & GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK;
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003776
Tomer Tayardf697bc2019-02-28 10:46:22 +02003777 user_wrcomp_mask = ctl & GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK;
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003778
3779 sgt = userptr->sgt;
3780 dma_desc_cnt = 0;
3781
3782 for_each_sg(sgt->sgl, sg, sgt->nents, count) {
3783 len = sg_dma_len(sg);
3784 dma_addr = sg_dma_address(sg);
3785
3786 if (len == 0)
3787 break;
3788
3789 while ((count + 1) < sgt->nents) {
3790 sg_next_iter = sg_next(sg);
3791 len_next = sg_dma_len(sg_next_iter);
3792 dma_addr_next = sg_dma_address(sg_next_iter);
3793
3794 if (len_next == 0)
3795 break;
3796
3797 if ((dma_addr + len == dma_addr_next) &&
3798 (len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
3799 len += len_next;
3800 count++;
3801 sg = sg_next_iter;
3802 } else {
3803 break;
3804 }
3805 }
3806
Tomer Tayardf697bc2019-02-28 10:46:22 +02003807 ctl = le32_to_cpu(user_dma_pkt->ctl);
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003808 if (likely(dma_desc_cnt))
Tomer Tayardf697bc2019-02-28 10:46:22 +02003809 ctl &= ~GOYA_PKT_CTL_EB_MASK;
3810 ctl &= ~(GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK |
3811 GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK);
3812 new_dma_pkt->ctl = cpu_to_le32(ctl);
3813 new_dma_pkt->tsize = cpu_to_le32((u32) len);
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003814
3815 dma_addr += hdev->asic_prop.host_phys_base_address;
3816
3817 if (dir == DMA_TO_DEVICE) {
Tomer Tayardf697bc2019-02-28 10:46:22 +02003818 new_dma_pkt->src_addr = cpu_to_le64(dma_addr);
3819 new_dma_pkt->dst_addr = cpu_to_le64(device_memory_addr);
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003820 } else {
Tomer Tayardf697bc2019-02-28 10:46:22 +02003821 new_dma_pkt->src_addr = cpu_to_le64(device_memory_addr);
3822 new_dma_pkt->dst_addr = cpu_to_le64(dma_addr);
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003823 }
3824
3825 if (!user_memset)
3826 device_memory_addr += len;
3827 dma_desc_cnt++;
3828 new_dma_pkt++;
3829 }
3830
3831 if (!dma_desc_cnt) {
3832 dev_err(hdev->dev,
3833 "Error of 0 SG entries when patching DMA packet\n");
3834 return -EFAULT;
3835 }
3836
3837 /* Fix the last dma packet - rdcomp/wrcomp must be as user set them */
3838 new_dma_pkt--;
Tomer Tayardf697bc2019-02-28 10:46:22 +02003839 new_dma_pkt->ctl |= cpu_to_le32(user_rdcomp_mask | user_wrcomp_mask);
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003840
3841 *new_dma_pkt_size = dma_desc_cnt * sizeof(struct packet_lin_dma);
3842
3843 return 0;
3844}
3845
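/*
 * goya_patch_cb - walk the user CB packet by packet, copying allowed
 * packets into the patched CB and expanding LIN_DMA packets on the way.
 */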
3846static int goya_patch_cb(struct hl_device *hdev,
3847 struct hl_cs_parser *parser)
3848{
3849 u32 cb_parsed_length = 0;
3850 u32 cb_patched_cur_length = 0;
3851 int rc = 0;
3852
3853	/* user_cb_size is more than 0 so the loop will always be executed */
3854 while (cb_parsed_length < parser->user_cb_size) {
3855 enum packet_id pkt_id;
3856 u16 pkt_size;
3857 u32 new_pkt_size = 0;
3858 void *user_pkt, *kernel_pkt;
3859
3860 user_pkt = (void *) (uintptr_t)
3861 (parser->user_cb->kernel_address + cb_parsed_length);
3862 kernel_pkt = (void *) (uintptr_t)
3863 (parser->patched_cb->kernel_address +
3864 cb_patched_cur_length);
3865
3866 pkt_id = (enum packet_id) (((*(u64 *) user_pkt) &
3867 PACKET_HEADER_PACKET_ID_MASK) >>
3868 PACKET_HEADER_PACKET_ID_SHIFT);
3869
3870 pkt_size = goya_packet_sizes[pkt_id];
3871 cb_parsed_length += pkt_size;
3872 if (cb_parsed_length > parser->user_cb_size) {
3873 dev_err(hdev->dev,
3874 "packet 0x%x is out of CB boundary\n", pkt_id);
3875 rc = -EINVAL;
3876 break;
3877 }
3878
3879 switch (pkt_id) {
3880 case PACKET_LIN_DMA:
3881 rc = goya_patch_dma_packet(hdev, parser, user_pkt,
3882 kernel_pkt, &new_pkt_size);
3883 cb_patched_cur_length += new_pkt_size;
3884 break;
3885
3886 case PACKET_WREG_32:
3887 memcpy(kernel_pkt, user_pkt, pkt_size);
3888 cb_patched_cur_length += pkt_size;
3889 rc = goya_validate_wreg32(hdev, parser, kernel_pkt);
3890 break;
3891
3892 case PACKET_WREG_BULK:
3893 dev_err(hdev->dev,
3894 "User not allowed to use WREG_BULK\n");
3895 rc = -EPERM;
3896 break;
3897
3898 case PACKET_MSG_PROT:
3899 dev_err(hdev->dev,
3900 "User not allowed to use MSG_PROT\n");
3901 rc = -EPERM;
3902 break;
3903
3904 case PACKET_CP_DMA:
3905 dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
3906 rc = -EPERM;
3907 break;
3908
3909 case PACKET_STOP:
3910 dev_err(hdev->dev, "User not allowed to use STOP\n");
3911 rc = -EPERM;
3912 break;
3913
3914 case PACKET_MSG_LONG:
3915 case PACKET_MSG_SHORT:
3916 case PACKET_FENCE:
3917 case PACKET_NOP:
3918 memcpy(kernel_pkt, user_pkt, pkt_size);
3919 cb_patched_cur_length += pkt_size;
3920 break;
3921
3922 default:
3923 dev_err(hdev->dev, "Invalid packet header 0x%x\n",
3924 pkt_id);
3925 rc = -EINVAL;
3926 break;
3927 }
3928
3929 if (rc)
3930 break;
3931 }
3932
3933 return rc;
3934}
3935
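/*
 * goya_parse_cb_mmu - in the MMU flow the user CB is copied as-is into a
 * kernel-owned CB (plus room for the two trailing MSG_PROT packets) and
 * validated in place; no DMA address patching is needed because the
 * device accesses go through the MMU.
 */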
3936static int goya_parse_cb_mmu(struct hl_device *hdev,
3937 struct hl_cs_parser *parser)
3938{
3939 u64 patched_cb_handle;
3940 u32 patched_cb_size;
3941 struct hl_cb *user_cb;
3942 int rc;
3943
3944 /*
3945	 * The new CB should have space at the end for two MSG_PROT packets:
3946 * 1. A packet that will act as a completion packet
3947 * 2. A packet that will generate MSI-X interrupt
3948 */
3949 parser->patched_cb_size = parser->user_cb_size +
3950 sizeof(struct packet_msg_prot) * 2;
3951
3952 rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
3953 parser->patched_cb_size,
3954 &patched_cb_handle, HL_KERNEL_ASID_ID);
3955
3956 if (rc) {
3957 dev_err(hdev->dev,
3958 "Failed to allocate patched CB for DMA CS %d\n",
3959 rc);
3960 return rc;
3961 }
3962
3963 patched_cb_handle >>= PAGE_SHIFT;
3964 parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
3965 (u32) patched_cb_handle);
3966 /* hl_cb_get should never fail here so use kernel WARN */
3967 WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
3968 (u32) patched_cb_handle);
3969 if (!parser->patched_cb) {
3970 rc = -EFAULT;
3971 goto out;
3972 }
3973
3974 /*
3975 * The check that parser->user_cb_size <= parser->user_cb->size was done
3976 * in validate_queue_index().
3977 */
3978 memcpy((void *) (uintptr_t) parser->patched_cb->kernel_address,
3979 (void *) (uintptr_t) parser->user_cb->kernel_address,
3980 parser->user_cb_size);
3981
3982 patched_cb_size = parser->patched_cb_size;
3983
3984 /* validate patched CB instead of user CB */
3985 user_cb = parser->user_cb;
3986 parser->user_cb = parser->patched_cb;
3987 rc = goya_validate_cb(hdev, parser, true);
3988 parser->user_cb = user_cb;
3989
3990 if (rc) {
3991 hl_cb_put(parser->patched_cb);
3992 goto out;
3993 }
3994
3995 if (patched_cb_size != parser->patched_cb_size) {
3996 dev_err(hdev->dev, "user CB size mismatch\n");
3997 hl_cb_put(parser->patched_cb);
3998 rc = -EINVAL;
3999 goto out;
4000 }
4001
4002out:
4003 /*
4004	 * Always call cb destroy here because we still hold 1 reference
4005	 * from the earlier cb_get. After the job completes, cb_put will
4006	 * release it, but here we want to remove it from the
4007	 * idr
4008 */
4009 hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
4010 patched_cb_handle << PAGE_SHIFT);
4011
4012 return rc;
4013}
4014
Oded Gabbay5e6e0232019-02-27 12:15:16 +02004015static int goya_parse_cb_no_mmu(struct hl_device *hdev,
4016 struct hl_cs_parser *parser)
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02004017{
4018 u64 patched_cb_handle;
4019 int rc;
4020
4021 rc = goya_validate_cb(hdev, parser, false);
4022
4023 if (rc)
4024 goto free_userptr;
4025
4026 rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
4027 parser->patched_cb_size,
4028 &patched_cb_handle, HL_KERNEL_ASID_ID);
4029 if (rc) {
4030 dev_err(hdev->dev,
4031 "Failed to allocate patched CB for DMA CS %d\n", rc);
4032 goto free_userptr;
4033 }
4034
4035 patched_cb_handle >>= PAGE_SHIFT;
4036 parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
4037 (u32) patched_cb_handle);
4038 /* hl_cb_get should never fail here so use kernel WARN */
4039 WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
4040 (u32) patched_cb_handle);
4041 if (!parser->patched_cb) {
4042 rc = -EFAULT;
4043 goto out;
4044 }
4045
4046 rc = goya_patch_cb(hdev, parser);
4047
4048 if (rc)
4049 hl_cb_put(parser->patched_cb);
4050
4051out:
4052 /*
4053 * Always call cb destroy here because we still have 1 reference
4054 * to it by calling cb_get earlier. After the job will be completed,
4055 * cb_put will release it, but here we want to remove it from the
4056 * idr
4057 */
4058 hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
4059 patched_cb_handle << PAGE_SHIFT);
4060
4061free_userptr:
4062 if (rc)
4063 hl_userptr_delete_list(hdev, parser->job_userptr_list);
4064 return rc;
4065}
4066
Oded Gabbay5e6e0232019-02-27 12:15:16 +02004067static int goya_parse_cb_no_ext_queue(struct hl_device *hdev,
4068 struct hl_cs_parser *parser)
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02004069{
4070 struct asic_fixed_properties *asic_prop = &hdev->asic_prop;
4071 struct goya_device *goya = hdev->asic_specific;
4072
4073 if (!(goya->hw_cap_initialized & HW_CAP_MMU)) {
4074 /* For internal queue jobs, just check if cb address is valid */
4075 if (hl_mem_area_inside_range(
4076 (u64) (uintptr_t) parser->user_cb,
4077 parser->user_cb_size,
4078 asic_prop->sram_user_base_address,
4079 asic_prop->sram_end_address))
4080 return 0;
4081
4082 if (hl_mem_area_inside_range(
4083 (u64) (uintptr_t) parser->user_cb,
4084 parser->user_cb_size,
4085 asic_prop->dram_user_base_address,
4086 asic_prop->dram_end_address))
4087 return 0;
4088
4089 dev_err(hdev->dev,
Oded Gabbaydbbe358b2019-03-02 11:43:12 +02004090		"Internal CB address %px + 0x%x is neither in SRAM nor in DRAM\n",
Oded Gabbayefaa2812019-02-28 11:55:45 +02004091 parser->user_cb, parser->user_cb_size);
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02004092
4093 return -EFAULT;
4094 }
4095
4096 return 0;
4097}
4098
4099int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
4100{
4101 struct goya_device *goya = hdev->asic_specific;
4102
4103 if (!parser->ext_queue)
4104		return goya_parse_cb_no_ext_queue(hdev, parser);
4105
4106 if ((goya->hw_cap_initialized & HW_CAP_MMU) && parser->use_virt_addr)
4107 return goya_parse_cb_mmu(hdev, parser);
4108 else
4109 return goya_parse_cb_no_mmu(hdev, parser);
4110}
4111
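/*
 * goya_add_end_of_cb_packets - append the two MSG_PROT packets that every
 * patched CB reserves space for: one that writes the completion value to
 * the CQ and one that rings the MSI-X doorbell.
 */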
4112void goya_add_end_of_cb_packets(u64 kernel_address, u32 len, u64 cq_addr,
4113 u32 cq_val, u32 msix_vec)
4114{
4115 struct packet_msg_prot *cq_pkt;
Tomer Tayardf697bc2019-02-28 10:46:22 +02004116 u32 tmp;
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02004117
4118 cq_pkt = (struct packet_msg_prot *) (uintptr_t)
4119 (kernel_address + len - (sizeof(struct packet_msg_prot) * 2));
4120
Tomer Tayardf697bc2019-02-28 10:46:22 +02004121 tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02004122 (1 << GOYA_PKT_CTL_EB_SHIFT) |
4123 (1 << GOYA_PKT_CTL_MB_SHIFT);
Tomer Tayardf697bc2019-02-28 10:46:22 +02004124 cq_pkt->ctl = cpu_to_le32(tmp);
4125 cq_pkt->value = cpu_to_le32(cq_val);
4126 cq_pkt->addr = cpu_to_le64(cq_addr);
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02004127
4128 cq_pkt++;
4129
Tomer Tayardf697bc2019-02-28 10:46:22 +02004130 tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02004131 (1 << GOYA_PKT_CTL_MB_SHIFT);
Tomer Tayardf697bc2019-02-28 10:46:22 +02004132 cq_pkt->ctl = cpu_to_le32(tmp);
4133 cq_pkt->value = cpu_to_le32(msix_vec & 0x7FF);
4134 cq_pkt->addr = cpu_to_le64(CFG_BASE + mmPCIE_DBI_MSIX_DOORBELL_OFF);
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02004135}
4136
Oded Gabbay1251f232019-02-16 00:39:18 +02004137static void goya_update_eq_ci(struct hl_device *hdev, u32 val)
4138{
4139 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, val);
4140}
4141
Oded Gabbay5e6e0232019-02-27 12:15:16 +02004142static void goya_restore_phase_topology(struct hl_device *hdev)
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02004143{
4144 int i, num_of_sob_in_longs, num_of_mon_in_longs;
4145
4146 num_of_sob_in_longs =
4147 ((mmSYNC_MNGR_SOB_OBJ_1023 - mmSYNC_MNGR_SOB_OBJ_0) + 4);
4148
4149 num_of_mon_in_longs =
4150 ((mmSYNC_MNGR_MON_STATUS_255 - mmSYNC_MNGR_MON_STATUS_0) + 4);
4151
4152 for (i = 0 ; i < num_of_sob_in_longs ; i += 4)
4153 WREG32(mmSYNC_MNGR_SOB_OBJ_0 + i, 0);
4154
4155 for (i = 0 ; i < num_of_mon_in_longs ; i += 4)
4156 WREG32(mmSYNC_MNGR_MON_STATUS_0 + i, 0);
4157
4158 /* Flush all WREG to prevent race */
4159 i = RREG32(mmSYNC_MNGR_SOB_OBJ_0);
4160}
4161
Oded Gabbayc2164772019-02-16 00:39:24 +02004162/*
4163 * goya_debugfs_read32 - read a 32bit value from a given device address
4164 *
4165 * @hdev: pointer to hl_device structure
4166 * @addr: address in device
4167 * @val: returned value
4168 *
4169 * In case of DDR address that is not mapped into the default aperture that
4170 * the DDR bar exposes, the function will configure the iATU so that the DDR
4171 * bar will be positioned at a base address that allows reading from the
4172 * required address. Configuring the iATU during normal operation can
4173 * lead to undefined behavior and therefore, should be done with extreme care
4174 *
4175 */
Oded Gabbay5e6e0232019-02-27 12:15:16 +02004176static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
Oded Gabbayc2164772019-02-16 00:39:24 +02004177{
4178 struct asic_fixed_properties *prop = &hdev->asic_prop;
4179 int rc = 0;
4180
4181 if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
4182 *val = RREG32(addr - CFG_BASE);
4183
4184 } else if ((addr >= SRAM_BASE_ADDR) &&
4185 (addr < SRAM_BASE_ADDR + SRAM_SIZE)) {
4186
4187 *val = readl(hdev->pcie_bar[SRAM_CFG_BAR_ID] +
4188 (addr - SRAM_BASE_ADDR));
4189
4190 } else if ((addr >= DRAM_PHYS_BASE) &&
4191 (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size)) {
4192
4193 u64 bar_base_addr = DRAM_PHYS_BASE +
4194 (addr & ~(prop->dram_pci_bar_size - 0x1ull));
4195
4196 rc = goya_set_ddr_bar_base(hdev, bar_base_addr);
4197 if (!rc) {
4198 *val = readl(hdev->pcie_bar[DDR_BAR_ID] +
4199 (addr - bar_base_addr));
4200
4201 rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE +
4202 (MMU_PAGE_TABLES_ADDR &
4203 ~(prop->dram_pci_bar_size - 0x1ull)));
4204 }
4205 } else {
4206 rc = -EFAULT;
4207 }
4208
4209 return rc;
4210}
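
/*
 * Illustrative sketch only, not a code path in this file: reading a DRAM
 * address that lies outside the current DDR bar window, e.g.
 *
 *	u32 val;
 *	int rc = goya_debugfs_read32(hdev, DRAM_PHYS_BASE + 0x1000, &val);
 *
 * temporarily repositions the bar and then restores it to the MMU page
 * tables region.
 */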
4211
4212/*
4213 * goya_debugfs_write32 - write a 32bit value to a given device address
4214 *
4215 * @hdev: pointer to hl_device structure
4216 * @addr: address in device
4217	 * @val: value to write
4218 *
4219 * In case of DDR address that is not mapped into the default aperture that
4220 * the DDR bar exposes, the function will configure the iATU so that the DDR
4221 * bar will be positioned at a base address that allows writing to the
4222 * required address. Configuring the iATU during normal operation can
4223 * lead to undefined behavior and therefore, should be done with extreme care
4224 *
4225 */
Oded Gabbay5e6e0232019-02-27 12:15:16 +02004226static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
Oded Gabbayc2164772019-02-16 00:39:24 +02004227{
4228 struct asic_fixed_properties *prop = &hdev->asic_prop;
4229 int rc = 0;
4230
4231 if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
4232 WREG32(addr - CFG_BASE, val);
4233
4234 } else if ((addr >= SRAM_BASE_ADDR) &&
4235 (addr < SRAM_BASE_ADDR + SRAM_SIZE)) {
4236
4237 writel(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] +
4238 (addr - SRAM_BASE_ADDR));
4239
4240 } else if ((addr >= DRAM_PHYS_BASE) &&
4241 (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size)) {
4242
4243 u64 bar_base_addr = DRAM_PHYS_BASE +
4244 (addr & ~(prop->dram_pci_bar_size - 0x1ull));
4245
4246 rc = goya_set_ddr_bar_base(hdev, bar_base_addr);
4247 if (!rc) {
4248 writel(val, hdev->pcie_bar[DDR_BAR_ID] +
4249 (addr - bar_base_addr));
4250
4251 rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE +
4252 (MMU_PAGE_TABLES_ADDR &
4253 ~(prop->dram_pci_bar_size - 0x1ull)));
4254 }
4255 } else {
4256 rc = -EFAULT;
4257 }
4258
4259 return rc;
4260}
4261
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004262static u64 goya_read_pte(struct hl_device *hdev, u64 addr)
4263{
4264 struct goya_device *goya = hdev->asic_specific;
4265
4266 return readq(hdev->pcie_bar[DDR_BAR_ID] +
4267 (addr - goya->ddr_bar_cur_addr));
4268}
4269
4270static void goya_write_pte(struct hl_device *hdev, u64 addr, u64 val)
4271{
4272 struct goya_device *goya = hdev->asic_specific;
4273
4274 writeq(val, hdev->pcie_bar[DDR_BAR_ID] +
4275 (addr - goya->ddr_bar_cur_addr));
4276}
4277
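/*
 * _goya_get_event_desc - return a printf-style template for an event
 * type. Templates with a %d placeholder (e.g. "TPC%d_dec") are filled in
 * by goya_get_event_desc() below.
 */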
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004278static const char *_goya_get_event_desc(u16 event_type)
Oded Gabbay1251f232019-02-16 00:39:18 +02004279{
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004280 switch (event_type) {
4281 case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
4282 return "PCIe_dec";
4283 case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
4284 case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
4285 case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
4286 case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
4287 case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
4288 case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
4289 case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
4290 case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
4291 return "TPC%d_dec";
4292 case GOYA_ASYNC_EVENT_ID_MME_WACS:
4293 return "MME_wacs";
4294 case GOYA_ASYNC_EVENT_ID_MME_WACSD:
4295 return "MME_wacsd";
4296 case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER:
4297 return "CPU_axi_splitter";
4298 case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC:
4299 return "PSOC_axi_dec";
4300 case GOYA_ASYNC_EVENT_ID_PSOC:
4301 return "PSOC";
4302 case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
4303 case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
4304 case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
4305 case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
4306 case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
4307 case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
4308 case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
4309 case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
4310 return "TPC%d_krn_err";
4311 case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ:
4312 return "TPC%d_cq";
4313 case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
4314 return "TPC%d_qm";
4315 case GOYA_ASYNC_EVENT_ID_MME_QM:
4316 return "MME_qm";
4317 case GOYA_ASYNC_EVENT_ID_MME_CMDQ:
4318 return "MME_cq";
4319 case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
4320 return "DMA%d_qm";
4321 case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
4322 return "DMA%d_ch";
4323 default:
4324 return "N/A";
4325 }
Oded Gabbay1251f232019-02-16 00:39:18 +02004326}
4327
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004328static void goya_get_event_desc(u16 event_type, char *desc, size_t size)
Oded Gabbay1251f232019-02-16 00:39:18 +02004329{
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004330 u8 index;
Oded Gabbay1251f232019-02-16 00:39:18 +02004331
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004332 switch (event_type) {
4333 case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
4334 case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
4335 case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
4336 case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
4337 case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
4338 case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
4339 case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
4340 case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
4341 index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_DEC) / 3;
4342 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4343 break;
4344 case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
4345 case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
4346 case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
4347 case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
4348 case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
4349 case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
4350 case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
4351 case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
4352 index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR) / 10;
4353 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4354 break;
4355 case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ:
4356 index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_CMDQ;
4357 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4358 break;
4359 case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
4360 index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_QM;
4361 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4362 break;
4363 case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
4364 index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_QM;
4365 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4366 break;
4367 case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
4368 index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_CH;
4369 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4370 break;
4371 default:
4372 snprintf(desc, size, _goya_get_event_desc(event_type));
4373 break;
4374 }
4375}
Oded Gabbay1251f232019-02-16 00:39:18 +02004376
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004377static void goya_print_razwi_info(struct hl_device *hdev)
4378{
4379 if (RREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD)) {
4380 dev_err(hdev->dev, "Illegal write to LBW\n");
4381 WREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD, 0);
4382 }
Oded Gabbay1251f232019-02-16 00:39:18 +02004383
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004384 if (RREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD)) {
4385 dev_err(hdev->dev, "Illegal read from LBW\n");
4386 WREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD, 0);
4387 }
4388
4389 if (RREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD)) {
4390 dev_err(hdev->dev, "Illegal write to HBW\n");
4391 WREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD, 0);
4392 }
4393
4394 if (RREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD)) {
4395 dev_err(hdev->dev, "Illegal read from HBW\n");
4396 WREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD, 0);
4397 }
4398}
4399
4400static void goya_print_mmu_error_info(struct hl_device *hdev)
4401{
4402 struct goya_device *goya = hdev->asic_specific;
4403 u64 addr;
4404 u32 val;
4405
4406 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4407 return;
4408
4409 val = RREG32(mmMMU_PAGE_ERROR_CAPTURE);
4410 if (val & MMU_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK) {
4411 addr = val & MMU_PAGE_ERROR_CAPTURE_VA_49_32_MASK;
4412 addr <<= 32;
4413 addr |= RREG32(mmMMU_PAGE_ERROR_CAPTURE_VA);
4414
4415 dev_err(hdev->dev, "MMU page fault on va 0x%llx\n", addr);
4416
4417 WREG32(mmMMU_PAGE_ERROR_CAPTURE, 0);
Oded Gabbay1251f232019-02-16 00:39:18 +02004418 }
4419}
4420
4421static void goya_print_irq_info(struct hl_device *hdev, u16 event_type)
4422{
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004423 char desc[20] = "";
Oded Gabbay1251f232019-02-16 00:39:18 +02004424
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004425 goya_get_event_desc(event_type, desc, sizeof(desc));
4426 dev_err(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
4427 event_type, desc);
Oded Gabbay1251f232019-02-16 00:39:18 +02004428
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004429 goya_print_razwi_info(hdev);
4430 goya_print_mmu_error_info(hdev);
Oded Gabbay1251f232019-02-16 00:39:18 +02004431}
4432
Oded Gabbayf8c8c7d2019-02-16 00:39:20 +02004433static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
4434 size_t irq_arr_size)
4435{
4436 struct armcp_unmask_irq_arr_packet *pkt;
4437 size_t total_pkt_size;
4438 long result;
4439 int rc;
4440
4441 total_pkt_size = sizeof(struct armcp_unmask_irq_arr_packet) +
4442 irq_arr_size;
4443
4444 /* data should be aligned to 8 bytes in order to ArmCP to copy it */
4445 total_pkt_size = (total_pkt_size + 0x7) & ~0x7;
4446
4447	/* total_pkt_size is cast to u16 later on */
4448 if (total_pkt_size > USHRT_MAX) {
4449 dev_err(hdev->dev, "too many elements in IRQ array\n");
4450 return -EINVAL;
4451 }
4452
4453 pkt = kzalloc(total_pkt_size, GFP_KERNEL);
4454 if (!pkt)
4455 return -ENOMEM;
4456
Tomer Tayardf697bc2019-02-28 10:46:22 +02004457 pkt->length = cpu_to_le32(irq_arr_size / sizeof(irq_arr[0]));
Oded Gabbayf8c8c7d2019-02-16 00:39:20 +02004458 memcpy(&pkt->irqs, irq_arr, irq_arr_size);
4459
Tomer Tayardf697bc2019-02-28 10:46:22 +02004460 pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
4461 ARMCP_PKT_CTL_OPCODE_SHIFT);
Oded Gabbayf8c8c7d2019-02-16 00:39:20 +02004462
4463 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
4464 total_pkt_size, HL_DEVICE_TIMEOUT_USEC, &result);
4465
4466 if (rc)
4467 dev_err(hdev->dev, "failed to unmask IRQ array\n");
4468
4469 kfree(pkt);
4470
4471 return rc;
4472}
4473
4474static int goya_soft_reset_late_init(struct hl_device *hdev)
4475{
4476 /*
4477 * Unmask all IRQs since some could have been received
4478 * during the soft reset
4479 */
Oded Gabbayb24ca452019-02-24 15:50:53 +02004480 return goya_unmask_irq_arr(hdev, goya_all_events,
4481 sizeof(goya_all_events));
Oded Gabbayf8c8c7d2019-02-16 00:39:20 +02004482}
4483
Oded Gabbay1251f232019-02-16 00:39:18 +02004484static int goya_unmask_irq(struct hl_device *hdev, u16 event_type)
4485{
4486 struct armcp_packet pkt;
4487 long result;
4488 int rc;
4489
4490 memset(&pkt, 0, sizeof(pkt));
4491
Tomer Tayardf697bc2019-02-28 10:46:22 +02004492 pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ <<
4493 ARMCP_PKT_CTL_OPCODE_SHIFT);
4494 pkt.value = cpu_to_le64(event_type);
Oded Gabbay1251f232019-02-16 00:39:18 +02004495
4496 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
4497 HL_DEVICE_TIMEOUT_USEC, &result);
4498
4499 if (rc)
4500 dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);
4501
4502 return rc;
4503}
4504
4505void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
4506{
Tomer Tayardf697bc2019-02-28 10:46:22 +02004507 u32 ctl = le32_to_cpu(eq_entry->hdr.ctl);
4508 u16 event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK)
4509 >> EQ_CTL_EVENT_TYPE_SHIFT);
Oded Gabbay1251f232019-02-16 00:39:18 +02004510 struct goya_device *goya = hdev->asic_specific;
4511
4512 goya->events_stat[event_type]++;
4513
4514 switch (event_type) {
4515 case GOYA_ASYNC_EVENT_ID_PCIE_IF:
4516 case GOYA_ASYNC_EVENT_ID_TPC0_ECC:
4517 case GOYA_ASYNC_EVENT_ID_TPC1_ECC:
4518 case GOYA_ASYNC_EVENT_ID_TPC2_ECC:
4519 case GOYA_ASYNC_EVENT_ID_TPC3_ECC:
4520 case GOYA_ASYNC_EVENT_ID_TPC4_ECC:
4521 case GOYA_ASYNC_EVENT_ID_TPC5_ECC:
4522 case GOYA_ASYNC_EVENT_ID_TPC6_ECC:
4523 case GOYA_ASYNC_EVENT_ID_TPC7_ECC:
4524 case GOYA_ASYNC_EVENT_ID_MME_ECC:
4525 case GOYA_ASYNC_EVENT_ID_MME_ECC_EXT:
4526 case GOYA_ASYNC_EVENT_ID_MMU_ECC:
4527 case GOYA_ASYNC_EVENT_ID_DMA_MACRO:
4528 case GOYA_ASYNC_EVENT_ID_DMA_ECC:
4529 case GOYA_ASYNC_EVENT_ID_CPU_IF_ECC:
4530 case GOYA_ASYNC_EVENT_ID_PSOC_MEM:
4531 case GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT:
4532 case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
4533 case GOYA_ASYNC_EVENT_ID_GIC500:
4534 case GOYA_ASYNC_EVENT_ID_PLL0:
4535 case GOYA_ASYNC_EVENT_ID_PLL1:
4536 case GOYA_ASYNC_EVENT_ID_PLL3:
4537 case GOYA_ASYNC_EVENT_ID_PLL4:
4538 case GOYA_ASYNC_EVENT_ID_PLL5:
4539 case GOYA_ASYNC_EVENT_ID_PLL6:
4540 case GOYA_ASYNC_EVENT_ID_AXI_ECC:
4541 case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC:
4542 case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
4543 case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT:
4544 dev_err(hdev->dev,
4545 "Received H/W interrupt %d, reset the chip\n",
4546 event_type);
Oded Gabbayf8c8c7d2019-02-16 00:39:20 +02004547 hl_device_reset(hdev, true, false);
Oded Gabbay1251f232019-02-16 00:39:18 +02004548 break;
4549
4550 case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
4551 case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
4552 case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
4553 case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
4554 case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
4555 case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
4556 case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
4557 case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
4558 case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
4559 case GOYA_ASYNC_EVENT_ID_MME_WACS:
4560 case GOYA_ASYNC_EVENT_ID_MME_WACSD:
4561 case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER:
4562 case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC:
4563 case GOYA_ASYNC_EVENT_ID_PSOC:
4564 case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
4565 case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
4566 case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
4567 case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
4568 case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
4569 case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
4570 case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
4571 case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
4572 case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
4573 case GOYA_ASYNC_EVENT_ID_MME_QM:
4574 case GOYA_ASYNC_EVENT_ID_MME_CMDQ:
4575 case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
4576 case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
4577 goya_print_irq_info(hdev, event_type);
4578 goya_unmask_irq(hdev, event_type);
4579 break;
4580
4581 case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
4582 case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
4583 case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
4584 case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU:
4585 case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU:
4586 case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
4587 case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
4588 case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
4589 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0:
4590 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH1:
4591 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH2:
4592 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH3:
4593 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
4594 dev_info(hdev->dev, "Received H/W interrupt %d\n", event_type);
4595 break;
4596
4597 default:
4598 dev_err(hdev->dev, "Received invalid H/W interrupt %d\n",
4599 event_type);
4600 break;
4601 }
4602}
4603
4604void *goya_get_events_stat(struct hl_device *hdev, u32 *size)
4605{
4606 struct goya_device *goya = hdev->asic_specific;
4607
4608 *size = (u32) sizeof(goya->events_stat);
4609
4610 return goya->events_stat;
4611}
4612
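/*
 * goya_memset_device_memory - build a kernel CB holding a single memset
 * LIN_DMA packet, run it through the regular CS parser and submit it on
 * QMAN0. Used to scrub SRAM on context switch and to initialize the MMU
 * page tables and the DRAM default page.
 */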
Omer Shpigelman27ca384c2019-02-28 10:46:11 +02004613static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u32 size,
4614 u64 val, bool is_dram)
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004615{
Omer Shpigelman27ca384c2019-02-28 10:46:11 +02004616 struct packet_lin_dma *lin_dma_pkt;
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004617 struct hl_cs_parser parser;
4618 struct hl_cs_job *job;
Tomer Tayardf697bc2019-02-28 10:46:22 +02004619 u32 cb_size, ctl;
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004620 struct hl_cb *cb;
4621 int rc;
4622
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004623 cb = hl_cb_kernel_create(hdev, PAGE_SIZE);
4624 if (!cb)
4625 return -EFAULT;
4626
Omer Shpigelman27ca384c2019-02-28 10:46:11 +02004627 lin_dma_pkt = (struct packet_lin_dma *) (uintptr_t) cb->kernel_address;
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004628
Omer Shpigelman27ca384c2019-02-28 10:46:11 +02004629 memset(lin_dma_pkt, 0, sizeof(*lin_dma_pkt));
4630 cb_size = sizeof(*lin_dma_pkt);
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004631
Tomer Tayardf697bc2019-02-28 10:46:22 +02004632 ctl = ((PACKET_LIN_DMA << GOYA_PKT_CTL_OPCODE_SHIFT) |
4633 (1 << GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT) |
4634 (1 << GOYA_PKT_LIN_DMA_CTL_WO_SHIFT) |
4635 (1 << GOYA_PKT_CTL_RB_SHIFT) |
4636 (1 << GOYA_PKT_CTL_MB_SHIFT));
4637 ctl |= (is_dram ? DMA_HOST_TO_DRAM : DMA_HOST_TO_SRAM) <<
4638 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
4639 lin_dma_pkt->ctl = cpu_to_le32(ctl);
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004640
Tomer Tayardf697bc2019-02-28 10:46:22 +02004641 lin_dma_pkt->src_addr = cpu_to_le64(val);
4642 lin_dma_pkt->dst_addr = cpu_to_le64(addr);
4643 lin_dma_pkt->tsize = cpu_to_le32(size);
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004644
4645 job = hl_cs_allocate_job(hdev, true);
4646 if (!job) {
4647 dev_err(hdev->dev, "Failed to allocate a new job\n");
4648 rc = -ENOMEM;
4649 goto release_cb;
4650 }
4651
4652 job->id = 0;
4653 job->user_cb = cb;
4654 job->user_cb->cs_cnt++;
4655 job->user_cb_size = cb_size;
4656 job->hw_queue_id = GOYA_QUEUE_ID_DMA_0;
4657
Oded Gabbayc2164772019-02-16 00:39:24 +02004658 hl_debugfs_add_job(hdev, job);
4659
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004660 parser.ctx_id = HL_KERNEL_ASID_ID;
4661 parser.cs_sequence = 0;
4662 parser.job_id = job->id;
4663 parser.hw_queue_id = job->hw_queue_id;
4664 parser.job_userptr_list = &job->userptr_list;
4665 parser.user_cb = job->user_cb;
4666 parser.user_cb_size = job->user_cb_size;
4667 parser.ext_queue = job->ext_queue;
4668 parser.use_virt_addr = hdev->mmu_enable;
4669
4670 rc = hdev->asic_funcs->cs_parser(hdev, &parser);
4671 if (rc) {
Omer Shpigelman27ca384c2019-02-28 10:46:11 +02004672 dev_err(hdev->dev, "Failed to parse kernel CB\n");
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004673 goto free_job;
4674 }
4675
4676 job->patched_cb = parser.patched_cb;
4677 job->job_cb_size = parser.patched_cb_size;
4678 job->patched_cb->cs_cnt++;
4679
4680 rc = goya_send_job_on_qman0(hdev, job);
4681
4682 job->patched_cb->cs_cnt--;
4683 hl_cb_put(job->patched_cb);
4684
4685free_job:
4686 hl_userptr_delete_list(hdev, &job->userptr_list);
Oded Gabbayc2164772019-02-16 00:39:24 +02004687 hl_debugfs_remove_job(hdev, job);
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004688 kfree(job);
4689 cb->cs_cnt--;
4690
4691release_cb:
4692 hl_cb_put(cb);
4693 hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
4694
4695 return rc;
4696}
4697
Omer Shpigelman27ca384c2019-02-28 10:46:11 +02004698static int goya_context_switch(struct hl_device *hdev, u32 asid)
4699{
4700 struct asic_fixed_properties *prop = &hdev->asic_prop;
4701 u64 addr = prop->sram_base_address;
4702 u32 size = hdev->pldm ? 0x10000 : prop->sram_size;
4703 u64 val = 0x7777777777777777ull;
4704 int rc;
4705
4706 rc = goya_memset_device_memory(hdev, addr, size, val, false);
4707 if (rc) {
4708 dev_err(hdev->dev, "Failed to clear SRAM in context switch\n");
4709 return rc;
4710 }
4711
4712 goya_mmu_prepare(hdev, asid);
4713
4714 return 0;
4715}
4716
4717static int goya_mmu_clear_pgt_range(struct hl_device *hdev)
4718{
4719 struct asic_fixed_properties *prop = &hdev->asic_prop;
4720 struct goya_device *goya = hdev->asic_specific;
4721 u64 addr = prop->mmu_pgt_addr;
4722 u32 size = prop->mmu_pgt_size + MMU_DRAM_DEFAULT_PAGE_SIZE +
4723 MMU_CACHE_MNG_SIZE;
4724
4725 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4726 return 0;
4727
4728 return goya_memset_device_memory(hdev, addr, size, 0, true);
4729}
4730
4731static int goya_mmu_set_dram_default_page(struct hl_device *hdev)
4732{
4733 struct goya_device *goya = hdev->asic_specific;
4734 u64 addr = hdev->asic_prop.mmu_dram_default_page_addr;
4735 u32 size = MMU_DRAM_DEFAULT_PAGE_SIZE;
4736 u64 val = 0x9999999999999999ull;
4737
4738 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4739 return 0;
4740
4741 return goya_memset_device_memory(hdev, addr, size, val, true);
4742}
4743
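/*
 * goya_mmu_prepare - bind all MMU-capable engines to the given ASID by
 * clearing the MMBP bit and programming the ASID in each engine's secure
 * properties register.
 */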
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004744static void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
4745{
4746 struct goya_device *goya = hdev->asic_specific;
4747 int i;
4748
4749 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4750 return;
4751
4752 if (asid & ~MME_QM_GLBL_SECURE_PROPS_ASID_MASK) {
4753 WARN(1, "asid %u is too big\n", asid);
4754 return;
4755 }
4756
4757 /* zero the MMBP and ASID bits and then set the ASID */
4758 for (i = 0 ; i < GOYA_MMU_REGS_NUM ; i++) {
4759 WREG32_AND(goya_mmu_regs[i], ~0x7FF);
4760 WREG32_OR(goya_mmu_regs[i], asid);
4761 }
4762}
4763
4764static void goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard)
4765{
4766 struct goya_device *goya = hdev->asic_specific;
4767 u32 status, timeout_usec;
4768 int rc;
4769
4770 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4771 return;
4772
4773	/* no need for L1-only invalidation in Goya */
4774 if (!is_hard)
4775 return;
4776
4777 if (hdev->pldm)
4778 timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
4779 else
4780 timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
4781
4782 mutex_lock(&hdev->mmu_cache_lock);
4783
4784 /* L0 & L1 invalidation */
4785 WREG32(mmSTLB_INV_ALL_START, 1);
4786
4787 rc = hl_poll_timeout(
4788 hdev,
4789 mmSTLB_INV_ALL_START,
4790 status,
4791 !status,
4792 1000,
4793 timeout_usec);
4794
4795 mutex_unlock(&hdev->mmu_cache_lock);
4796
4797 if (rc)
4798 dev_notice_ratelimited(hdev->dev,
4799 "Timeout when waiting for MMU cache invalidation\n");
4800}
4801
4802static void goya_mmu_invalidate_cache_range(struct hl_device *hdev,
4803 bool is_hard, u32 asid, u64 va, u64 size)
4804{
4805 struct goya_device *goya = hdev->asic_specific;
4806 u32 status, timeout_usec, inv_data, pi;
4807 int rc;
4808
4809 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4810 return;
4811
4812 /* no need in L1 only invalidation in Goya */
4813 if (!is_hard)
4814 return;
4815
4816 if (hdev->pldm)
4817 timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
4818 else
4819 timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
4820
4821 mutex_lock(&hdev->mmu_cache_lock);
4822
4823 /*
4824 * TODO: currently invalidate entire L0 & L1 as in regular hard
4825 * invalidation. Need to apply invalidation of specific cache lines with
4826 * mask of ASID & VA & size.
4827	 * Note that L1 will be flushed entirely in any case.
4828 */
4829
4830 /* L0 & L1 invalidation */
4831 inv_data = RREG32(mmSTLB_CACHE_INV);
4832 /* PI is 8 bit */
4833 pi = ((inv_data & STLB_CACHE_INV_PRODUCER_INDEX_MASK) + 1) & 0xFF;
4834 WREG32(mmSTLB_CACHE_INV,
4835 (inv_data & STLB_CACHE_INV_INDEX_MASK_MASK) | pi);
4836
4837 rc = hl_poll_timeout(
4838 hdev,
4839 mmSTLB_INV_CONSUMER_INDEX,
4840 status,
4841 status == pi,
4842 1000,
4843 timeout_usec);
4844
4845 mutex_unlock(&hdev->mmu_cache_lock);
4846
4847 if (rc)
4848 dev_notice_ratelimited(hdev->dev,
4849 "Timeout when waiting for MMU cache invalidation\n");
4850}
4851
4852static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
4853 u64 phys_addr)
4854{
4855 u32 status, timeout_usec;
4856 int rc;
4857
4858 if (hdev->pldm)
4859 timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
4860 else
4861 timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
4862
4863 WREG32(MMU_HOP0_PA43_12, phys_addr >> MMU_HOP0_PA43_12_SHIFT);
4864 WREG32(MMU_HOP0_PA49_44, phys_addr >> MMU_HOP0_PA49_44_SHIFT);
4865 WREG32(MMU_ASID_BUSY, 0x80000000 | asid);
4866
4867 rc = hl_poll_timeout(
4868 hdev,
4869 MMU_ASID_BUSY,
4870 status,
4871 !(status & 0x80000000),
4872 1000,
4873 timeout_usec);
4874
4875 if (rc) {
4876 dev_err(hdev->dev,
4877 "Timeout during MMU hop0 config of asid %d\n", asid);
4878 return rc;
4879 }
4880
4881 return 0;
4882}
4883
Oded Gabbayf8c8c7d2019-02-16 00:39:20 +02004884int goya_send_heartbeat(struct hl_device *hdev)
4885{
4886 struct goya_device *goya = hdev->asic_specific;
Oded Gabbayf8c8c7d2019-02-16 00:39:20 +02004887
4888 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
4889 return 0;
4890
Tomer Tayar3110c602019-03-04 10:22:09 +02004891 return hl_fw_send_heartbeat(hdev);
Oded Gabbayf8c8c7d2019-02-16 00:39:20 +02004892}
4893
Oded Gabbayd91389b2019-02-16 00:39:19 +02004894static int goya_armcp_info_get(struct hl_device *hdev)
4895{
4896 struct goya_device *goya = hdev->asic_specific;
4897 struct asic_fixed_properties *prop = &hdev->asic_prop;
Oded Gabbayd91389b2019-02-16 00:39:19 +02004898 u64 dram_size;
Oded Gabbayd91389b2019-02-16 00:39:19 +02004899 int rc;
4900
4901 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
4902 return 0;
4903
Tomer Tayar3110c602019-03-04 10:22:09 +02004904 rc = hl_fw_armcp_info_get(hdev);
4905 if (rc)
4906 return rc;
Oded Gabbayd91389b2019-02-16 00:39:19 +02004907
Tomer Tayardf697bc2019-02-28 10:46:22 +02004908 dram_size = le64_to_cpu(prop->armcp_info.dram_size);
Oded Gabbayd91389b2019-02-16 00:39:19 +02004909 if (dram_size) {
4910 if ((!is_power_of_2(dram_size)) ||
4911 (dram_size < DRAM_PHYS_DEFAULT_SIZE)) {
4912 dev_err(hdev->dev,
4913 "F/W reported invalid DRAM size %llu. Trying to use default size\n",
4914 dram_size);
4915 dram_size = DRAM_PHYS_DEFAULT_SIZE;
4916 }
4917
4918 prop->dram_size = dram_size;
4919 prop->dram_end_address = prop->dram_base_address + dram_size;
4920 }
4921
Tomer Tayar3110c602019-03-04 10:22:09 +02004922 return 0;
Oded Gabbayd91389b2019-02-16 00:39:19 +02004923}
4924
4925static void goya_init_clock_gating(struct hl_device *hdev)
4926{
4927
4928}
4929
4930static void goya_disable_clock_gating(struct hl_device *hdev)
4931{
4932
4933}
Oded Gabbay9494a8d2019-02-16 00:39:17 +02004934
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02004935static bool goya_is_device_idle(struct hl_device *hdev)
4936{
4937 u64 offset, dma_qm_reg, tpc_qm_reg, tpc_cmdq_reg, tpc_cfg_reg;
4938 int i;
4939
4940 offset = mmDMA_QM_1_GLBL_STS0 - mmDMA_QM_0_GLBL_STS0;
4941
4942 for (i = 0 ; i < DMA_MAX_NUM ; i++) {
4943 dma_qm_reg = mmDMA_QM_0_GLBL_STS0 + i * offset;
4944
4945 if ((RREG32(dma_qm_reg) & DMA_QM_IDLE_MASK) !=
4946 DMA_QM_IDLE_MASK)
4947 return false;
4948 }
4949
4950 offset = mmTPC1_QM_GLBL_STS0 - mmTPC0_QM_GLBL_STS0;
4951
4952 for (i = 0 ; i < TPC_MAX_NUM ; i++) {
4953 tpc_qm_reg = mmTPC0_QM_GLBL_STS0 + i * offset;
4954 tpc_cmdq_reg = mmTPC0_CMDQ_GLBL_STS0 + i * offset;
4955 tpc_cfg_reg = mmTPC0_CFG_STATUS + i * offset;
4956
4957 if ((RREG32(tpc_qm_reg) & TPC_QM_IDLE_MASK) !=
4958 TPC_QM_IDLE_MASK)
4959 return false;
4960
4961 if ((RREG32(tpc_cmdq_reg) & TPC_CMDQ_IDLE_MASK) !=
4962 TPC_CMDQ_IDLE_MASK)
4963 return false;
4964
4965 if ((RREG32(tpc_cfg_reg) & TPC_CFG_IDLE_MASK) !=
4966 TPC_CFG_IDLE_MASK)
4967 return false;
4968 }
4969
4970 if ((RREG32(mmMME_QM_GLBL_STS0) & MME_QM_IDLE_MASK) !=
4971 MME_QM_IDLE_MASK)
4972 return false;
4973
4974 if ((RREG32(mmMME_CMDQ_GLBL_STS0) & MME_CMDQ_IDLE_MASK) !=
4975 MME_CMDQ_IDLE_MASK)
4976 return false;
4977
4978 if ((RREG32(mmMME_ARCH_STATUS) & MME_ARCH_IDLE_MASK) !=
4979 MME_ARCH_IDLE_MASK)
4980 return false;
4981
4982 if (RREG32(mmMME_SHADOW_0_STATUS) & MME_SHADOW_IDLE_MASK)
4983 return false;
4984
4985 return true;
4986}
4987
Oded Gabbay9494a8d2019-02-16 00:39:17 +02004988static void goya_hw_queues_lock(struct hl_device *hdev)
4989{
4990 struct goya_device *goya = hdev->asic_specific;
4991
4992 spin_lock(&goya->hw_queues_lock);
4993}
4994
4995static void goya_hw_queues_unlock(struct hl_device *hdev)
4996{
4997 struct goya_device *goya = hdev->asic_specific;
4998
4999 spin_unlock(&goya->hw_queues_lock);
5000}
5001
Oded Gabbayd8dd7b02019-02-16 00:39:23 +02005002static u32 goya_get_pci_id(struct hl_device *hdev)
5003{
5004 return hdev->pdev->device;
5005}
5006
Oded Gabbay5e6e0232019-02-27 12:15:16 +02005007static int goya_get_eeprom_data(struct hl_device *hdev, void *data,
5008 size_t max_size)
Oded Gabbayd91389b2019-02-16 00:39:19 +02005009{
5010 struct goya_device *goya = hdev->asic_specific;
Oded Gabbayd91389b2019-02-16 00:39:19 +02005011
5012 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
5013 return 0;
5014
Tomer Tayar3110c602019-03-04 10:22:09 +02005015 return hl_fw_get_eeprom_data(hdev, data, max_size);
Oded Gabbayd91389b2019-02-16 00:39:19 +02005016}
5017
static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev)
{
	return RREG32(mmPSOC_GLOBAL_CONF_APP_STATUS);
}

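/*
 * goya_funcs - the Goya implementation of the ASIC-specific function table
 * through which the common habanalabs driver code dispatches. It is
 * installed by goya_set_asic_funcs() below.
 */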
static const struct hl_asic_funcs goya_funcs = {
	.early_init = goya_early_init,
	.early_fini = goya_early_fini,
	.late_init = goya_late_init,
	.late_fini = goya_late_fini,
	.sw_init = goya_sw_init,
	.sw_fini = goya_sw_fini,
	.hw_init = goya_hw_init,
	.hw_fini = goya_hw_fini,
	.halt_engines = goya_halt_engines,
	.suspend = goya_suspend,
	.resume = goya_resume,
	.cb_mmap = goya_cb_mmap,
	.ring_doorbell = goya_ring_doorbell,
	.flush_pq_write = goya_flush_pq_write,
	.dma_alloc_coherent = goya_dma_alloc_coherent,
	.dma_free_coherent = goya_dma_free_coherent,
	.get_int_queue_base = goya_get_int_queue_base,
	.test_queues = goya_test_queues,
	.dma_pool_zalloc = goya_dma_pool_zalloc,
	.dma_pool_free = goya_dma_pool_free,
	.cpu_accessible_dma_pool_alloc = goya_cpu_accessible_dma_pool_alloc,
	.cpu_accessible_dma_pool_free = goya_cpu_accessible_dma_pool_free,
	.hl_dma_unmap_sg = goya_dma_unmap_sg,
	.cs_parser = goya_cs_parser,
	.asic_dma_map_sg = goya_dma_map_sg,
	.get_dma_desc_list_size = goya_get_dma_desc_list_size,
	.add_end_of_cb_packets = goya_add_end_of_cb_packets,
	.update_eq_ci = goya_update_eq_ci,
	.context_switch = goya_context_switch,
	.restore_phase_topology = goya_restore_phase_topology,
	.debugfs_read32 = goya_debugfs_read32,
	.debugfs_write32 = goya_debugfs_write32,
	.add_device_attr = goya_add_device_attr,
	.handle_eqe = goya_handle_eqe,
	.set_pll_profile = goya_set_pll_profile,
	.get_events_stat = goya_get_events_stat,
	.read_pte = goya_read_pte,
	.write_pte = goya_write_pte,
	.mmu_invalidate_cache = goya_mmu_invalidate_cache,
	.mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range,
	.send_heartbeat = goya_send_heartbeat,
	.enable_clock_gating = goya_init_clock_gating,
	.disable_clock_gating = goya_disable_clock_gating,
	.is_device_idle = goya_is_device_idle,
	.soft_reset_late_init = goya_soft_reset_late_init,
	.hw_queues_lock = goya_hw_queues_lock,
	.hw_queues_unlock = goya_hw_queues_unlock,
	.get_pci_id = goya_get_pci_id,
	.get_eeprom_data = goya_get_eeprom_data,
	.send_cpu_message = goya_send_cpu_message,
	.get_hw_state = goya_get_hw_state
};

/*
 * goya_set_asic_funcs - set Goya function pointers
 *
 * @*hdev: pointer to hl_device structure
 *
 */
void goya_set_asic_funcs(struct hl_device *hdev)
{
	hdev->asic_funcs = &goya_funcs;
}