// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "goyaP.h"
#include "include/hw_ip/mmu/mmu_general.h"
#include "include/hw_ip/mmu/mmu_v1_0.h"
#include "include/goya/asic_reg/goya_masks.h"

#include <linux/pci.h>
#include <linux/genalloc.h>
#include <linux/hwmon.h>
#include <linux/io-64-nonatomic-lo-hi.h>

/*
 * GOYA security scheme:
 *
 * 1. Host is protected by:
 *        - Range registers (when MMU is enabled, DMA RR does NOT protect host)
 *        - MMU
 *
 * 2. DRAM is protected by:
 *        - Range registers (protect the first 512MB)
 *        - MMU (isolation between users)
 *
 * 3. Configuration is protected by:
 *        - Range registers
 *        - Protection bits
 *
 * When MMU is disabled:
 *
 * QMAN DMA: PQ, CQ, CP and DMA are secured.
 * PQ, CB and the data are on the host.
 *
 * QMAN TPC/MME:
 * PQ, CQ and CP are not secured.
 * PQ, CB and the data are on the SRAM/DRAM.
 *
 * Since QMAN DMA is secured, KMD parses the DMA CB:
 *     - KMD checks the DMA pointer
 *     - WREG and MSG_PROT are not allowed.
 *     - MSG_LONG/SHORT are allowed.
 *
 * A read/write transaction by the QMAN to a protected area will succeed if
 * and only if the QMAN's CP is secured and MSG_PROT is used.
 *
 *
 * When MMU is enabled:
 *
 * QMAN DMA: PQ, CQ and CP are secured.
 * MMU is set to bypass on the Secure props register of the QMAN.
 * The reasons we don't enable MMU for PQ, CQ and CP are:
 *     - The PQ entry is in kernel address space and KMD doesn't map it.
 *     - CP writes to the MSIX register and to kernel address space (completion
 *       queue).
 *
 * DMA is not secured, but because CP is secured, KMD still needs to parse the
 * CB, though it doesn't need to check the DMA addresses.
 *
 * For QMAN DMA 0, DMA is also secured because only KMD uses this DMA and KMD
 * doesn't map memory in the MMU.
 *
 * QMAN TPC/MME: PQ, CQ and CP aren't secured (no change from MMU disabled mode)
 *
 * DMA RR does NOT protect the host because DMA is not secured.
 *
 */

#define GOYA_MMU_REGS_NUM		61

#define GOYA_DMA_POOL_BLK_SIZE		0x100 /* 256 bytes */

#define GOYA_RESET_TIMEOUT_MSEC		500		/* 500ms */
#define GOYA_PLDM_RESET_TIMEOUT_MSEC	20000		/* 20s */
#define GOYA_RESET_WAIT_MSEC		1		/* 1ms */
#define GOYA_CPU_RESET_WAIT_MSEC	100		/* 100ms */
#define GOYA_PLDM_RESET_WAIT_MSEC	1000		/* 1s */
#define GOYA_CPU_TIMEOUT_USEC		10000000	/* 10s */
#define GOYA_TEST_QUEUE_WAIT_USEC	100000		/* 100ms */
#define GOYA_PLDM_MMU_TIMEOUT_USEC	(MMU_CONFIG_TIMEOUT_USEC * 100)
#define GOYA_PLDM_QMAN0_TIMEOUT_USEC	(HL_DEVICE_TIMEOUT_USEC * 30)

#define GOYA_QMAN0_FENCE_VAL		0xD169B243

#define GOYA_MAX_INITIATORS		20

#define GOYA_MAX_STRING_LEN		20

#define GOYA_CB_POOL_CB_CNT		512
#define GOYA_CB_POOL_CB_SIZE		0x20000 /* 128KB */

static const char goya_irq_name[GOYA_MSIX_ENTRIES][GOYA_MAX_STRING_LEN] = {
		"goya cq 0", "goya cq 1", "goya cq 2", "goya cq 3",
		"goya cq 4", "goya cpu eq"
};

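/* Size in bytes of each QMAN packet type, indexed by packet ID */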
static u16 goya_packet_sizes[MAX_PACKET_ID] = {
	[PACKET_WREG_32]	= sizeof(struct packet_wreg32),
	[PACKET_WREG_BULK]	= sizeof(struct packet_wreg_bulk),
	[PACKET_MSG_LONG]	= sizeof(struct packet_msg_long),
	[PACKET_MSG_SHORT]	= sizeof(struct packet_msg_short),
	[PACKET_CP_DMA]		= sizeof(struct packet_cp_dma),
	[PACKET_MSG_PROT]	= sizeof(struct packet_msg_prot),
	[PACKET_FENCE]		= sizeof(struct packet_fence),
	[PACKET_LIN_DMA]	= sizeof(struct packet_lin_dma),
	[PACKET_NOP]		= sizeof(struct packet_nop),
	[PACKET_STOP]		= sizeof(struct packet_stop)
};

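/*
 * CFG registers that hold an ASID and therefore must be re-programmed for
 * each user context (see goya_mmu_prepare()). GOYA_MMU_REGS_NUM must match
 * the number of entries below.
 */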
static u64 goya_mmu_regs[GOYA_MMU_REGS_NUM] = {
	mmDMA_QM_0_GLBL_NON_SECURE_PROPS,
	mmDMA_QM_1_GLBL_NON_SECURE_PROPS,
	mmDMA_QM_2_GLBL_NON_SECURE_PROPS,
	mmDMA_QM_3_GLBL_NON_SECURE_PROPS,
	mmDMA_QM_4_GLBL_NON_SECURE_PROPS,
	mmTPC0_QM_GLBL_SECURE_PROPS,
	mmTPC0_QM_GLBL_NON_SECURE_PROPS,
	mmTPC0_CMDQ_GLBL_SECURE_PROPS,
	mmTPC0_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC0_CFG_ARUSER,
	mmTPC0_CFG_AWUSER,
	mmTPC1_QM_GLBL_SECURE_PROPS,
	mmTPC1_QM_GLBL_NON_SECURE_PROPS,
	mmTPC1_CMDQ_GLBL_SECURE_PROPS,
	mmTPC1_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC1_CFG_ARUSER,
	mmTPC1_CFG_AWUSER,
	mmTPC2_QM_GLBL_SECURE_PROPS,
	mmTPC2_QM_GLBL_NON_SECURE_PROPS,
	mmTPC2_CMDQ_GLBL_SECURE_PROPS,
	mmTPC2_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC2_CFG_ARUSER,
	mmTPC2_CFG_AWUSER,
	mmTPC3_QM_GLBL_SECURE_PROPS,
	mmTPC3_QM_GLBL_NON_SECURE_PROPS,
	mmTPC3_CMDQ_GLBL_SECURE_PROPS,
	mmTPC3_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC3_CFG_ARUSER,
	mmTPC3_CFG_AWUSER,
	mmTPC4_QM_GLBL_SECURE_PROPS,
	mmTPC4_QM_GLBL_NON_SECURE_PROPS,
	mmTPC4_CMDQ_GLBL_SECURE_PROPS,
	mmTPC4_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC4_CFG_ARUSER,
	mmTPC4_CFG_AWUSER,
	mmTPC5_QM_GLBL_SECURE_PROPS,
	mmTPC5_QM_GLBL_NON_SECURE_PROPS,
	mmTPC5_CMDQ_GLBL_SECURE_PROPS,
	mmTPC5_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC5_CFG_ARUSER,
	mmTPC5_CFG_AWUSER,
	mmTPC6_QM_GLBL_SECURE_PROPS,
	mmTPC6_QM_GLBL_NON_SECURE_PROPS,
	mmTPC6_CMDQ_GLBL_SECURE_PROPS,
	mmTPC6_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC6_CFG_ARUSER,
	mmTPC6_CFG_AWUSER,
	mmTPC7_QM_GLBL_SECURE_PROPS,
	mmTPC7_QM_GLBL_NON_SECURE_PROPS,
	mmTPC7_CMDQ_GLBL_SECURE_PROPS,
	mmTPC7_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC7_CFG_ARUSER,
	mmTPC7_CFG_AWUSER,
	mmMME_QM_GLBL_SECURE_PROPS,
	mmMME_QM_GLBL_NON_SECURE_PROPS,
	mmMME_CMDQ_GLBL_SECURE_PROPS,
	mmMME_CMDQ_GLBL_NON_SECURE_PROPS,
	mmMME_SBA_CONTROL_DATA,
	mmMME_SBB_CONTROL_DATA,
	mmMME_SBC_CONTROL_DATA,
	mmMME_WBC_CONTROL_DATA
};

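/* All async H/W event IDs that the driver handles */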
static u32 goya_all_events[] = {
	GOYA_ASYNC_EVENT_ID_PCIE_IF,
	GOYA_ASYNC_EVENT_ID_TPC0_ECC,
	GOYA_ASYNC_EVENT_ID_TPC1_ECC,
	GOYA_ASYNC_EVENT_ID_TPC2_ECC,
	GOYA_ASYNC_EVENT_ID_TPC3_ECC,
	GOYA_ASYNC_EVENT_ID_TPC4_ECC,
	GOYA_ASYNC_EVENT_ID_TPC5_ECC,
	GOYA_ASYNC_EVENT_ID_TPC6_ECC,
	GOYA_ASYNC_EVENT_ID_TPC7_ECC,
	GOYA_ASYNC_EVENT_ID_MME_ECC,
	GOYA_ASYNC_EVENT_ID_MME_ECC_EXT,
	GOYA_ASYNC_EVENT_ID_MMU_ECC,
	GOYA_ASYNC_EVENT_ID_DMA_MACRO,
	GOYA_ASYNC_EVENT_ID_DMA_ECC,
	GOYA_ASYNC_EVENT_ID_CPU_IF_ECC,
	GOYA_ASYNC_EVENT_ID_PSOC_MEM,
	GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT,
	GOYA_ASYNC_EVENT_ID_SRAM0,
	GOYA_ASYNC_EVENT_ID_SRAM1,
	GOYA_ASYNC_EVENT_ID_SRAM2,
	GOYA_ASYNC_EVENT_ID_SRAM3,
	GOYA_ASYNC_EVENT_ID_SRAM4,
	GOYA_ASYNC_EVENT_ID_SRAM5,
	GOYA_ASYNC_EVENT_ID_SRAM6,
	GOYA_ASYNC_EVENT_ID_SRAM7,
	GOYA_ASYNC_EVENT_ID_SRAM8,
	GOYA_ASYNC_EVENT_ID_SRAM9,
	GOYA_ASYNC_EVENT_ID_SRAM10,
	GOYA_ASYNC_EVENT_ID_SRAM11,
	GOYA_ASYNC_EVENT_ID_SRAM12,
	GOYA_ASYNC_EVENT_ID_SRAM13,
	GOYA_ASYNC_EVENT_ID_SRAM14,
	GOYA_ASYNC_EVENT_ID_SRAM15,
	GOYA_ASYNC_EVENT_ID_SRAM16,
	GOYA_ASYNC_EVENT_ID_SRAM17,
	GOYA_ASYNC_EVENT_ID_SRAM18,
	GOYA_ASYNC_EVENT_ID_SRAM19,
	GOYA_ASYNC_EVENT_ID_SRAM20,
	GOYA_ASYNC_EVENT_ID_SRAM21,
	GOYA_ASYNC_EVENT_ID_SRAM22,
	GOYA_ASYNC_EVENT_ID_SRAM23,
	GOYA_ASYNC_EVENT_ID_SRAM24,
	GOYA_ASYNC_EVENT_ID_SRAM25,
	GOYA_ASYNC_EVENT_ID_SRAM26,
	GOYA_ASYNC_EVENT_ID_SRAM27,
	GOYA_ASYNC_EVENT_ID_SRAM28,
	GOYA_ASYNC_EVENT_ID_SRAM29,
	GOYA_ASYNC_EVENT_ID_GIC500,
	GOYA_ASYNC_EVENT_ID_PLL0,
	GOYA_ASYNC_EVENT_ID_PLL1,
	GOYA_ASYNC_EVENT_ID_PLL3,
	GOYA_ASYNC_EVENT_ID_PLL4,
	GOYA_ASYNC_EVENT_ID_PLL5,
	GOYA_ASYNC_EVENT_ID_PLL6,
	GOYA_ASYNC_EVENT_ID_AXI_ECC,
	GOYA_ASYNC_EVENT_ID_L2_RAM_ECC,
	GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET,
	GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT,
	GOYA_ASYNC_EVENT_ID_PCIE_DEC,
	GOYA_ASYNC_EVENT_ID_TPC0_DEC,
	GOYA_ASYNC_EVENT_ID_TPC1_DEC,
	GOYA_ASYNC_EVENT_ID_TPC2_DEC,
	GOYA_ASYNC_EVENT_ID_TPC3_DEC,
	GOYA_ASYNC_EVENT_ID_TPC4_DEC,
	GOYA_ASYNC_EVENT_ID_TPC5_DEC,
	GOYA_ASYNC_EVENT_ID_TPC6_DEC,
	GOYA_ASYNC_EVENT_ID_TPC7_DEC,
	GOYA_ASYNC_EVENT_ID_MME_WACS,
	GOYA_ASYNC_EVENT_ID_MME_WACSD,
	GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER,
	GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC,
	GOYA_ASYNC_EVENT_ID_PSOC,
	GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC0_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC1_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC2_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC3_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC4_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC5_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC6_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC7_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC0_QM,
	GOYA_ASYNC_EVENT_ID_TPC1_QM,
	GOYA_ASYNC_EVENT_ID_TPC2_QM,
	GOYA_ASYNC_EVENT_ID_TPC3_QM,
	GOYA_ASYNC_EVENT_ID_TPC4_QM,
	GOYA_ASYNC_EVENT_ID_TPC5_QM,
	GOYA_ASYNC_EVENT_ID_TPC6_QM,
	GOYA_ASYNC_EVENT_ID_TPC7_QM,
	GOYA_ASYNC_EVENT_ID_MME_QM,
	GOYA_ASYNC_EVENT_ID_MME_CMDQ,
	GOYA_ASYNC_EVENT_ID_DMA0_QM,
	GOYA_ASYNC_EVENT_ID_DMA1_QM,
	GOYA_ASYNC_EVENT_ID_DMA2_QM,
	GOYA_ASYNC_EVENT_ID_DMA3_QM,
	GOYA_ASYNC_EVENT_ID_DMA4_QM,
	GOYA_ASYNC_EVENT_ID_DMA0_CH,
	GOYA_ASYNC_EVENT_ID_DMA1_CH,
	GOYA_ASYNC_EVENT_ID_DMA2_CH,
	GOYA_ASYNC_EVENT_ID_DMA3_CH,
	GOYA_ASYNC_EVENT_ID_DMA4_CH,
	GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH0,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH1,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH2,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH3,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH4
};

static void goya_mmu_prepare(struct hl_device *hdev, u32 asid);
static int goya_mmu_clear_pgt_range(struct hl_device *hdev);
static int goya_mmu_set_dram_default_page(struct hl_device *hdev);
static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
					u64 phys_addr);

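/*
 * goya_get_fixed_properties - Populate the fixed properties of the device
 *
 * @hdev: pointer to hl_device structure
 *
 * Fill the asic_fixed_properties structure with the fixed H/W values of
 * Goya: queue types, DRAM/SRAM bases and sizes, MMU page-table layout and
 * virtual address ranges
 *
 */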
static void goya_get_fixed_properties(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int i;

	for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
		prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
		prop->hw_queues_props[i].kmd_only = 0;
	}

	for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES ; i++) {
		prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
		prop->hw_queues_props[i].kmd_only = 1;
	}

	for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES +
			NUMBER_OF_INT_HW_QUEUES; i++) {
		prop->hw_queues_props[i].type = QUEUE_TYPE_INT;
		prop->hw_queues_props[i].kmd_only = 0;
	}

	for (; i < HL_MAX_QUEUES; i++)
		prop->hw_queues_props[i].type = QUEUE_TYPE_NA;

	prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;

	prop->dram_base_address = DRAM_PHYS_BASE;
	prop->dram_size = DRAM_PHYS_DEFAULT_SIZE;
	prop->dram_end_address = prop->dram_base_address + prop->dram_size;
	prop->dram_user_base_address = DRAM_BASE_ADDR_USER;

	prop->sram_base_address = SRAM_BASE_ADDR;
	prop->sram_size = SRAM_SIZE;
	prop->sram_end_address = prop->sram_base_address + prop->sram_size;
	prop->sram_user_base_address = prop->sram_base_address +
						SRAM_USER_BASE_OFFSET;

	prop->mmu_pgt_addr = MMU_PAGE_TABLES_ADDR;
	prop->mmu_dram_default_page_addr = MMU_DRAM_DEFAULT_PAGE_ADDR;
	if (hdev->pldm)
		prop->mmu_pgt_size = 0x800000; /* 8MB */
	else
		prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE;
	prop->mmu_pte_size = HL_PTE_SIZE;
	prop->mmu_hop_table_size = HOP_TABLE_SIZE;
	prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE;
	prop->dram_page_size = PAGE_SIZE_2MB;

	prop->host_phys_base_address = HOST_PHYS_BASE;
	prop->va_space_host_start_address = VA_HOST_SPACE_START;
	prop->va_space_host_end_address = VA_HOST_SPACE_END;
	prop->va_space_dram_start_address = VA_DDR_SPACE_START;
	prop->va_space_dram_end_address = VA_DDR_SPACE_END;
	prop->dram_size_for_default_page_mapping =
			prop->va_space_dram_end_address;
	prop->cfg_size = CFG_SIZE;
	prop->max_asid = MAX_ASID;
	prop->num_of_events = GOYA_ASYNC_EVENT_ID_SIZE;
	prop->high_pll = PLL_HIGH_DEFAULT;
	prop->cb_pool_cb_cnt = GOYA_CB_POOL_CB_CNT;
	prop->cb_pool_cb_size = GOYA_CB_POOL_CB_SIZE;
	prop->max_power_default = MAX_POWER_DEFAULT;
	prop->tpc_enabled_mask = TPC_ENABLED_MASK;
	prop->pcie_dbi_base_address = mmPCIE_DBI_BASE;
	prop->pcie_aux_dbi_reg_addr = CFG_BASE + mmPCIE_AUX_DBI;
}

/*
 * goya_pci_bars_map - Map PCI BARS of Goya device
 *
 * @hdev: pointer to hl_device structure
 *
 * Request PCI regions and map them to kernel virtual addresses.
 * Returns 0 on success
 *
 */
static int goya_pci_bars_map(struct hl_device *hdev)
{
	static const char * const name[] = {"SRAM_CFG", "MSIX", "DDR"};
	bool is_wc[3] = {false, false, true};
	int rc;

	rc = hl_pci_bars_map(hdev, name, is_wc);
	if (rc)
		return rc;

	hdev->rmmio = hdev->pcie_bar[SRAM_CFG_BAR_ID] +
			(CFG_BASE - SRAM_BASE_ADDR);

	return 0;
}

/*
 * goya_set_ddr_bar_base - set DDR bar to map specific device address
 *
 * @hdev: pointer to hl_device structure
 * @addr: address in DDR. Must be aligned to DDR bar size
 *
 * This function configures the iATU so that the DDR bar will start at the
 * specified addr.
 *
 */
static int goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr)
{
	struct goya_device *goya = hdev->asic_specific;
	int rc;

	if ((goya) && (goya->ddr_bar_cur_addr == addr))
		return 0;

	/* Inbound Region 1 - Bar 4 - Point to DDR */
	rc = hl_pci_set_dram_bar_base(hdev, 1, 4, addr);
	if (rc)
		return rc;

	if (goya)
		goya->ddr_bar_cur_addr = addr;

	return 0;
}

/*
 * goya_init_iatu - Initialize the iATU unit inside the PCI controller
 *
 * @hdev: pointer to hl_device structure
 *
 * This is needed in case the firmware doesn't initialize the iATU
 *
 */
static int goya_init_iatu(struct hl_device *hdev)
{
	return hl_pci_init_iatu(hdev, SRAM_BASE_ADDR, DRAM_PHYS_BASE,
				HOST_PHYS_SIZE);
}

/*
 * goya_early_init - GOYA early initialization code
 *
 * @hdev: pointer to hl_device structure
 *
 * Verify PCI bars
 * Set DMA masks
 * PCI controller initialization
 * Map PCI bars
 *
 */
static int goya_early_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct pci_dev *pdev = hdev->pdev;
	u32 val;
	int rc;

	goya_get_fixed_properties(hdev);

	/* Check BAR sizes */
	if (pci_resource_len(pdev, SRAM_CFG_BAR_ID) != CFG_BAR_SIZE) {
		dev_err(hdev->dev,
			"Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
			SRAM_CFG_BAR_ID,
			(unsigned long long) pci_resource_len(pdev,
							SRAM_CFG_BAR_ID),
			CFG_BAR_SIZE);
		return -ENODEV;
	}

	if (pci_resource_len(pdev, MSIX_BAR_ID) != MSIX_BAR_SIZE) {
		dev_err(hdev->dev,
			"Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
			MSIX_BAR_ID,
			(unsigned long long) pci_resource_len(pdev,
								MSIX_BAR_ID),
			MSIX_BAR_SIZE);
		return -ENODEV;
	}

	prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID);

	rc = hl_pci_init(hdev, 39);
	if (rc)
		return rc;

	if (!hdev->pldm) {
		val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
		if (val & PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK)
			dev_warn(hdev->dev,
				"PCI strap is not configured correctly, PCI bus errors may occur\n");
	}

	return 0;
}

/*
 * goya_early_fini - GOYA early finalization code
 *
 * @hdev: pointer to hl_device structure
 *
 * Unmap PCI bars
 *
 */
static int goya_early_fini(struct hl_device *hdev)
{
	hl_pci_fini(hdev);

	return 0;
}

/*
 * goya_fetch_psoc_frequency - Fetch PSOC frequency values
 *
 * @hdev: pointer to hl_device structure
 *
 */
static void goya_fetch_psoc_frequency(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	prop->psoc_pci_pll_nr = RREG32(mmPSOC_PCI_PLL_NR);
	prop->psoc_pci_pll_nf = RREG32(mmPSOC_PCI_PLL_NF);
	prop->psoc_pci_pll_od = RREG32(mmPSOC_PCI_PLL_OD);
	prop->psoc_pci_pll_div_factor = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
}

/*
 * goya_late_init - GOYA late initialization code
 *
 * @hdev: pointer to hl_device structure
 *
 * Get ArmCP info and send message to CPU to enable PCI access
 */
static int goya_late_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int rc;

	rc = goya_armcp_info_get(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to get armcp info\n");
		return rc;
	}

	/* Now that we have the DRAM size in ASIC prop, we need to check
	 * its size and configure the DMA_IF DDR wrap protection (which is in
	 * the MMU block) accordingly. The value is the log2 of the DRAM size
	 */
	WREG32(mmMMU_LOG2_DDR_SIZE, ilog2(prop->dram_size));

	rc = hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
	if (rc) {
		dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
		return rc;
	}

	WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
			GOYA_ASYNC_EVENT_ID_INTS_REGISTER);

	goya_fetch_psoc_frequency(hdev);

	rc = goya_mmu_clear_pgt_range(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to clear MMU page tables range\n");
		goto disable_pci_access;
	}

	rc = goya_mmu_set_dram_default_page(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to set DRAM default page\n");
		goto disable_pci_access;
	}

	return 0;

disable_pci_access:
	hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);

	return rc;
}

/*
 * goya_late_fini - GOYA late tear-down code
 *
 * @hdev: pointer to hl_device structure
 *
 * Free sensors allocated structures
 */
void goya_late_fini(struct hl_device *hdev)
{
	const struct hwmon_channel_info **channel_info_arr;
	int i = 0;

	if (!hdev->hl_chip_info->info)
		return;

	channel_info_arr = hdev->hl_chip_info->info;

	while (channel_info_arr[i]) {
		kfree(channel_info_arr[i]->config);
		kfree(channel_info_arr[i]);
		i++;
	}

	kfree(channel_info_arr);

	hdev->hl_chip_info->info = NULL;
}

/*
 * goya_sw_init - Goya software initialization code
 *
 * @hdev: pointer to hl_device structure
 *
 */
static int goya_sw_init(struct hl_device *hdev)
{
	struct goya_device *goya;
	int rc;

	/* Allocate device structure */
	goya = kzalloc(sizeof(*goya), GFP_KERNEL);
	if (!goya)
		return -ENOMEM;

	/* according to goya_init_iatu */
	goya->ddr_bar_cur_addr = DRAM_PHYS_BASE;

	goya->mme_clk = GOYA_PLL_FREQ_LOW;
	goya->tpc_clk = GOYA_PLL_FREQ_LOW;
	goya->ic_clk = GOYA_PLL_FREQ_LOW;

	hdev->asic_specific = goya;

	/* Create DMA pool for small allocations */
	hdev->dma_pool = dma_pool_create(dev_name(hdev->dev),
			&hdev->pdev->dev, GOYA_DMA_POOL_BLK_SIZE, 8, 0);
	if (!hdev->dma_pool) {
		dev_err(hdev->dev, "failed to create DMA pool\n");
		rc = -ENOMEM;
		goto free_goya_device;
	}

	hdev->cpu_accessible_dma_mem =
			hdev->asic_funcs->dma_alloc_coherent(hdev,
					HL_CPU_ACCESSIBLE_MEM_SIZE,
					&hdev->cpu_accessible_dma_address,
					GFP_KERNEL | __GFP_ZERO);

	if (!hdev->cpu_accessible_dma_mem) {
		rc = -ENOMEM;
		goto free_dma_pool;
	}

	hdev->cpu_accessible_dma_pool = gen_pool_create(HL_CPU_PKT_SHIFT, -1);
	if (!hdev->cpu_accessible_dma_pool) {
		dev_err(hdev->dev,
			"Failed to create CPU accessible DMA pool\n");
		rc = -ENOMEM;
		goto free_cpu_pq_dma_mem;
	}

	rc = gen_pool_add(hdev->cpu_accessible_dma_pool,
				(uintptr_t) hdev->cpu_accessible_dma_mem,
				HL_CPU_ACCESSIBLE_MEM_SIZE, -1);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to add memory to CPU accessible DMA pool\n");
		rc = -EFAULT;
		goto free_cpu_pq_pool;
	}

	spin_lock_init(&goya->hw_queues_lock);

	return 0;

free_cpu_pq_pool:
	gen_pool_destroy(hdev->cpu_accessible_dma_pool);
free_cpu_pq_dma_mem:
	hdev->asic_funcs->dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE,
			hdev->cpu_accessible_dma_mem,
			hdev->cpu_accessible_dma_address);
free_dma_pool:
	dma_pool_destroy(hdev->dma_pool);
free_goya_device:
	kfree(goya);

	return rc;
}

/*
 * goya_sw_fini - Goya software tear-down code
 *
 * @hdev: pointer to hl_device structure
 *
 */
static int goya_sw_fini(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;

	gen_pool_destroy(hdev->cpu_accessible_dma_pool);

	hdev->asic_funcs->dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE,
			hdev->cpu_accessible_dma_mem,
			hdev->cpu_accessible_dma_address);

	dma_pool_destroy(hdev->dma_pool);

	kfree(goya);

	return 0;
}

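/*
 * goya_init_dma_qman - Initialize the H/W registers of a single DMA QMAN
 *
 * @hdev: pointer to hl_device structure
 * @dma_id: index of the DMA channel
 * @bus_address: bus address of the queue's PQ
 *
 * Set the PQ base and size, the monitor/sync-object message bases, route
 * errors to the GIC, and enable the QMAN. Protection depends on whether
 * the MMU is in use
 *
 */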
static void goya_init_dma_qman(struct hl_device *hdev, int dma_id,
				dma_addr_t bus_address)
{
	struct goya_device *goya = hdev->asic_specific;
	u32 mtr_base_lo, mtr_base_hi;
	u32 so_base_lo, so_base_hi;
	u32 gic_base_lo, gic_base_hi;
	u32 reg_off = dma_id * (mmDMA_QM_1_PQ_PI - mmDMA_QM_0_PQ_PI);

	mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
	mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
	so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
	so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);

	gic_base_lo =
		lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
	gic_base_hi =
		upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);

	WREG32(mmDMA_QM_0_PQ_BASE_LO + reg_off, lower_32_bits(bus_address));
	WREG32(mmDMA_QM_0_PQ_BASE_HI + reg_off, upper_32_bits(bus_address));

	WREG32(mmDMA_QM_0_PQ_SIZE + reg_off, ilog2(HL_QUEUE_LENGTH));
	WREG32(mmDMA_QM_0_PQ_PI + reg_off, 0);
	WREG32(mmDMA_QM_0_PQ_CI + reg_off, 0);

	WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
	WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
	WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
	WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
	WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
	WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
	WREG32(mmDMA_QM_0_GLBL_ERR_WDATA + reg_off,
			GOYA_ASYNC_EVENT_ID_DMA0_QM + dma_id);

	/* PQ has buffer of 2 cache lines, while CQ has 8 lines */
	WREG32(mmDMA_QM_0_PQ_CFG1 + reg_off, 0x00020002);
	WREG32(mmDMA_QM_0_CQ_CFG1 + reg_off, 0x00080008);

	if (goya->hw_cap_initialized & HW_CAP_MMU)
		WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_PARTLY_TRUSTED);
	else
		WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_FULLY_TRUSTED);

	WREG32(mmDMA_QM_0_GLBL_ERR_CFG + reg_off, QMAN_DMA_ERR_MSG_EN);
	WREG32(mmDMA_QM_0_GLBL_CFG0 + reg_off, QMAN_DMA_ENABLE);
}

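/*
 * goya_init_dma_ch - Initialize the H/W registers of a single DMA channel
 *
 * @hdev: pointer to hl_device structure
 * @dma_id: index of the DMA channel
 *
 * Route channel errors to the GIC and point the write-completion address
 * at the channel's sync object
 *
 */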
static void goya_init_dma_ch(struct hl_device *hdev, int dma_id)
{
	u32 gic_base_lo, gic_base_hi;
	u64 sob_addr;
	u32 reg_off = dma_id * (mmDMA_CH_1_CFG1 - mmDMA_CH_0_CFG1);

	gic_base_lo =
		lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
	gic_base_hi =
		upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);

	WREG32(mmDMA_CH_0_ERRMSG_ADDR_LO + reg_off, gic_base_lo);
	WREG32(mmDMA_CH_0_ERRMSG_ADDR_HI + reg_off, gic_base_hi);
	WREG32(mmDMA_CH_0_ERRMSG_WDATA + reg_off,
			GOYA_ASYNC_EVENT_ID_DMA0_CH + dma_id);

	if (dma_id)
		sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1000 +
				(dma_id - 1) * 4;
	else
		sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1007;

	WREG32(mmDMA_CH_0_WR_COMP_ADDR_LO + reg_off, lower_32_bits(sob_addr));
	WREG32(mmDMA_CH_0_WR_COMP_ADDR_HI + reg_off, upper_32_bits(sob_addr));
	WREG32(mmDMA_CH_0_WR_COMP_WDATA + reg_off, 0x80000001);
}

/*
 * goya_init_dma_qmans - Initialize QMAN DMA registers
 *
 * @hdev: pointer to hl_device structure
 *
 * Initialize the H/W registers of the QMAN DMA channels
 *
 */
static void goya_init_dma_qmans(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	struct hl_hw_queue *q;
	dma_addr_t bus_address;
	int i;

	if (goya->hw_cap_initialized & HW_CAP_DMA)
		return;

	q = &hdev->kernel_queues[0];

	for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++, q++) {
		bus_address = q->bus_address +
				hdev->asic_prop.host_phys_base_address;

		goya_init_dma_qman(hdev, i, bus_address);
		goya_init_dma_ch(hdev, i);
	}

	goya->hw_cap_initialized |= HW_CAP_DMA;
}

/*
 * goya_disable_external_queues - Disable external queues
 *
 * @hdev: pointer to hl_device structure
 *
 */
static void goya_disable_external_queues(struct hl_device *hdev)
{
	WREG32(mmDMA_QM_0_GLBL_CFG0, 0);
	WREG32(mmDMA_QM_1_GLBL_CFG0, 0);
	WREG32(mmDMA_QM_2_GLBL_CFG0, 0);
	WREG32(mmDMA_QM_3_GLBL_CFG0, 0);
	WREG32(mmDMA_QM_4_GLBL_CFG0, 0);
}

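/*
 * goya_stop_queue - Stop a single QMAN
 *
 * @hdev: pointer to hl_device structure
 * @cfg_reg: the QMAN's GLBL_CFG1 register
 * @cp_sts_reg: the QMAN's CP_STS register
 * @glbl_sts0_reg: the QMAN's GLBL_STS0 register
 *
 * Request the CP to stop, wait for any in-progress fence to clear, then
 * poll until the CP reports it has stopped. Returns 0 on success
 *
 */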
static int goya_stop_queue(struct hl_device *hdev, u32 cfg_reg,
				u32 cp_sts_reg, u32 glbl_sts0_reg)
{
	int rc;
	u32 status;

	/* use the values of TPC0 as they are all the same */

	WREG32(cfg_reg, 1 << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);

	status = RREG32(cp_sts_reg);
	if (status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK) {
		rc = hl_poll_timeout(
			hdev,
			cp_sts_reg,
			status,
			!(status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK),
			1000,
			QMAN_FENCE_TIMEOUT_USEC);

		/* if QMAN is stuck in fence no need to check for stop */
		if (rc)
			return 0;
	}

	rc = hl_poll_timeout(
		hdev,
		glbl_sts0_reg,
		status,
		(status & TPC0_QM_GLBL_STS0_CP_IS_STOP_MASK),
		1000,
		QMAN_STOP_TIMEOUT_USEC);

	if (rc) {
		dev_err(hdev->dev,
			"Timeout while waiting for QMAN to stop\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * goya_stop_external_queues - Stop external queues
 *
 * @hdev: pointer to hl_device structure
 *
 * Returns 0 on success
 *
 */
static int goya_stop_external_queues(struct hl_device *hdev)
{
	int rc, retval = 0;

	rc = goya_stop_queue(hdev,
			mmDMA_QM_0_GLBL_CFG1,
			mmDMA_QM_0_CP_STS,
			mmDMA_QM_0_GLBL_STS0);

	if (rc) {
		dev_err(hdev->dev, "failed to stop DMA QMAN 0\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev,
			mmDMA_QM_1_GLBL_CFG1,
			mmDMA_QM_1_CP_STS,
			mmDMA_QM_1_GLBL_STS0);

	if (rc) {
		dev_err(hdev->dev, "failed to stop DMA QMAN 1\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev,
			mmDMA_QM_2_GLBL_CFG1,
			mmDMA_QM_2_CP_STS,
			mmDMA_QM_2_GLBL_STS0);

	if (rc) {
		dev_err(hdev->dev, "failed to stop DMA QMAN 2\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev,
			mmDMA_QM_3_GLBL_CFG1,
			mmDMA_QM_3_CP_STS,
			mmDMA_QM_3_GLBL_STS0);

	if (rc) {
		dev_err(hdev->dev, "failed to stop DMA QMAN 3\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev,
			mmDMA_QM_4_GLBL_CFG1,
			mmDMA_QM_4_CP_STS,
			mmDMA_QM_4_GLBL_STS0);

	if (rc) {
		dev_err(hdev->dev, "failed to stop DMA QMAN 4\n");
		retval = -EIO;
	}

	return retval;
}

/*
 * goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU
 *
 * @hdev: pointer to hl_device structure
 *
 * Returns 0 on success
 *
 */
static int goya_init_cpu_queues(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	struct hl_eq *eq;
	dma_addr_t bus_address;
	u32 status;
	struct hl_hw_queue *cpu_pq = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ];
	int err;

	if (!hdev->cpu_queues_enable)
		return 0;

	if (goya->hw_cap_initialized & HW_CAP_CPU_Q)
		return 0;

	eq = &hdev->event_queue;

	bus_address = cpu_pq->bus_address +
			hdev->asic_prop.host_phys_base_address;
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_0, lower_32_bits(bus_address));
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_1, upper_32_bits(bus_address));

	bus_address = eq->bus_address + hdev->asic_prop.host_phys_base_address;
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_2, lower_32_bits(bus_address));
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_3, upper_32_bits(bus_address));

	bus_address = hdev->cpu_accessible_dma_address +
			hdev->asic_prop.host_phys_base_address;
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_8, lower_32_bits(bus_address));
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_9, upper_32_bits(bus_address));

	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_5, HL_QUEUE_SIZE_IN_BYTES);
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_4, HL_EQ_SIZE_IN_BYTES);
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_10, HL_CPU_ACCESSIBLE_MEM_SIZE);

	/* Used for EQ CI */
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, 0);

	WREG32(mmCPU_IF_PF_PQ_PI, 0);

	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_7, PQ_INIT_STATUS_READY_FOR_CP);

	WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
			GOYA_ASYNC_EVENT_ID_PI_UPDATE);

	err = hl_poll_timeout(
		hdev,
		mmPSOC_GLOBAL_CONF_SCRATCHPAD_7,
		status,
		(status == PQ_INIT_STATUS_READY_FOR_HOST),
		1000,
		GOYA_CPU_TIMEOUT_USEC);

	if (err) {
		dev_err(hdev->dev,
			"Failed to communicate with ARM CPU (ArmCP timeout)\n");
		return -EIO;
	}

	goya->hw_cap_initialized |= HW_CAP_CPU_Q;
	return 0;
}

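/* Set all PLLs to use the reference clock (all PLL_DIV_SEL registers cleared) */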
static void goya_set_pll_refclk(struct hl_device *hdev)
{
	WREG32(mmCPU_PLL_DIV_SEL_0, 0x0);
	WREG32(mmCPU_PLL_DIV_SEL_1, 0x0);
	WREG32(mmCPU_PLL_DIV_SEL_2, 0x0);
	WREG32(mmCPU_PLL_DIV_SEL_3, 0x0);

	WREG32(mmIC_PLL_DIV_SEL_0, 0x0);
	WREG32(mmIC_PLL_DIV_SEL_1, 0x0);
	WREG32(mmIC_PLL_DIV_SEL_2, 0x0);
	WREG32(mmIC_PLL_DIV_SEL_3, 0x0);

	WREG32(mmMC_PLL_DIV_SEL_0, 0x0);
	WREG32(mmMC_PLL_DIV_SEL_1, 0x0);
	WREG32(mmMC_PLL_DIV_SEL_2, 0x0);
	WREG32(mmMC_PLL_DIV_SEL_3, 0x0);

	WREG32(mmPSOC_MME_PLL_DIV_SEL_0, 0x0);
	WREG32(mmPSOC_MME_PLL_DIV_SEL_1, 0x0);
	WREG32(mmPSOC_MME_PLL_DIV_SEL_2, 0x0);
	WREG32(mmPSOC_MME_PLL_DIV_SEL_3, 0x0);

	WREG32(mmPSOC_PCI_PLL_DIV_SEL_0, 0x0);
	WREG32(mmPSOC_PCI_PLL_DIV_SEL_1, 0x0);
	WREG32(mmPSOC_PCI_PLL_DIV_SEL_2, 0x0);
	WREG32(mmPSOC_PCI_PLL_DIV_SEL_3, 0x0);

	WREG32(mmPSOC_EMMC_PLL_DIV_SEL_0, 0x0);
	WREG32(mmPSOC_EMMC_PLL_DIV_SEL_1, 0x0);
	WREG32(mmPSOC_EMMC_PLL_DIV_SEL_2, 0x0);
	WREG32(mmPSOC_EMMC_PLL_DIV_SEL_3, 0x0);

	WREG32(mmTPC_PLL_DIV_SEL_0, 0x0);
	WREG32(mmTPC_PLL_DIV_SEL_1, 0x0);
	WREG32(mmTPC_PLL_DIV_SEL_2, 0x0);
	WREG32(mmTPC_PLL_DIV_SEL_3, 0x0);
}

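/* Disable clock relaxation on the PSOC MME and IC PLLs */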
static void goya_disable_clk_rlx(struct hl_device *hdev)
{
	WREG32(mmPSOC_MME_PLL_CLK_RLX_0, 0x100010);
	WREG32(mmIC_PLL_CLK_RLX_0, 0x100010);
}

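/*
 * _goya_tpc_mbist_workaround - Per-TPC part of the H2 #2443 workaround
 *
 * @hdev: pointer to hl_device structure
 * @tpc_id: index of the TPC
 *
 * Run MBIST on the TPC memories, reset the TPC EML core and clear the SLM,
 * because the TPC SB is not initialized on chip reset
 *
 */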
static void _goya_tpc_mbist_workaround(struct hl_device *hdev, u8 tpc_id)
{
	u64 tpc_eml_address;
	u32 val, tpc_offset, tpc_eml_offset, tpc_slm_offset;
	int err, slm_index;

	tpc_offset = tpc_id * 0x40000;
	tpc_eml_offset = tpc_id * 0x200000;
	tpc_eml_address = (mmTPC0_EML_CFG_BASE + tpc_eml_offset - CFG_BASE);
	tpc_slm_offset = tpc_eml_address + 0x100000;

	/*
	 * Workaround for Bug H2 #2443 :
	 * "TPC SB is not initialized on chip reset"
	 */

	val = RREG32(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset);
	if (val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_ACTIVE_MASK)
		dev_warn(hdev->dev, "TPC%d MBIST ACTIVE is not cleared\n",
				tpc_id);

	WREG32(mmTPC0_CFG_FUNC_MBIST_PAT + tpc_offset, val & 0xFFFFF000);

	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_0 + tpc_offset, 0x37FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_1 + tpc_offset, 0x303F);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_2 + tpc_offset, 0x71FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_3 + tpc_offset, 0x71FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_4 + tpc_offset, 0x70FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_5 + tpc_offset, 0x70FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_6 + tpc_offset, 0x70FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_7 + tpc_offset, 0x70FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_8 + tpc_offset, 0x70FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_9 + tpc_offset, 0x70FF);

	WREG32_OR(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset,
		1 << TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_START_SHIFT);

	err = hl_poll_timeout(
		hdev,
		mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset,
		val,
		(val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_DONE_MASK),
		1000,
		HL_DEVICE_TIMEOUT_USEC);

	if (err)
		dev_err(hdev->dev,
			"Timeout while waiting for TPC%d MBIST DONE\n", tpc_id);

	WREG32_OR(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset,
		1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT);

	msleep(GOYA_RESET_WAIT_MSEC);

	WREG32_AND(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset,
		~(1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT));

	msleep(GOYA_RESET_WAIT_MSEC);

	for (slm_index = 0 ; slm_index < 256 ; slm_index++)
		WREG32(tpc_slm_offset + (slm_index << 2), 0);

	val = RREG32(tpc_slm_offset);
}

static void goya_tpc_mbist_workaround(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	int i;

	if (hdev->pldm)
		return;

	if (goya->hw_cap_initialized & HW_CAP_TPC_MBIST)
		return;

	/* Workaround for H2 #2443 */

	for (i = 0 ; i < TPC_MAX_NUM ; i++)
		_goya_tpc_mbist_workaround(hdev, i);

	goya->hw_cap_initialized |= HW_CAP_TPC_MBIST;
}


/*
 * goya_init_golden_registers - Initialize golden registers
 *
 * @hdev: pointer to hl_device structure
 *
 * Initialize the H/W registers of the device
 *
 */
static void goya_init_golden_registers(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	u32 polynom[10], tpc_intr_mask, offset;
	int i;

	if (goya->hw_cap_initialized & HW_CAP_GOLDEN)
		return;

	polynom[0] = 0x00020080;
	polynom[1] = 0x00401000;
	polynom[2] = 0x00200800;
	polynom[3] = 0x00002000;
	polynom[4] = 0x00080200;
	polynom[5] = 0x00040100;
	polynom[6] = 0x00100400;
	polynom[7] = 0x00004000;
	polynom[8] = 0x00010000;
	polynom[9] = 0x00008000;

	/* Mask all arithmetic interrupts from TPC */
	tpc_intr_mask = 0x7FFF;

	for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x20000) {
		WREG32(mmSRAM_Y0_X0_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
		WREG32(mmSRAM_Y0_X1_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
		WREG32(mmSRAM_Y0_X2_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
		WREG32(mmSRAM_Y0_X3_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
		WREG32(mmSRAM_Y0_X4_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);

		WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_L_ARB + offset, 0x204);
		WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_L_ARB + offset, 0x204);
		WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_L_ARB + offset, 0x204);
		WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_L_ARB + offset, 0x204);
		WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_L_ARB + offset, 0x204);


		WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_E_ARB + offset, 0x206);
		WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_E_ARB + offset, 0x206);
		WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_E_ARB + offset, 0x206);
		WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_E_ARB + offset, 0x207);
		WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_E_ARB + offset, 0x207);

		WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_W_ARB + offset, 0x207);
		WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_W_ARB + offset, 0x207);
		WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_W_ARB + offset, 0x206);
		WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_W_ARB + offset, 0x206);
		WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_W_ARB + offset, 0x206);

		WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_E_ARB + offset, 0x101);
		WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_E_ARB + offset, 0x102);
		WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_E_ARB + offset, 0x103);
		WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_E_ARB + offset, 0x104);
		WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_E_ARB + offset, 0x105);

		WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_W_ARB + offset, 0x105);
		WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_W_ARB + offset, 0x104);
		WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_W_ARB + offset, 0x103);
		WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_W_ARB + offset, 0x102);
		WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_W_ARB + offset, 0x101);
	}

	WREG32(mmMME_STORE_MAX_CREDIT, 0x21);
	WREG32(mmMME_AGU, 0x0f0f0f10);
	WREG32(mmMME_SEI_MASK, ~0x0);

	WREG32(mmMME6_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
	WREG32(mmMME5_RTR_HBW_RD_RQ_N_ARB, 0x01040101);
	WREG32(mmMME4_RTR_HBW_RD_RQ_N_ARB, 0x01030101);
	WREG32(mmMME3_RTR_HBW_RD_RQ_N_ARB, 0x01020101);
	WREG32(mmMME2_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
	WREG32(mmMME1_RTR_HBW_RD_RQ_N_ARB, 0x07010701);
	WREG32(mmMME6_RTR_HBW_RD_RQ_S_ARB, 0x04010401);
	WREG32(mmMME5_RTR_HBW_RD_RQ_S_ARB, 0x04050401);
	WREG32(mmMME4_RTR_HBW_RD_RQ_S_ARB, 0x03070301);
	WREG32(mmMME3_RTR_HBW_RD_RQ_S_ARB, 0x01030101);
	WREG32(mmMME2_RTR_HBW_RD_RQ_S_ARB, 0x01040101);
	WREG32(mmMME1_RTR_HBW_RD_RQ_S_ARB, 0x01050105);
	WREG32(mmMME6_RTR_HBW_RD_RQ_W_ARB, 0x01010501);
	WREG32(mmMME5_RTR_HBW_RD_RQ_W_ARB, 0x01010501);
	WREG32(mmMME4_RTR_HBW_RD_RQ_W_ARB, 0x01040301);
	WREG32(mmMME3_RTR_HBW_RD_RQ_W_ARB, 0x01030401);
	WREG32(mmMME2_RTR_HBW_RD_RQ_W_ARB, 0x01040101);
	WREG32(mmMME1_RTR_HBW_RD_RQ_W_ARB, 0x01050101);
	WREG32(mmMME6_RTR_HBW_WR_RQ_N_ARB, 0x02020202);
	WREG32(mmMME5_RTR_HBW_WR_RQ_N_ARB, 0x01070101);
	WREG32(mmMME4_RTR_HBW_WR_RQ_N_ARB, 0x02020201);
	WREG32(mmMME3_RTR_HBW_WR_RQ_N_ARB, 0x07020701);
	WREG32(mmMME2_RTR_HBW_WR_RQ_N_ARB, 0x01020101);
	WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
	WREG32(mmMME6_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
	WREG32(mmMME5_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
	WREG32(mmMME4_RTR_HBW_WR_RQ_S_ARB, 0x07020701);
	WREG32(mmMME3_RTR_HBW_WR_RQ_S_ARB, 0x02020201);
	WREG32(mmMME2_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
	WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01020102);
	WREG32(mmMME6_RTR_HBW_WR_RQ_W_ARB, 0x01020701);
	WREG32(mmMME5_RTR_HBW_WR_RQ_W_ARB, 0x01020701);
	WREG32(mmMME4_RTR_HBW_WR_RQ_W_ARB, 0x07020707);
	WREG32(mmMME3_RTR_HBW_WR_RQ_W_ARB, 0x01020201);
	WREG32(mmMME2_RTR_HBW_WR_RQ_W_ARB, 0x01070201);
	WREG32(mmMME1_RTR_HBW_WR_RQ_W_ARB, 0x01070201);
	WREG32(mmMME6_RTR_HBW_RD_RS_N_ARB, 0x01070102);
	WREG32(mmMME5_RTR_HBW_RD_RS_N_ARB, 0x01070102);
	WREG32(mmMME4_RTR_HBW_RD_RS_N_ARB, 0x01060102);
	WREG32(mmMME3_RTR_HBW_RD_RS_N_ARB, 0x01040102);
	WREG32(mmMME2_RTR_HBW_RD_RS_N_ARB, 0x01020102);
	WREG32(mmMME1_RTR_HBW_RD_RS_N_ARB, 0x01020107);
	WREG32(mmMME6_RTR_HBW_RD_RS_S_ARB, 0x01020106);
	WREG32(mmMME5_RTR_HBW_RD_RS_S_ARB, 0x01020102);
	WREG32(mmMME4_RTR_HBW_RD_RS_S_ARB, 0x01040102);
	WREG32(mmMME3_RTR_HBW_RD_RS_S_ARB, 0x01060102);
	WREG32(mmMME2_RTR_HBW_RD_RS_S_ARB, 0x01070102);
	WREG32(mmMME1_RTR_HBW_RD_RS_S_ARB, 0x01070102);
	WREG32(mmMME6_RTR_HBW_RD_RS_E_ARB, 0x01020702);
	WREG32(mmMME5_RTR_HBW_RD_RS_E_ARB, 0x01020702);
	WREG32(mmMME4_RTR_HBW_RD_RS_E_ARB, 0x01040602);
	WREG32(mmMME3_RTR_HBW_RD_RS_E_ARB, 0x01060402);
	WREG32(mmMME2_RTR_HBW_RD_RS_E_ARB, 0x01070202);
	WREG32(mmMME1_RTR_HBW_RD_RS_E_ARB, 0x01070102);
	WREG32(mmMME6_RTR_HBW_RD_RS_W_ARB, 0x01060401);
	WREG32(mmMME5_RTR_HBW_RD_RS_W_ARB, 0x01060401);
	WREG32(mmMME4_RTR_HBW_RD_RS_W_ARB, 0x01060401);
	WREG32(mmMME3_RTR_HBW_RD_RS_W_ARB, 0x01060401);
	WREG32(mmMME2_RTR_HBW_RD_RS_W_ARB, 0x01060401);
	WREG32(mmMME1_RTR_HBW_RD_RS_W_ARB, 0x01060401);
	WREG32(mmMME6_RTR_HBW_WR_RS_N_ARB, 0x01050101);
	WREG32(mmMME5_RTR_HBW_WR_RS_N_ARB, 0x01040101);
	WREG32(mmMME4_RTR_HBW_WR_RS_N_ARB, 0x01030101);
	WREG32(mmMME3_RTR_HBW_WR_RS_N_ARB, 0x01020101);
	WREG32(mmMME2_RTR_HBW_WR_RS_N_ARB, 0x01010101);
	WREG32(mmMME1_RTR_HBW_WR_RS_N_ARB, 0x01010107);
	WREG32(mmMME6_RTR_HBW_WR_RS_S_ARB, 0x01010107);
	WREG32(mmMME5_RTR_HBW_WR_RS_S_ARB, 0x01010101);
	WREG32(mmMME4_RTR_HBW_WR_RS_S_ARB, 0x01020101);
	WREG32(mmMME3_RTR_HBW_WR_RS_S_ARB, 0x01030101);
	WREG32(mmMME2_RTR_HBW_WR_RS_S_ARB, 0x01040101);
	WREG32(mmMME1_RTR_HBW_WR_RS_S_ARB, 0x01050101);
	WREG32(mmMME6_RTR_HBW_WR_RS_E_ARB, 0x01010501);
	WREG32(mmMME5_RTR_HBW_WR_RS_E_ARB, 0x01010501);
	WREG32(mmMME4_RTR_HBW_WR_RS_E_ARB, 0x01040301);
	WREG32(mmMME3_RTR_HBW_WR_RS_E_ARB, 0x01030401);
	WREG32(mmMME2_RTR_HBW_WR_RS_E_ARB, 0x01040101);
	WREG32(mmMME1_RTR_HBW_WR_RS_E_ARB, 0x01050101);
	WREG32(mmMME6_RTR_HBW_WR_RS_W_ARB, 0x01010101);
	WREG32(mmMME5_RTR_HBW_WR_RS_W_ARB, 0x01010101);
	WREG32(mmMME4_RTR_HBW_WR_RS_W_ARB, 0x01010101);
	WREG32(mmMME3_RTR_HBW_WR_RS_W_ARB, 0x01010101);
	WREG32(mmMME2_RTR_HBW_WR_RS_W_ARB, 0x01010101);
	WREG32(mmMME1_RTR_HBW_WR_RS_W_ARB, 0x01010101);

	WREG32(mmTPC1_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
	WREG32(mmTPC1_RTR_HBW_RD_RQ_S_ARB, 0x01010101);
	WREG32(mmTPC1_RTR_HBW_RD_RQ_E_ARB, 0x01060101);
	WREG32(mmTPC1_RTR_HBW_WR_RQ_N_ARB, 0x02020102);
	WREG32(mmTPC1_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
	WREG32(mmTPC1_RTR_HBW_WR_RQ_E_ARB, 0x02070202);
	WREG32(mmTPC1_RTR_HBW_RD_RS_N_ARB, 0x01020201);
	WREG32(mmTPC1_RTR_HBW_RD_RS_S_ARB, 0x01070201);
	WREG32(mmTPC1_RTR_HBW_RD_RS_W_ARB, 0x01070202);
	WREG32(mmTPC1_RTR_HBW_WR_RS_N_ARB, 0x01010101);
	WREG32(mmTPC1_RTR_HBW_WR_RS_S_ARB, 0x01050101);
	WREG32(mmTPC1_RTR_HBW_WR_RS_W_ARB, 0x01050101);

	WREG32(mmTPC2_RTR_HBW_RD_RQ_N_ARB, 0x01020101);
	WREG32(mmTPC2_RTR_HBW_RD_RQ_S_ARB, 0x01050101);
	WREG32(mmTPC2_RTR_HBW_RD_RQ_E_ARB, 0x01010201);
	WREG32(mmTPC2_RTR_HBW_WR_RQ_N_ARB, 0x02040102);
	WREG32(mmTPC2_RTR_HBW_WR_RQ_S_ARB, 0x01050101);
	WREG32(mmTPC2_RTR_HBW_WR_RQ_E_ARB, 0x02060202);
	WREG32(mmTPC2_RTR_HBW_RD_RS_N_ARB, 0x01020201);
	WREG32(mmTPC2_RTR_HBW_RD_RS_S_ARB, 0x01070201);
	WREG32(mmTPC2_RTR_HBW_RD_RS_W_ARB, 0x01070202);
	WREG32(mmTPC2_RTR_HBW_WR_RS_N_ARB, 0x01010101);
	WREG32(mmTPC2_RTR_HBW_WR_RS_S_ARB, 0x01040101);
	WREG32(mmTPC2_RTR_HBW_WR_RS_W_ARB, 0x01040101);

	WREG32(mmTPC3_RTR_HBW_RD_RQ_N_ARB, 0x01030101);
	WREG32(mmTPC3_RTR_HBW_RD_RQ_S_ARB, 0x01040101);
	WREG32(mmTPC3_RTR_HBW_RD_RQ_E_ARB, 0x01040301);
	WREG32(mmTPC3_RTR_HBW_WR_RQ_N_ARB, 0x02060102);
	WREG32(mmTPC3_RTR_HBW_WR_RQ_S_ARB, 0x01040101);
	WREG32(mmTPC3_RTR_HBW_WR_RQ_E_ARB, 0x01040301);
	WREG32(mmTPC3_RTR_HBW_RD_RS_N_ARB, 0x01040201);
	WREG32(mmTPC3_RTR_HBW_RD_RS_S_ARB, 0x01060201);
	WREG32(mmTPC3_RTR_HBW_RD_RS_W_ARB, 0x01060402);
	WREG32(mmTPC3_RTR_HBW_WR_RS_N_ARB, 0x01020101);
	WREG32(mmTPC3_RTR_HBW_WR_RS_S_ARB, 0x01030101);
	WREG32(mmTPC3_RTR_HBW_WR_RS_W_ARB, 0x01030401);

	WREG32(mmTPC4_RTR_HBW_RD_RQ_N_ARB, 0x01040101);
	WREG32(mmTPC4_RTR_HBW_RD_RQ_S_ARB, 0x01030101);
	WREG32(mmTPC4_RTR_HBW_RD_RQ_E_ARB, 0x01030401);
	WREG32(mmTPC4_RTR_HBW_WR_RQ_N_ARB, 0x02070102);
	WREG32(mmTPC4_RTR_HBW_WR_RQ_S_ARB, 0x01030101);
	WREG32(mmTPC4_RTR_HBW_WR_RQ_E_ARB, 0x02060702);
	WREG32(mmTPC4_RTR_HBW_RD_RS_N_ARB, 0x01060201);
	WREG32(mmTPC4_RTR_HBW_RD_RS_S_ARB, 0x01040201);
	WREG32(mmTPC4_RTR_HBW_RD_RS_W_ARB, 0x01040602);
	WREG32(mmTPC4_RTR_HBW_WR_RS_N_ARB, 0x01030101);
	WREG32(mmTPC4_RTR_HBW_WR_RS_S_ARB, 0x01020101);
	WREG32(mmTPC4_RTR_HBW_WR_RS_W_ARB, 0x01040301);

	WREG32(mmTPC5_RTR_HBW_RD_RQ_N_ARB, 0x01050101);
	WREG32(mmTPC5_RTR_HBW_RD_RQ_S_ARB, 0x01020101);
	WREG32(mmTPC5_RTR_HBW_RD_RQ_E_ARB, 0x01200501);
	WREG32(mmTPC5_RTR_HBW_WR_RQ_N_ARB, 0x02070102);
	WREG32(mmTPC5_RTR_HBW_WR_RQ_S_ARB, 0x01020101);
	WREG32(mmTPC5_RTR_HBW_WR_RQ_E_ARB, 0x02020602);
	WREG32(mmTPC5_RTR_HBW_RD_RS_N_ARB, 0x01070201);
	WREG32(mmTPC5_RTR_HBW_RD_RS_S_ARB, 0x01020201);
	WREG32(mmTPC5_RTR_HBW_RD_RS_W_ARB, 0x01020702);
	WREG32(mmTPC5_RTR_HBW_WR_RS_N_ARB, 0x01040101);
	WREG32(mmTPC5_RTR_HBW_WR_RS_S_ARB, 0x01010101);
	WREG32(mmTPC5_RTR_HBW_WR_RS_W_ARB, 0x01010501);

	WREG32(mmTPC6_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
	WREG32(mmTPC6_RTR_HBW_RD_RQ_S_ARB, 0x01010101);
	WREG32(mmTPC6_RTR_HBW_RD_RQ_E_ARB, 0x01010601);
	WREG32(mmTPC6_RTR_HBW_WR_RQ_N_ARB, 0x01010101);
	WREG32(mmTPC6_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
	WREG32(mmTPC6_RTR_HBW_WR_RQ_E_ARB, 0x02020702);
	WREG32(mmTPC6_RTR_HBW_RD_RS_N_ARB, 0x01010101);
	WREG32(mmTPC6_RTR_HBW_RD_RS_S_ARB, 0x01010101);
	WREG32(mmTPC6_RTR_HBW_RD_RS_W_ARB, 0x01020702);
	WREG32(mmTPC6_RTR_HBW_WR_RS_N_ARB, 0x01050101);
	WREG32(mmTPC6_RTR_HBW_WR_RS_S_ARB, 0x01010101);
	WREG32(mmTPC6_RTR_HBW_WR_RS_W_ARB, 0x01010501);

	for (i = 0, offset = 0 ; i < 10 ; i++, offset += 4) {
		WREG32(mmMME1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmMME2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmMME3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmMME4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmMME5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmMME6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);

		WREG32(mmTPC0_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmTPC1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmTPC2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmTPC3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmTPC4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmTPC5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmTPC6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmTPC7_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);

		WREG32(mmPCI_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmDMA_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
	}

	for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x40000) {
		WREG32(mmMME1_RTR_SCRAMB_EN + offset,
				1 << MME1_RTR_SCRAMB_EN_VAL_SHIFT);
		WREG32(mmMME1_RTR_NON_LIN_SCRAMB + offset,
				1 << MME1_RTR_NON_LIN_SCRAMB_EN_SHIFT);
	}

	for (i = 0, offset = 0 ; i < 8 ; i++, offset += 0x40000) {
		/*
		 * Workaround for Bug H2 #2441 :
		 * "ST.NOP set trace event illegal opcode"
		 */
		WREG32(mmTPC0_CFG_TPC_INTR_MASK + offset, tpc_intr_mask);

		WREG32(mmTPC0_NRTR_SCRAMB_EN + offset,
				1 << TPC0_NRTR_SCRAMB_EN_VAL_SHIFT);
		WREG32(mmTPC0_NRTR_NON_LIN_SCRAMB + offset,
				1 << TPC0_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
	}

	WREG32(mmDMA_NRTR_SCRAMB_EN, 1 << DMA_NRTR_SCRAMB_EN_VAL_SHIFT);
	WREG32(mmDMA_NRTR_NON_LIN_SCRAMB,
			1 << DMA_NRTR_NON_LIN_SCRAMB_EN_SHIFT);

	WREG32(mmPCI_NRTR_SCRAMB_EN, 1 << PCI_NRTR_SCRAMB_EN_VAL_SHIFT);
	WREG32(mmPCI_NRTR_NON_LIN_SCRAMB,
			1 << PCI_NRTR_NON_LIN_SCRAMB_EN_SHIFT);

	/*
	 * Workaround for H2 #HW-23 bug
	 * Set DMA max outstanding read requests to 240 on DMA CH 1. Set it
	 * to 16 on KMD DMA
	 * We need to limit only these DMAs because the user can only read
	 * from Host using DMA CH 1
	 */
	WREG32(mmDMA_CH_0_CFG0, 0x0fff0010);
	WREG32(mmDMA_CH_1_CFG0, 0x0fff00F0);

	goya->hw_cap_initialized |= HW_CAP_GOLDEN;
}

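/*
 * goya_init_mme_qman - Initialize the H/W registers of the MME QMAN
 *
 * @hdev: pointer to hl_device structure
 *
 * The MME QMAN's PQ resides on SRAM, at MME_QMAN_BASE_OFFSET
 *
 */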
static void goya_init_mme_qman(struct hl_device *hdev)
{
	u32 mtr_base_lo, mtr_base_hi;
	u32 so_base_lo, so_base_hi;
	u32 gic_base_lo, gic_base_hi;
	u64 qman_base_addr;

	mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
	mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
	so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
	so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);

	gic_base_lo =
		lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
	gic_base_hi =
		upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);

	qman_base_addr = hdev->asic_prop.sram_base_address +
			MME_QMAN_BASE_OFFSET;

	WREG32(mmMME_QM_PQ_BASE_LO, lower_32_bits(qman_base_addr));
	WREG32(mmMME_QM_PQ_BASE_HI, upper_32_bits(qman_base_addr));
	WREG32(mmMME_QM_PQ_SIZE, ilog2(MME_QMAN_LENGTH));
	WREG32(mmMME_QM_PQ_PI, 0);
	WREG32(mmMME_QM_PQ_CI, 0);
	WREG32(mmMME_QM_CP_LDMA_SRC_BASE_LO_OFFSET, 0x10C0);
	WREG32(mmMME_QM_CP_LDMA_SRC_BASE_HI_OFFSET, 0x10C4);
	WREG32(mmMME_QM_CP_LDMA_TSIZE_OFFSET, 0x10C8);
	WREG32(mmMME_QM_CP_LDMA_COMMIT_OFFSET, 0x10CC);

	WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
	WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
	WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_LO, so_base_lo);
	WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_HI, so_base_hi);

	/* QMAN CQ has 8 cache lines */
	WREG32(mmMME_QM_CQ_CFG1, 0x00080008);

	WREG32(mmMME_QM_GLBL_ERR_ADDR_LO, gic_base_lo);
	WREG32(mmMME_QM_GLBL_ERR_ADDR_HI, gic_base_hi);

	WREG32(mmMME_QM_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_QM);

	WREG32(mmMME_QM_GLBL_ERR_CFG, QMAN_MME_ERR_MSG_EN);

	WREG32(mmMME_QM_GLBL_PROT, QMAN_MME_ERR_PROT);

	WREG32(mmMME_QM_GLBL_CFG0, QMAN_MME_ENABLE);
}

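/*
 * goya_init_mme_cmdq - Initialize the H/W registers of the MME CMDQ
 *
 * @hdev: pointer to hl_device structure
 *
 */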
static void goya_init_mme_cmdq(struct hl_device *hdev)
{
	u32 mtr_base_lo, mtr_base_hi;
	u32 so_base_lo, so_base_hi;
	u32 gic_base_lo, gic_base_hi;
	u64 qman_base_addr;

	mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
	mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
	so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
	so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);

	gic_base_lo =
		lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
	gic_base_hi =
		upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);

	qman_base_addr = hdev->asic_prop.sram_base_address +
			MME_QMAN_BASE_OFFSET;

	WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
	WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
	WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_LO, so_base_lo);
	WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_HI, so_base_hi);

	/* CMDQ CQ has 20 cache lines */
	WREG32(mmMME_CMDQ_CQ_CFG1, 0x00140014);

	WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_LO, gic_base_lo);
	WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_HI, gic_base_hi);

	WREG32(mmMME_CMDQ_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_CMDQ);

	WREG32(mmMME_CMDQ_GLBL_ERR_CFG, CMDQ_MME_ERR_MSG_EN);

	WREG32(mmMME_CMDQ_GLBL_PROT, CMDQ_MME_ERR_PROT);

	WREG32(mmMME_CMDQ_GLBL_CFG0, CMDQ_MME_ENABLE);
}

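/*
 * goya_init_mme_qmans - Initialize the H/W registers of the MME QMAN and CMDQ
 *
 * @hdev: pointer to hl_device structure
 *
 */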
static void goya_init_mme_qmans(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	u32 so_base_lo, so_base_hi;

	if (goya->hw_cap_initialized & HW_CAP_MME)
		return;

	so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
	so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);

	WREG32(mmMME_SM_BASE_ADDRESS_LOW, so_base_lo);
	WREG32(mmMME_SM_BASE_ADDRESS_HIGH, so_base_hi);

	goya_init_mme_qman(hdev);
	goya_init_mme_cmdq(hdev);

	goya->hw_cap_initialized |= HW_CAP_MME;
}

1553static void goya_init_tpc_qman(struct hl_device *hdev, u32 base_off, int tpc_id)
1554{
1555 u32 mtr_base_lo, mtr_base_hi;
1556 u32 so_base_lo, so_base_hi;
1557 u32 gic_base_lo, gic_base_hi;
1558 u64 qman_base_addr;
1559 u32 reg_off = tpc_id * (mmTPC1_QM_PQ_PI - mmTPC0_QM_PQ_PI);
1560
1561 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1562 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1563 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1564 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1565
1566 gic_base_lo =
1567 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1568 gic_base_hi =
1569 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1570
1571 qman_base_addr = hdev->asic_prop.sram_base_address + base_off;
1572
1573 WREG32(mmTPC0_QM_PQ_BASE_LO + reg_off, lower_32_bits(qman_base_addr));
1574 WREG32(mmTPC0_QM_PQ_BASE_HI + reg_off, upper_32_bits(qman_base_addr));
1575 WREG32(mmTPC0_QM_PQ_SIZE + reg_off, ilog2(TPC_QMAN_LENGTH));
1576 WREG32(mmTPC0_QM_PQ_PI + reg_off, 0);
1577 WREG32(mmTPC0_QM_PQ_CI + reg_off, 0);
1578 WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET + reg_off, 0x10C0);
1579 WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET + reg_off, 0x10C4);
1580 WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET + reg_off, 0x10C8);
1581 WREG32(mmTPC0_QM_CP_LDMA_COMMIT_OFFSET + reg_off, 0x10CC);
1582
1583 WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
1584 WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
1585 WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
1586 WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
1587
1588 WREG32(mmTPC0_QM_CQ_CFG1 + reg_off, 0x00080008);
1589
1590 WREG32(mmTPC0_QM_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
1591 WREG32(mmTPC0_QM_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
1592
1593 WREG32(mmTPC0_QM_GLBL_ERR_WDATA + reg_off,
1594 GOYA_ASYNC_EVENT_ID_TPC0_QM + tpc_id);
1595
1596 WREG32(mmTPC0_QM_GLBL_ERR_CFG + reg_off, QMAN_TPC_ERR_MSG_EN);
1597
1598 WREG32(mmTPC0_QM_GLBL_PROT + reg_off, QMAN_TPC_ERR_PROT);
1599
1600 WREG32(mmTPC0_QM_GLBL_CFG0 + reg_off, QMAN_TPC_ENABLE);
1601}
1602
1603static void goya_init_tpc_cmdq(struct hl_device *hdev, int tpc_id)
1604{
1605 u32 mtr_base_lo, mtr_base_hi;
1606 u32 so_base_lo, so_base_hi;
1607 u32 gic_base_lo, gic_base_hi;
1608 u32 reg_off = tpc_id * (mmTPC1_CMDQ_CQ_CFG1 - mmTPC0_CMDQ_CQ_CFG1);
1609
1610 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1611 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1612 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1613 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1614
1615 gic_base_lo =
1616 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1617 gic_base_hi =
1618 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1619
1620 WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
1621 WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
1622 WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
1623 WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
1624
1625 WREG32(mmTPC0_CMDQ_CQ_CFG1 + reg_off, 0x00140014);
1626
1627 WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
1628 WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
1629
1630 WREG32(mmTPC0_CMDQ_GLBL_ERR_WDATA + reg_off,
1631 GOYA_ASYNC_EVENT_ID_TPC0_CMDQ + tpc_id);
1632
1633 WREG32(mmTPC0_CMDQ_GLBL_ERR_CFG + reg_off, CMDQ_TPC_ERR_MSG_EN);
1634
1635 WREG32(mmTPC0_CMDQ_GLBL_PROT + reg_off, CMDQ_TPC_ERR_PROT);
1636
1637 WREG32(mmTPC0_CMDQ_GLBL_CFG0 + reg_off, CMDQ_TPC_ENABLE);
1638}
1639
1640static void goya_init_tpc_qmans(struct hl_device *hdev)
1641{
1642 struct goya_device *goya = hdev->asic_specific;
1643 u32 so_base_lo, so_base_hi;
1644 u32 cfg_off = mmTPC1_CFG_SM_BASE_ADDRESS_LOW -
1645 mmTPC0_CFG_SM_BASE_ADDRESS_LOW;
1646 int i;
1647
1648 if (goya->hw_cap_initialized & HW_CAP_TPC)
1649 return;
1650
1651 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1652 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1653
1654 for (i = 0 ; i < TPC_MAX_NUM ; i++) {
1655 WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_LOW + i * cfg_off,
1656 so_base_lo);
1657 WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_HIGH + i * cfg_off,
1658 so_base_hi);
1659 }
1660
1661 goya_init_tpc_qman(hdev, TPC0_QMAN_BASE_OFFSET, 0);
1662 goya_init_tpc_qman(hdev, TPC1_QMAN_BASE_OFFSET, 1);
1663 goya_init_tpc_qman(hdev, TPC2_QMAN_BASE_OFFSET, 2);
1664 goya_init_tpc_qman(hdev, TPC3_QMAN_BASE_OFFSET, 3);
1665 goya_init_tpc_qman(hdev, TPC4_QMAN_BASE_OFFSET, 4);
1666 goya_init_tpc_qman(hdev, TPC5_QMAN_BASE_OFFSET, 5);
1667 goya_init_tpc_qman(hdev, TPC6_QMAN_BASE_OFFSET, 6);
1668 goya_init_tpc_qman(hdev, TPC7_QMAN_BASE_OFFSET, 7);
1669
1670 for (i = 0 ; i < TPC_MAX_NUM ; i++)
1671 goya_init_tpc_cmdq(hdev, i);
1672
1673 goya->hw_cap_initialized |= HW_CAP_TPC;
1674}
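
/*
 * The eight goya_init_tpc_qman() calls above differ only in the TPC index
 * and that TPC's QMAN base offset in SRAM. A hypothetical, equivalent
 * sketch (not the driver's code) that drives them from a table:
 */
static void __maybe_unused goya_init_tpc_qmans_sketch(struct hl_device *hdev)
{
	/* Assumed: TPC_MAX_NUM == 8, matching the explicit calls above */
	static const u32 tpc_qman_base_offs[TPC_MAX_NUM] = {
		TPC0_QMAN_BASE_OFFSET, TPC1_QMAN_BASE_OFFSET,
		TPC2_QMAN_BASE_OFFSET, TPC3_QMAN_BASE_OFFSET,
		TPC4_QMAN_BASE_OFFSET, TPC5_QMAN_BASE_OFFSET,
		TPC6_QMAN_BASE_OFFSET, TPC7_QMAN_BASE_OFFSET
	};
	int i;

	for (i = 0 ; i < TPC_MAX_NUM ; i++)
		goya_init_tpc_qman(hdev, tpc_qman_base_offs[i], i);
}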
1675
1676/*
1677 * goya_disable_internal_queues - Disable internal queues
1678 *
1679 * @hdev: pointer to hl_device structure
1680 *
1681 */
1682static void goya_disable_internal_queues(struct hl_device *hdev)
1683{
1684 WREG32(mmMME_QM_GLBL_CFG0, 0);
1685 WREG32(mmMME_CMDQ_GLBL_CFG0, 0);
1686
1687 WREG32(mmTPC0_QM_GLBL_CFG0, 0);
1688 WREG32(mmTPC0_CMDQ_GLBL_CFG0, 0);
1689
1690 WREG32(mmTPC1_QM_GLBL_CFG0, 0);
1691 WREG32(mmTPC1_CMDQ_GLBL_CFG0, 0);
1692
1693 WREG32(mmTPC2_QM_GLBL_CFG0, 0);
1694 WREG32(mmTPC2_CMDQ_GLBL_CFG0, 0);
1695
1696 WREG32(mmTPC3_QM_GLBL_CFG0, 0);
1697 WREG32(mmTPC3_CMDQ_GLBL_CFG0, 0);
1698
1699 WREG32(mmTPC4_QM_GLBL_CFG0, 0);
1700 WREG32(mmTPC4_CMDQ_GLBL_CFG0, 0);
1701
1702 WREG32(mmTPC5_QM_GLBL_CFG0, 0);
1703 WREG32(mmTPC5_CMDQ_GLBL_CFG0, 0);
1704
1705 WREG32(mmTPC6_QM_GLBL_CFG0, 0);
1706 WREG32(mmTPC6_CMDQ_GLBL_CFG0, 0);
1707
1708 WREG32(mmTPC7_QM_GLBL_CFG0, 0);
1709 WREG32(mmTPC7_CMDQ_GLBL_CFG0, 0);
1710}
1711
1712/*
1713 * goya_stop_internal_queues - Stop internal queues
1714 *
1715 * @hdev: pointer to hl_device structure
1716 *
1717 * Returns 0 on success
1718 *
1719 */
1720static int goya_stop_internal_queues(struct hl_device *hdev)
1721{
1722 int rc, retval = 0;
1723
1724 /*
1725 * Each queue (QMAN) is a separate H/W logic, so each QMAN can be
1726 * stopped independently. A failure to stop one QMAN does not prevent
1727 * us from trying to stop the others
1728 */
1729
1730 rc = goya_stop_queue(hdev,
1731 mmMME_QM_GLBL_CFG1,
1732 mmMME_QM_CP_STS,
1733 mmMME_QM_GLBL_STS0);
1734
1735 if (rc) {
1736 dev_err(hdev->dev, "failed to stop MME QMAN\n");
1737 retval = -EIO;
1738 }
1739
1740 rc = goya_stop_queue(hdev,
1741 mmMME_CMDQ_GLBL_CFG1,
1742 mmMME_CMDQ_CP_STS,
1743 mmMME_CMDQ_GLBL_STS0);
1744
1745 if (rc) {
1746 dev_err(hdev->dev, "failed to stop MME CMDQ\n");
1747 retval = -EIO;
1748 }
1749
1750 rc = goya_stop_queue(hdev,
1751 mmTPC0_QM_GLBL_CFG1,
1752 mmTPC0_QM_CP_STS,
1753 mmTPC0_QM_GLBL_STS0);
1754
1755 if (rc) {
1756 dev_err(hdev->dev, "failed to stop TPC 0 QMAN\n");
1757 retval = -EIO;
1758 }
1759
1760 rc = goya_stop_queue(hdev,
1761 mmTPC0_CMDQ_GLBL_CFG1,
1762 mmTPC0_CMDQ_CP_STS,
1763 mmTPC0_CMDQ_GLBL_STS0);
1764
1765 if (rc) {
1766 dev_err(hdev->dev, "failed to stop TPC 0 CMDQ\n");
1767 retval = -EIO;
1768 }
1769
1770 rc = goya_stop_queue(hdev,
1771 mmTPC1_QM_GLBL_CFG1,
1772 mmTPC1_QM_CP_STS,
1773 mmTPC1_QM_GLBL_STS0);
1774
1775 if (rc) {
1776 dev_err(hdev->dev, "failed to stop TPC 1 QMAN\n");
1777 retval = -EIO;
1778 }
1779
1780 rc = goya_stop_queue(hdev,
1781 mmTPC1_CMDQ_GLBL_CFG1,
1782 mmTPC1_CMDQ_CP_STS,
1783 mmTPC1_CMDQ_GLBL_STS0);
1784
1785 if (rc) {
1786 dev_err(hdev->dev, "failed to stop TPC 1 CMDQ\n");
1787 retval = -EIO;
1788 }
1789
1790 rc = goya_stop_queue(hdev,
1791 mmTPC2_QM_GLBL_CFG1,
1792 mmTPC2_QM_CP_STS,
1793 mmTPC2_QM_GLBL_STS0);
1794
1795 if (rc) {
1796 dev_err(hdev->dev, "failed to stop TPC 2 QMAN\n");
1797 retval = -EIO;
1798 }
1799
1800 rc = goya_stop_queue(hdev,
1801 mmTPC2_CMDQ_GLBL_CFG1,
1802 mmTPC2_CMDQ_CP_STS,
1803 mmTPC2_CMDQ_GLBL_STS0);
1804
1805 if (rc) {
1806 dev_err(hdev->dev, "failed to stop TPC 2 CMDQ\n");
1807 retval = -EIO;
1808 }
1809
1810 rc = goya_stop_queue(hdev,
1811 mmTPC3_QM_GLBL_CFG1,
1812 mmTPC3_QM_CP_STS,
1813 mmTPC3_QM_GLBL_STS0);
1814
1815 if (rc) {
1816 dev_err(hdev->dev, "failed to stop TPC 3 QMAN\n");
1817 retval = -EIO;
1818 }
1819
1820 rc = goya_stop_queue(hdev,
1821 mmTPC3_CMDQ_GLBL_CFG1,
1822 mmTPC3_CMDQ_CP_STS,
1823 mmTPC3_CMDQ_GLBL_STS0);
1824
1825 if (rc) {
1826 dev_err(hdev->dev, "failed to stop TPC 3 CMDQ\n");
1827 retval = -EIO;
1828 }
1829
1830 rc = goya_stop_queue(hdev,
1831 mmTPC4_QM_GLBL_CFG1,
1832 mmTPC4_QM_CP_STS,
1833 mmTPC4_QM_GLBL_STS0);
1834
1835 if (rc) {
1836 dev_err(hdev->dev, "failed to stop TPC 4 QMAN\n");
1837 retval = -EIO;
1838 }
1839
1840 rc = goya_stop_queue(hdev,
1841 mmTPC4_CMDQ_GLBL_CFG1,
1842 mmTPC4_CMDQ_CP_STS,
1843 mmTPC4_CMDQ_GLBL_STS0);
1844
1845 if (rc) {
1846 dev_err(hdev->dev, "failed to stop TPC 4 CMDQ\n");
1847 retval = -EIO;
1848 }
1849
1850 rc = goya_stop_queue(hdev,
1851 mmTPC5_QM_GLBL_CFG1,
1852 mmTPC5_QM_CP_STS,
1853 mmTPC5_QM_GLBL_STS0);
1854
1855 if (rc) {
1856 dev_err(hdev->dev, "failed to stop TPC 5 QMAN\n");
1857 retval = -EIO;
1858 }
1859
1860 rc = goya_stop_queue(hdev,
1861 mmTPC5_CMDQ_GLBL_CFG1,
1862 mmTPC5_CMDQ_CP_STS,
1863 mmTPC5_CMDQ_GLBL_STS0);
1864
1865 if (rc) {
1866 dev_err(hdev->dev, "failed to stop TPC 5 CMDQ\n");
1867 retval = -EIO;
1868 }
1869
1870 rc = goya_stop_queue(hdev,
1871 mmTPC6_QM_GLBL_CFG1,
1872 mmTPC6_QM_CP_STS,
1873 mmTPC6_QM_GLBL_STS0);
1874
1875 if (rc) {
1876 dev_err(hdev->dev, "failed to stop TPC 6 QMAN\n");
1877 retval = -EIO;
1878 }
1879
1880 rc = goya_stop_queue(hdev,
1881 mmTPC6_CMDQ_GLBL_CFG1,
1882 mmTPC6_CMDQ_CP_STS,
1883 mmTPC6_CMDQ_GLBL_STS0);
1884
1885 if (rc) {
1886 dev_err(hdev->dev, "failed to stop TPC 6 CMDQ\n");
1887 retval = -EIO;
1888 }
1889
1890 rc = goya_stop_queue(hdev,
1891 mmTPC7_QM_GLBL_CFG1,
1892 mmTPC7_QM_CP_STS,
1893 mmTPC7_QM_GLBL_STS0);
1894
1895 if (rc) {
1896 dev_err(hdev->dev, "failed to stop TPC 7 QMAN\n");
1897 retval = -EIO;
1898 }
1899
1900 rc = goya_stop_queue(hdev,
1901 mmTPC7_CMDQ_GLBL_CFG1,
1902 mmTPC7_CMDQ_CP_STS,
1903 mmTPC7_CMDQ_GLBL_STS0);
1904
1905 if (rc) {
1906 dev_err(hdev->dev, "failed to stop TPC 7 CMDQ\n");
1907 retval = -EIO;
1908 }
1909
1910 return retval;
1911}
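
/*
 * The long run of goya_stop_queue() calls above can also be expressed with
 * the uniform per-TPC register stride that goya_init_tpc_qman() already
 * relies on. A hypothetical sketch for the TPC queues only (illustration,
 * not the driver's code):
 */
static int __maybe_unused goya_stop_tpc_queues_sketch(struct hl_device *hdev)
{
	u32 qm_off, cmdq_off;
	int i, rc, retval = 0;

	for (i = 0 ; i < TPC_MAX_NUM ; i++) {
		qm_off = i * (mmTPC1_QM_GLBL_CFG1 - mmTPC0_QM_GLBL_CFG1);
		cmdq_off = i * (mmTPC1_CMDQ_GLBL_CFG1 - mmTPC0_CMDQ_GLBL_CFG1);

		/* Stop the QMAN of TPC i, then its CMDQ */
		rc = goya_stop_queue(hdev,
				mmTPC0_QM_GLBL_CFG1 + qm_off,
				mmTPC0_QM_CP_STS + qm_off,
				mmTPC0_QM_GLBL_STS0 + qm_off);
		if (rc) {
			dev_err(hdev->dev, "failed to stop TPC %d QMAN\n", i);
			retval = -EIO;
		}

		rc = goya_stop_queue(hdev,
				mmTPC0_CMDQ_GLBL_CFG1 + cmdq_off,
				mmTPC0_CMDQ_CP_STS + cmdq_off,
				mmTPC0_CMDQ_GLBL_STS0 + cmdq_off);
		if (rc) {
			dev_err(hdev->dev, "failed to stop TPC %d CMDQ\n", i);
			retval = -EIO;
		}
	}

	return retval;
}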
1912
1913static void goya_dma_stall(struct hl_device *hdev)
1914{
1915 WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT);
1916 WREG32(mmDMA_QM_1_GLBL_CFG1, 1 << DMA_QM_1_GLBL_CFG1_DMA_STOP_SHIFT);
1917 WREG32(mmDMA_QM_2_GLBL_CFG1, 1 << DMA_QM_2_GLBL_CFG1_DMA_STOP_SHIFT);
1918 WREG32(mmDMA_QM_3_GLBL_CFG1, 1 << DMA_QM_3_GLBL_CFG1_DMA_STOP_SHIFT);
1919 WREG32(mmDMA_QM_4_GLBL_CFG1, 1 << DMA_QM_4_GLBL_CFG1_DMA_STOP_SHIFT);
1920}
1921
1922static void goya_tpc_stall(struct hl_device *hdev)
1923{
1924 WREG32(mmTPC0_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
1925 WREG32(mmTPC1_CFG_TPC_STALL, 1 << TPC1_CFG_TPC_STALL_V_SHIFT);
1926 WREG32(mmTPC2_CFG_TPC_STALL, 1 << TPC2_CFG_TPC_STALL_V_SHIFT);
1927 WREG32(mmTPC3_CFG_TPC_STALL, 1 << TPC3_CFG_TPC_STALL_V_SHIFT);
1928 WREG32(mmTPC4_CFG_TPC_STALL, 1 << TPC4_CFG_TPC_STALL_V_SHIFT);
1929 WREG32(mmTPC5_CFG_TPC_STALL, 1 << TPC5_CFG_TPC_STALL_V_SHIFT);
1930 WREG32(mmTPC6_CFG_TPC_STALL, 1 << TPC6_CFG_TPC_STALL_V_SHIFT);
1931 WREG32(mmTPC7_CFG_TPC_STALL, 1 << TPC7_CFG_TPC_STALL_V_SHIFT);
1932}
1933
1934static void goya_mme_stall(struct hl_device *hdev)
1935{
1936 WREG32(mmMME_STALL, 0xFFFFFFFF);
1937}
1938
1939static int goya_enable_msix(struct hl_device *hdev)
1940{
1941 struct goya_device *goya = hdev->asic_specific;
1942 int cq_cnt = hdev->asic_prop.completion_queues_count;
1943 int rc, i, irq_cnt_init, irq;
1944
1945 if (goya->hw_cap_initialized & HW_CAP_MSIX)
1946 return 0;
1947
1948 rc = pci_alloc_irq_vectors(hdev->pdev, GOYA_MSIX_ENTRIES,
1949 GOYA_MSIX_ENTRIES, PCI_IRQ_MSIX);
1950 if (rc < 0) {
1951 dev_err(hdev->dev,
1952 "MSI-X: Failed to enable support -- %d/%d\n",
1953 GOYA_MSIX_ENTRIES, rc);
1954 return rc;
1955 }
1956
1957 for (i = 0, irq_cnt_init = 0 ; i < cq_cnt ; i++, irq_cnt_init++) {
1958 irq = pci_irq_vector(hdev->pdev, i);
1959 rc = request_irq(irq, hl_irq_handler_cq, 0, goya_irq_name[i],
1960 &hdev->completion_queue[i]);
1961 if (rc) {
1962 dev_err(hdev->dev, "Failed to request IRQ %d\n", irq);
1963 goto free_irqs;
1964 }
1965 }
1966
1967 irq = pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX);
1968
1969 rc = request_irq(irq, hl_irq_handler_eq, 0,
1970 goya_irq_name[GOYA_EVENT_QUEUE_MSIX_IDX],
1971 &hdev->event_queue);
1972 if (rc) {
1973 dev_err(hdev->dev, "Failed to request IRQ %d\n", irq);
1974 goto free_irqs;
1975 }
1976
1977 goya->hw_cap_initialized |= HW_CAP_MSIX;
1978 return 0;
1979
1980free_irqs:
1981 for (i = 0 ; i < irq_cnt_init ; i++)
1982 free_irq(pci_irq_vector(hdev->pdev, i),
1983 &hdev->completion_queue[i]);
1984
1985 pci_free_irq_vectors(hdev->pdev);
1986 return rc;
1987}
1988
1989static void goya_sync_irqs(struct hl_device *hdev)
1990{
1991 struct goya_device *goya = hdev->asic_specific;
1992 int i;
1993
1994 if (!(goya->hw_cap_initialized & HW_CAP_MSIX))
1995 return;
1996
1997 /* Wait for all pending IRQs to be finished */
1998 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
1999 synchronize_irq(pci_irq_vector(hdev->pdev, i));
2000
2001 synchronize_irq(pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX));
2002}
2003
2004static void goya_disable_msix(struct hl_device *hdev)
2005{
2006 struct goya_device *goya = hdev->asic_specific;
2007 int i, irq;
2008
2009 if (!(goya->hw_cap_initialized & HW_CAP_MSIX))
2010 return;
2011
2012 goya_sync_irqs(hdev);
2013
2014 irq = pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX);
2015 free_irq(irq, &hdev->event_queue);
2016
2017 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
2018 irq = pci_irq_vector(hdev->pdev, i);
2019 free_irq(irq, &hdev->completion_queue[i]);
2020 }
2021
2022 pci_free_irq_vectors(hdev->pdev);
2023
2024 goya->hw_cap_initialized &= ~HW_CAP_MSIX;
2025}
2026
2027static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
2028{
2029 u32 wait_timeout_ms, cpu_timeout_ms;
2030
2031 dev_info(hdev->dev,
2032 "Halting compute engines and disabling interrupts\n");
2033
2034 if (hdev->pldm) {
2035 wait_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
2036 cpu_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
2037 } else {
2038 wait_timeout_ms = GOYA_RESET_WAIT_MSEC;
2039 cpu_timeout_ms = GOYA_CPU_RESET_WAIT_MSEC;
2040 }
2041
2042 if (hard_reset) {
2043 /*
2044 * The state of the CPU is unknown at this point, so make sure it is
2045 * stopped by any means necessary
2046 */
2047 WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_GOTO_WFE);
2048 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
2049 GOYA_ASYNC_EVENT_ID_HALT_MACHINE);
2050 msleep(cpu_timeout_ms);
2051 }
2052
2053 goya_stop_external_queues(hdev);
2054 goya_stop_internal_queues(hdev);
2055
2056 msleep(wait_timeout_ms);
2057
2058 goya_dma_stall(hdev);
2059 goya_tpc_stall(hdev);
2060 goya_mme_stall(hdev);
2061
2062 msleep(wait_timeout_ms);
2063
2064 goya_disable_external_queues(hdev);
2065 goya_disable_internal_queues(hdev);
2066
2067 if (hard_reset)
2068 goya_disable_msix(hdev);
2069 else
2070 goya_sync_irqs(hdev);
2071}
2072
2073/*
2074 * goya_push_uboot_to_device() - Push u-boot FW code to device.
2075 * @hdev: Pointer to hl_device structure.
2076 *
2077 * Copy u-boot fw code from firmware file to SRAM BAR.
2078 *
2079 * Return: 0 on success, non-zero for failure.
2080 */
2081static int goya_push_uboot_to_device(struct hl_device *hdev)
2082{
2083 char fw_name[200];
2084 void __iomem *dst;
2085
2086 snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-u-boot.bin");
2087 dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + UBOOT_FW_OFFSET;
2088
2089 return hl_fw_push_fw_to_device(hdev, fw_name, dst);
2090}
2091
2092/*
2093 * goya_push_linux_to_device() - Push LINUX FW code to device.
2094 * @hdev: Pointer to hl_device structure.
2095 *
2096 * Copy LINUX fw code from firmware file to HBM BAR.
2097 *
2098 * Return: 0 on success, non-zero for failure.
2099 */
2100static int goya_push_linux_to_device(struct hl_device *hdev)
2101{
2102 char fw_name[200];
2103 void __iomem *dst;
2104
2105 snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-fit.itb");
2106 dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
2107
2108 return hl_fw_push_fw_to_device(hdev, fw_name, dst);
2109}
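
/*
 * A minimal sketch of what a push-FW helper such as hl_fw_push_fw_to_device()
 * typically does: load the image with request_firmware() and copy it to the
 * mapped BAR with memcpy_toio(). Illustration only - the real helper (with
 * size checks against the destination BAR) lives in the common habanalabs
 * firmware code, and <linux/firmware.h> is assumed to be available.
 */
static int __maybe_unused goya_push_fw_sketch(struct hl_device *hdev,
					const char *fw_name, void __iomem *dst)
{
	const struct firmware *fw;
	int rc;

	rc = request_firmware(&fw, fw_name, hdev->dev);
	if (rc) {
		dev_err(hdev->dev, "Failed to request %s\n", fw_name);
		return rc;
	}

	/* Copy the whole image to the device-visible window */
	memcpy_toio(dst, fw->data, fw->size);

	release_firmware(fw);
	return 0;
}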
2110
2111static int goya_pldm_init_cpu(struct hl_device *hdev)
2112{
2113 u32 val, unit_rst_val;
2114 int rc;
2115
2116 /* Must initialize SRAM scrambler before pushing u-boot to SRAM */
2117 goya_init_golden_registers(hdev);
2118
2119 /* Put ARM cores into reset */
2120 WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL, CPU_RESET_ASSERT);
2121 val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);
2122
2123 /* Reset the CA53 MACRO */
2124 unit_rst_val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
2125 WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, CA53_RESET);
2126 val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
2127 WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, unit_rst_val);
2128 val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
2129
2130 rc = goya_push_uboot_to_device(hdev);
2131 if (rc)
2132 return rc;
2133
2134 rc = goya_push_linux_to_device(hdev);
2135 if (rc)
2136 return rc;
2137
2138 WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_FIT_RDY);
2139 WREG32(mmPSOC_GLOBAL_CONF_WARM_REBOOT, CPU_BOOT_STATUS_NA);
2140
2141 WREG32(mmCPU_CA53_CFG_RST_ADDR_LSB_0,
2142 lower_32_bits(SRAM_BASE_ADDR + UBOOT_FW_OFFSET));
2143 WREG32(mmCPU_CA53_CFG_RST_ADDR_MSB_0,
2144 upper_32_bits(SRAM_BASE_ADDR + UBOOT_FW_OFFSET));
2145
2146 /* Release ARM core 0 from reset */
2147 WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL,
2148 CPU_RESET_CORE0_DEASSERT);
2149 val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);
2150
2151 return 0;
2152}
2153
2154/*
2155 * FW component passes an offset from SRAM_BASE_ADDR in SCRATCHPAD_xx.
2156 * The version string is located at that offset.
2157 */
2158static void goya_read_device_fw_version(struct hl_device *hdev,
2159 enum goya_fw_component fwc)
2160{
2161 const char *name;
2162 u32 ver_off;
2163 char *dest;
2164
2165 switch (fwc) {
2166 case FW_COMP_UBOOT:
2167 ver_off = RREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_29);
2168 dest = hdev->asic_prop.uboot_ver;
2169 name = "U-Boot";
2170 break;
2171 case FW_COMP_PREBOOT:
2172 ver_off = RREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_28);
2173 dest = hdev->asic_prop.preboot_ver;
2174 name = "Preboot";
2175 break;
2176 default:
2177 dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
2178 return;
2179 }
2180
2181 ver_off &= ~((u32)SRAM_BASE_ADDR);
2182
2183 if (ver_off < SRAM_SIZE - VERSION_MAX_LEN) {
2184 memcpy_fromio(dest, hdev->pcie_bar[SRAM_CFG_BAR_ID] + ver_off,
2185 VERSION_MAX_LEN);
2186 } else {
2187 dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n",
2188 name, ver_off);
2189 strcpy(dest, "unavailable");
2190 }
2191}
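
/*
 * Worked example with hypothetical values: if (u32)SRAM_BASE_ADDR is
 * 0x7FF00000 and u-boot wrote SRAM_BASE_ADDR + 0x3000 to SCRATCHPAD_29,
 * then
 *
 *	ver_off = 0x7FF03000 & ~0x7FF00000 = 0x3000
 *
 * and the version string is read from
 * hdev->pcie_bar[SRAM_CFG_BAR_ID] + 0x3000.
 */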
2192
2193static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
2194{
2195 struct goya_device *goya = hdev->asic_specific;
2196 u32 status;
2197 int rc;
2198
2199 if (!hdev->cpu_enable)
2200 return 0;
2201
2202 if (goya->hw_cap_initialized & HW_CAP_CPU)
2203 return 0;
2204
2205 /*
2206 * Before pushing u-boot/linux to device, need to set the ddr bar to
2207 * base address of dram
2208 */
2209 rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
2210 if (rc) {
2211 dev_err(hdev->dev,
2212 "failed to map DDR bar to DRAM base address\n");
2213 return rc;
2214 }
2215
2216 if (hdev->pldm) {
2217 rc = goya_pldm_init_cpu(hdev);
2218 if (rc)
2219 return rc;
2220
2221 goto out;
2222 }
2223
2224 /* Make sure CPU boot-loader is running */
2225 rc = hl_poll_timeout(
2226 hdev,
2227 mmPSOC_GLOBAL_CONF_WARM_REBOOT,
2228 status,
2229 (status == CPU_BOOT_STATUS_DRAM_RDY) ||
2230 (status == CPU_BOOT_STATUS_SRAM_AVAIL),
2231 10000,
2232 cpu_timeout);
2233
2234 if (rc) {
2235 dev_err(hdev->dev, "Error in ARM u-boot!\n");
2236 switch (status) {
2237 case CPU_BOOT_STATUS_NA:
2238 dev_err(hdev->dev,
2239 "ARM status %d - BTL did NOT run\n", status);
2240 break;
2241 case CPU_BOOT_STATUS_IN_WFE:
2242 dev_err(hdev->dev,
2243 "ARM status %d - Inside WFE loop\n", status);
2244 break;
2245 case CPU_BOOT_STATUS_IN_BTL:
2246 dev_err(hdev->dev,
2247 "ARM status %d - Stuck in BTL\n", status);
2248 break;
2249 case CPU_BOOT_STATUS_IN_PREBOOT:
2250 dev_err(hdev->dev,
2251 "ARM status %d - Stuck in Preboot\n", status);
2252 break;
2253 case CPU_BOOT_STATUS_IN_SPL:
2254 dev_err(hdev->dev,
2255 "ARM status %d - Stuck in SPL\n", status);
2256 break;
2257 case CPU_BOOT_STATUS_IN_UBOOT:
2258 dev_err(hdev->dev,
2259 "ARM status %d - Stuck in u-boot\n", status);
2260 break;
2261 case CPU_BOOT_STATUS_DRAM_INIT_FAIL:
2262 dev_err(hdev->dev,
2263 "ARM status %d - DDR initialization failed\n",
2264 status);
2265 break;
2266 case CPU_BOOT_STATUS_UBOOT_NOT_READY:
2267 dev_err(hdev->dev,
2268 "ARM status %d - u-boot stopped by user\n",
2269 status);
2270 break;
2271 default:
2272 dev_err(hdev->dev,
2273 "ARM status %d - Invalid status code\n",
2274 status);
2275 break;
2276 }
2277 return -EIO;
2278 }
2279
2280 /* Read U-Boot version now in case we will later fail */
2281 goya_read_device_fw_version(hdev, FW_COMP_UBOOT);
2282 goya_read_device_fw_version(hdev, FW_COMP_PREBOOT);
2283
2284 if (status == CPU_BOOT_STATUS_SRAM_AVAIL)
2285 goto out;
2286
2287 if (!hdev->fw_loading) {
2288 dev_info(hdev->dev, "Skip loading FW\n");
2289 goto out;
2290 }
2291
2292 rc = goya_push_linux_to_device(hdev);
2293 if (rc)
2294 return rc;
2295
2296 WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_FIT_RDY);
2297
2298 rc = hl_poll_timeout(
2299 hdev,
2300 mmPSOC_GLOBAL_CONF_WARM_REBOOT,
2301 status,
2302 (status == CPU_BOOT_STATUS_SRAM_AVAIL),
2303 10000,
2304 cpu_timeout);
2305
2306 if (rc) {
2307 if (status == CPU_BOOT_STATUS_FIT_CORRUPTED)
2308 dev_err(hdev->dev,
2309 "ARM u-boot reports FIT image is corrupted\n");
2310 else
2311 dev_err(hdev->dev,
2312 "ARM Linux failed to load, %d\n", status);
2313 WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_NA);
2314 return -EIO;
2315 }
2316
2317 dev_info(hdev->dev, "Successfully loaded firmware to device\n");
2318
2319out:
2320 goya->hw_cap_initialized |= HW_CAP_CPU;
2321
2322 return 0;
2323}
2324
2325static int goya_mmu_init(struct hl_device *hdev)
2326{
2327 struct asic_fixed_properties *prop = &hdev->asic_prop;
2328 struct goya_device *goya = hdev->asic_specific;
2329 u64 hop0_addr;
2330 int rc, i;
2331
2332 if (!hdev->mmu_enable)
2333 return 0;
2334
2335 if (goya->hw_cap_initialized & HW_CAP_MMU)
2336 return 0;
2337
2338 hdev->dram_supports_virtual_memory = true;
2339 hdev->dram_default_page_mapping = true;
2340
2341 for (i = 0 ; i < prop->max_asid ; i++) {
2342 hop0_addr = prop->mmu_pgt_addr +
2343 (i * prop->mmu_hop_table_size);
2344
2345 rc = goya_mmu_update_asid_hop0_addr(hdev, i, hop0_addr);
2346 if (rc) {
2347 dev_err(hdev->dev,
2348 "failed to set hop0 addr for asid %d\n", i);
2349 goto err;
2350 }
2351 }
2352
2353 goya->hw_cap_initialized |= HW_CAP_MMU;
2354
2355 /* init MMU cache manage page */
2356 WREG32(mmSTLB_CACHE_INV_BASE_39_8,
2357 lower_32_bits(MMU_CACHE_MNG_ADDR >> 8));
2358 WREG32(mmSTLB_CACHE_INV_BASE_49_40, MMU_CACHE_MNG_ADDR >> 40);
2359
2360 /* Remove follower feature due to performance bug */
2361 WREG32_AND(mmSTLB_STLB_FEATURE_EN,
2362 (~STLB_STLB_FEATURE_EN_FOLLOWER_EN_MASK));
2363
2364 hdev->asic_funcs->mmu_invalidate_cache(hdev, true);
2365
2366 WREG32(mmMMU_MMU_ENABLE, 1);
2367 WREG32(mmMMU_SPI_MASK, 0xF);
2368
2369 return 0;
2370
2371err:
2372 return rc;
2373}
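
/*
 * The per-ASID hop0 tables above are laid out back-to-back starting at
 * prop->mmu_pgt_addr. Worked example with hypothetical values: if
 * mmu_pgt_addr is 0x800000 and mmu_hop_table_size is 0x1000, then ASID 0
 * gets its hop0 at 0x800000, ASID 1 at 0x801000 and, in general, ASID i at
 * 0x800000 + i * 0x1000.
 */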
2374
2375/*
2376 * goya_hw_init - Goya hardware initialization code
2377 *
2378 * @hdev: pointer to hl_device structure
2379 *
2380 * Returns 0 on success
2381 *
2382 */
2383static int goya_hw_init(struct hl_device *hdev)
2384{
2385 struct asic_fixed_properties *prop = &hdev->asic_prop;
2386 u32 val;
2387 int rc;
2388
2389 dev_info(hdev->dev, "Starting initialization of H/W\n");
2390
2391 /* Perform read from the device to make sure device is up */
2392 val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
2393
2394 /*
2395 * Let's mark in the H/W that we have reached this point. We check
2396 * this value in the reset_before_init function to understand whether
2397 * we need to reset the chip before doing H/W init. This register is
2398 * cleared by the H/W upon H/W reset
2399 */
2400 WREG32(mmPSOC_GLOBAL_CONF_APP_STATUS, HL_DEVICE_HW_STATE_DIRTY);
2401
2402 rc = goya_init_cpu(hdev, GOYA_CPU_TIMEOUT_USEC);
2403 if (rc) {
2404 dev_err(hdev->dev, "failed to initialize CPU\n");
2405 return rc;
2406 }
2407
2408 goya_tpc_mbist_workaround(hdev);
2409
2410 goya_init_golden_registers(hdev);
2411
2412 /*
2413 * After CPU initialization is finished, change DDR bar mapping inside
2414 * iATU to point to the start address of the MMU page tables
2415 */
2416 rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE +
2417 (MMU_PAGE_TABLES_ADDR & ~(prop->dram_pci_bar_size - 0x1ull)));
2418 if (rc) {
2419 dev_err(hdev->dev,
2420 "failed to map DDR bar to MMU page tables\n");
2421 return rc;
2422 }
2423
2424 rc = goya_mmu_init(hdev);
2425 if (rc)
2426 return rc;
2427
2428 goya_init_security(hdev);
2429
2430 goya_init_dma_qmans(hdev);
2431
2432 goya_init_mme_qmans(hdev);
2433
2434 goya_init_tpc_qmans(hdev);
2435
2436 /* MSI-X must be enabled before CPU queues are initialized */
2437 rc = goya_enable_msix(hdev);
2438 if (rc)
2439 goto disable_queues;
2440
2441 rc = goya_init_cpu_queues(hdev);
2442 if (rc) {
2443 dev_err(hdev->dev, "failed to initialize CPU H/W queues %d\n",
2444 rc);
2445 goto disable_msix;
2446 }
2447
2448 /*
2449 * Check if we managed to set the DMA mask to more than 32 bits. If so,
2450 * try to increase it again, because in Goya we set the initial DMA mask
2451 * to less than 39 bits so that the allocation of the memory area for
2452 * the device's CPU will be under 39 bits
2453 */
2454 if (hdev->dma_mask > 32) {
2455 rc = hl_pci_set_dma_mask(hdev, 48);
2456 if (rc)
2457 goto disable_pci_access;
2458 }
2459
2460 /* Perform read from the device to flush all MSI-X configuration */
2461 val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
2462
2463 return 0;
2464
2465disable_pci_access:
2466 hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
2467disable_msix:
2468 goya_disable_msix(hdev);
2469disable_queues:
2470 goya_disable_internal_queues(hdev);
2471 goya_disable_external_queues(hdev);
2472
2473 return rc;
2474}
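
/*
 * A sketch of the staged DMA-mask pattern used at the end of goya_hw_init()
 * (illustration only; the real helper is hl_pci_set_dma_mask() in the
 * common PCI code, and it is assumed here that hdev->dma_mask tracks the
 * currently set width):
 */
static int __maybe_unused goya_set_dma_mask_sketch(struct hl_device *hdev,
						u8 mask_bits)
{
	int rc;

	rc = dma_set_mask_and_coherent(&hdev->pdev->dev,
					DMA_BIT_MASK(mask_bits));
	if (rc) {
		dev_err(hdev->dev, "Failed to set DMA mask to %d bits\n",
			mask_bits);
		return rc;
	}

	hdev->dma_mask = mask_bits;

	return 0;
}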
2475
2476/*
2477 * goya_hw_fini - Goya hardware tear-down code
2478 *
2479 * @hdev: pointer to hl_device structure
2480 * @hard_reset: should we do hard reset to all engines or just reset the
2481 * compute/dma engines
2482 */
2483static void goya_hw_fini(struct hl_device *hdev, bool hard_reset)
2484{
2485 struct goya_device *goya = hdev->asic_specific;
2486 u32 reset_timeout_ms, status;
2487
2488 if (hdev->pldm)
2489 reset_timeout_ms = GOYA_PLDM_RESET_TIMEOUT_MSEC;
2490 else
2491 reset_timeout_ms = GOYA_RESET_TIMEOUT_MSEC;
2492
2493 if (hard_reset) {
2494 goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
2495 goya_disable_clk_rlx(hdev);
2496 goya_set_pll_refclk(hdev);
2497
2498 WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, RESET_ALL);
2499 dev_info(hdev->dev,
2500 "Issued HARD reset command, going to wait %dms\n",
2501 reset_timeout_ms);
2502 } else {
2503 WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, DMA_MME_TPC_RESET);
2504 dev_info(hdev->dev,
2505 "Issued SOFT reset command, going to wait %dms\n",
2506 reset_timeout_ms);
2507 }
2508
2509 /*
2510 * After hard reset, we can't poll the BTM_FSM register because the PSOC
2511 * itself is in reset. In either case, we need to wait until the reset
2512 * is deasserted
2513 */
2514 msleep(reset_timeout_ms);
2515
2516 status = RREG32(mmPSOC_GLOBAL_CONF_BTM_FSM);
2517 if (status & PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK)
2518 dev_err(hdev->dev,
2519 "Timeout while waiting for device to reset 0x%x\n",
2520 status);
2521
2522 if (!hard_reset) {
2523 goya->hw_cap_initialized &= ~(HW_CAP_DMA | HW_CAP_MME |
2524 HW_CAP_GOLDEN | HW_CAP_TPC);
2525 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
2526 GOYA_ASYNC_EVENT_ID_SOFT_RESET);
2527 return;
2528 }
2529
2530 /* Chicken bit to re-initiate boot sequencer flow */
2531 WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START,
2532 1 << PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_SHIFT);
2533 /* Move boot manager FSM to pre boot sequencer init state */
2534 WREG32(mmPSOC_GLOBAL_CONF_SW_BTM_FSM,
2535 0xA << PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_SHIFT);
2536
2537 goya->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q |
2538 HW_CAP_DDR_0 | HW_CAP_DDR_1 |
2539 HW_CAP_DMA | HW_CAP_MME |
2540 HW_CAP_MMU | HW_CAP_TPC_MBIST |
2541 HW_CAP_GOLDEN | HW_CAP_TPC);
2542 memset(goya->events_stat, 0, sizeof(goya->events_stat));
2543
2544 if (!hdev->pldm) {
2545 int rc;
2546 /* In case we are running inside VM and the VM is
2547 * shutting down, we need to make sure CPU boot-loader
2548 * is running before we can continue the VM shutdown.
2549 * That is because the VM will send an FLR signal that
2550 * we must answer
2551 */
2552 dev_info(hdev->dev,
2553 "Going to wait up to %ds for CPU boot loader\n",
2554 GOYA_CPU_TIMEOUT_USEC / 1000 / 1000);
2555
2556 rc = hl_poll_timeout(
2557 hdev,
2558 mmPSOC_GLOBAL_CONF_WARM_REBOOT,
2559 status,
2560 (status == CPU_BOOT_STATUS_DRAM_RDY),
2561 10000,
2562 GOYA_CPU_TIMEOUT_USEC);
2563 if (rc)
2564 dev_err(hdev->dev,
2565 "failed to wait for CPU boot loader\n");
2566 }
2567}
2568
2569int goya_suspend(struct hl_device *hdev)
2570{
2571 int rc;
2572
2573 rc = hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
2574 if (rc)
2575 dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
2576
2577 return rc;
2578}
2579
2580int goya_resume(struct hl_device *hdev)
2581{
2582 return goya_init_iatu(hdev);
2583}
2584
2585static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
2586 u64 kaddress, phys_addr_t paddress, u32 size)
2587{
2588 int rc;
2589
2590 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
2591 VM_DONTCOPY | VM_NORESERVE;
2592
2593 rc = remap_pfn_range(vma, vma->vm_start, paddress >> PAGE_SHIFT,
2594 size, vma->vm_page_prot);
2595 if (rc)
2596 dev_err(hdev->dev, "remap_pfn_range error %d\n", rc);
2597
2598 return rc;
2599}
2600
2601static void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
2602{
2603 u32 db_reg_offset, db_value;
2604 bool invalid_queue = false;
2605
2606 switch (hw_queue_id) {
2607 case GOYA_QUEUE_ID_DMA_0:
2608 db_reg_offset = mmDMA_QM_0_PQ_PI;
2609 break;
2610
2611 case GOYA_QUEUE_ID_DMA_1:
2612 db_reg_offset = mmDMA_QM_1_PQ_PI;
2613 break;
2614
2615 case GOYA_QUEUE_ID_DMA_2:
2616 db_reg_offset = mmDMA_QM_2_PQ_PI;
2617 break;
2618
2619 case GOYA_QUEUE_ID_DMA_3:
2620 db_reg_offset = mmDMA_QM_3_PQ_PI;
2621 break;
2622
2623 case GOYA_QUEUE_ID_DMA_4:
2624 db_reg_offset = mmDMA_QM_4_PQ_PI;
2625 break;
2626
2627 case GOYA_QUEUE_ID_CPU_PQ:
2628 if (hdev->cpu_queues_enable)
2629 db_reg_offset = mmCPU_IF_PF_PQ_PI;
2630 else
2631 invalid_queue = true;
2632 break;
2633
2634 case GOYA_QUEUE_ID_MME:
2635 db_reg_offset = mmMME_QM_PQ_PI;
2636 break;
2637
2638 case GOYA_QUEUE_ID_TPC0:
2639 db_reg_offset = mmTPC0_QM_PQ_PI;
2640 break;
2641
2642 case GOYA_QUEUE_ID_TPC1:
2643 db_reg_offset = mmTPC1_QM_PQ_PI;
2644 break;
2645
2646 case GOYA_QUEUE_ID_TPC2:
2647 db_reg_offset = mmTPC2_QM_PQ_PI;
2648 break;
2649
2650 case GOYA_QUEUE_ID_TPC3:
2651 db_reg_offset = mmTPC3_QM_PQ_PI;
2652 break;
2653
2654 case GOYA_QUEUE_ID_TPC4:
2655 db_reg_offset = mmTPC4_QM_PQ_PI;
2656 break;
2657
2658 case GOYA_QUEUE_ID_TPC5:
2659 db_reg_offset = mmTPC5_QM_PQ_PI;
2660 break;
2661
2662 case GOYA_QUEUE_ID_TPC6:
2663 db_reg_offset = mmTPC6_QM_PQ_PI;
2664 break;
2665
2666 case GOYA_QUEUE_ID_TPC7:
2667 db_reg_offset = mmTPC7_QM_PQ_PI;
2668 break;
2669
2670 default:
2671 invalid_queue = true;
2672 }
2673
2674 if (invalid_queue) {
2675 /* Should never get here */
2676 dev_err(hdev->dev, "h/w queue %d is invalid. Can't set pi\n",
2677 hw_queue_id);
2678 return;
2679 }
2680
2681 db_value = pi;
2682
2683 /* ring the doorbell */
2684 WREG32(db_reg_offset, db_value);
2685
2686 if (hw_queue_id == GOYA_QUEUE_ID_CPU_PQ)
2687 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
2688 GOYA_ASYNC_EVENT_ID_PI_UPDATE);
2689}
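
/*
 * Usage sketch (hypothetical caller): after writing a new entry into a
 * queue's PQ, the submission path advances the PI and rings the doorbell,
 * e.g. for DMA queue 0 with a queue struct q:
 *
 *	goya_ring_doorbell(hdev, GOYA_QUEUE_ID_DMA_0, q->pi);
 *
 * For GOYA_QUEUE_ID_CPU_PQ the function above also kicks the device CPU
 * through the GIC after the PI update.
 */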
2690
2691void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val)
2692{
2693 /* Not needed in Goya */
2694}
2695
2696static void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size,
2697 dma_addr_t *dma_handle, gfp_t flags)
2698{
2699 return dma_alloc_coherent(&hdev->pdev->dev, size, dma_handle, flags);
2700}
2701
2702static void goya_dma_free_coherent(struct hl_device *hdev, size_t size,
2703 void *cpu_addr, dma_addr_t dma_handle)
2704{
2705 dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, dma_handle);
2706}
2707
2708void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
2709 dma_addr_t *dma_handle, u16 *queue_len)
2710{
2711 void *base;
2712 u32 offset;
2713
2714 *dma_handle = hdev->asic_prop.sram_base_address;
2715
2716 base = (void *) hdev->pcie_bar[SRAM_CFG_BAR_ID];
2717
2718 switch (queue_id) {
2719 case GOYA_QUEUE_ID_MME:
2720 offset = MME_QMAN_BASE_OFFSET;
2721 *queue_len = MME_QMAN_LENGTH;
2722 break;
2723 case GOYA_QUEUE_ID_TPC0:
2724 offset = TPC0_QMAN_BASE_OFFSET;
2725 *queue_len = TPC_QMAN_LENGTH;
2726 break;
2727 case GOYA_QUEUE_ID_TPC1:
2728 offset = TPC1_QMAN_BASE_OFFSET;
2729 *queue_len = TPC_QMAN_LENGTH;
2730 break;
2731 case GOYA_QUEUE_ID_TPC2:
2732 offset = TPC2_QMAN_BASE_OFFSET;
2733 *queue_len = TPC_QMAN_LENGTH;
2734 break;
2735 case GOYA_QUEUE_ID_TPC3:
2736 offset = TPC3_QMAN_BASE_OFFSET;
2737 *queue_len = TPC_QMAN_LENGTH;
2738 break;
2739 case GOYA_QUEUE_ID_TPC4:
2740 offset = TPC4_QMAN_BASE_OFFSET;
2741 *queue_len = TPC_QMAN_LENGTH;
2742 break;
2743 case GOYA_QUEUE_ID_TPC5:
2744 offset = TPC5_QMAN_BASE_OFFSET;
2745 *queue_len = TPC_QMAN_LENGTH;
2746 break;
2747 case GOYA_QUEUE_ID_TPC6:
2748 offset = TPC6_QMAN_BASE_OFFSET;
2749 *queue_len = TPC_QMAN_LENGTH;
2750 break;
2751 case GOYA_QUEUE_ID_TPC7:
2752 offset = TPC7_QMAN_BASE_OFFSET;
2753 *queue_len = TPC_QMAN_LENGTH;
2754 break;
2755 default:
2756 dev_err(hdev->dev, "Got invalid queue id %d\n", queue_id);
2757 return NULL;
2758 }
2759
2760 base += offset;
2761 *dma_handle += offset;
2762
2763 return base;
2764}
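
/*
 * Usage sketch (hypothetical caller): internal queues live in SRAM, so a
 * caller gets both the kernel mapping and the matching device address of
 * the queue, e.g. for the MME queue:
 *
 *	u16 len;
 *	dma_addr_t bus;
 *	void *base = goya_get_int_queue_base(hdev, GOYA_QUEUE_ID_MME,
 *						&bus, &len);
 */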
2765
2766static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
2767{
2768 struct goya_device *goya = hdev->asic_specific;
2769 struct packet_msg_prot *fence_pkt;
2770 u32 *fence_ptr;
2771 dma_addr_t fence_dma_addr;
2772 struct hl_cb *cb;
2773 u32 tmp, timeout;
2774 char buf[16] = {};
2775 int rc;
2776
2777 if (hdev->pldm)
2778 timeout = GOYA_PLDM_QMAN0_TIMEOUT_USEC;
2779 else
2780 timeout = HL_DEVICE_TIMEOUT_USEC;
2781
2782 if (!hdev->asic_funcs->is_device_idle(hdev, buf, sizeof(buf))) {
2783 dev_err_ratelimited(hdev->dev,
2784 "Can't send KMD job on QMAN0 because %s is busy\n",
2785 buf);
2786 return -EBUSY;
2787 }
2788
2789 fence_ptr = hdev->asic_funcs->dma_pool_zalloc(hdev, 4, GFP_KERNEL,
2790 &fence_dma_addr);
2791 if (!fence_ptr) {
2792 dev_err(hdev->dev,
2793 "Failed to allocate fence memory for QMAN0\n");
2794 return -ENOMEM;
2795 }
2796
2797 *fence_ptr = 0;
2798
2799 if (goya->hw_cap_initialized & HW_CAP_MMU) {
2800 WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_FULLY_TRUSTED);
2801 RREG32(mmDMA_QM_0_GLBL_PROT);
2802 }
2803
2804 /*
2805 * The goya CS parser saves space for 2 x packet_msg_prot at the end of
2806 * the CB; for synchronized kernel jobs only 1 packet_msg_prot is needed
2807 */
2808 job->job_cb_size -= sizeof(struct packet_msg_prot);
2809
2810 cb = job->patched_cb;
2811
2812 fence_pkt = (struct packet_msg_prot *) (uintptr_t) (cb->kernel_address +
2813 job->job_cb_size - sizeof(struct packet_msg_prot));
2814
2815 tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
2816 (1 << GOYA_PKT_CTL_EB_SHIFT) |
2817 (1 << GOYA_PKT_CTL_MB_SHIFT);
2818 fence_pkt->ctl = cpu_to_le32(tmp);
2819 fence_pkt->value = cpu_to_le32(GOYA_QMAN0_FENCE_VAL);
2820 fence_pkt->addr = cpu_to_le64(fence_dma_addr +
2821 hdev->asic_prop.host_phys_base_address);
2822
2823 rc = hl_hw_queue_send_cb_no_cmpl(hdev, GOYA_QUEUE_ID_DMA_0,
2824 job->job_cb_size, cb->bus_address);
2825 if (rc) {
2826 dev_err(hdev->dev, "Failed to send CB on QMAN0, %d\n", rc);
2827 goto free_fence_ptr;
2828 }
2829
2830 rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) fence_ptr, timeout,
2831 &tmp);
2832
2833 hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_DMA_0);
2834
2835 if ((rc) || (tmp != GOYA_QMAN0_FENCE_VAL)) {
2836 dev_err(hdev->dev, "QMAN0 Job hasn't finished in time\n");
2837 rc = -ETIMEDOUT;
2838 }
2839
2840free_fence_ptr:
2841 hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_ptr,
2842 fence_dma_addr);
2843
2844 if (goya->hw_cap_initialized & HW_CAP_MMU) {
2845 WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_PARTLY_TRUSTED);
2846 RREG32(mmDMA_QM_0_GLBL_PROT);
2847 }
2848
2849 return rc;
2850}
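
/*
 * The QMAN0 fence protocol above, condensed into one hypothetical helper:
 * the trailing MSG_PROT packet makes the engine write GOYA_QMAN0_FENCE_VAL
 * to host memory when the job completes, and the driver polls that
 * location. Sketch only, reusing hl_poll_timeout_memory() exactly as it is
 * called in this file:
 */
static int __maybe_unused goya_wait_qman0_fence_sketch(struct hl_device *hdev,
						u32 *fence_ptr, u32 timeout)
{
	u32 tmp;
	int rc;

	rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) fence_ptr,
					timeout, &tmp);

	if (rc || (tmp != GOYA_QMAN0_FENCE_VAL))
		return -ETIMEDOUT;

	return 0;
}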
2851
2852int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
2853 u32 timeout, long *result)
2854{
2855 struct goya_device *goya = hdev->asic_specific;
2856
2857 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) {
2858 if (result)
2859 *result = 0;
2860 return 0;
2861 }
2862
2863 return hl_fw_send_cpu_message(hdev, GOYA_QUEUE_ID_CPU_PQ, msg, len,
2864 timeout, result);
2865}
2866
2867int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
2868{
2869 struct packet_msg_prot *fence_pkt;
2870 dma_addr_t pkt_dma_addr;
2871 u32 fence_val, tmp;
2872 dma_addr_t fence_dma_addr;
2873 u32 *fence_ptr;
2874 int rc;
2875
2876 fence_val = GOYA_QMAN0_FENCE_VAL;
2877
2878 fence_ptr = hdev->asic_funcs->dma_pool_zalloc(hdev, 4, GFP_KERNEL,
2879 &fence_dma_addr);
2880 if (!fence_ptr) {
2881 dev_err(hdev->dev,
2882 "Failed to allocate memory for queue testing\n");
2883 return -ENOMEM;
2884 }
2885
2886 *fence_ptr = 0;
2887
2888 fence_pkt = hdev->asic_funcs->dma_pool_zalloc(hdev,
2889 sizeof(struct packet_msg_prot),
2890 GFP_KERNEL, &pkt_dma_addr);
2891 if (!fence_pkt) {
2892 dev_err(hdev->dev,
2893 "Failed to allocate packet for queue testing\n");
2894 rc = -ENOMEM;
2895 goto free_fence_ptr;
2896 }
2897
2898 tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
2899 (1 << GOYA_PKT_CTL_EB_SHIFT) |
2900 (1 << GOYA_PKT_CTL_MB_SHIFT);
2901 fence_pkt->ctl = cpu_to_le32(tmp);
2902 fence_pkt->value = cpu_to_le32(fence_val);
2903 fence_pkt->addr = cpu_to_le64(fence_dma_addr +
2904 hdev->asic_prop.host_phys_base_address);
2905
2906 rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id,
2907 sizeof(struct packet_msg_prot),
2908 pkt_dma_addr);
2909 if (rc) {
2910 dev_err(hdev->dev,
2911 "Failed to send fence packet\n");
2912 goto free_pkt;
2913 }
2914
2915 rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) fence_ptr,
2916 GOYA_TEST_QUEUE_WAIT_USEC, &tmp);
2917
2918 hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
2919
2920 if ((!rc) && (tmp == fence_val)) {
2921 dev_info(hdev->dev,
2922 "queue test on H/W queue %d succeeded\n",
2923 hw_queue_id);
2924 } else {
2925 dev_err(hdev->dev,
2926 "H/W queue %d test failed (scratch(0x%08llX) == 0x%08X)\n",
2927 hw_queue_id, (unsigned long long) fence_dma_addr, tmp);
2928 rc = -EINVAL;
2929 }
2930
2931free_pkt:
2932 hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_pkt,
2933 pkt_dma_addr);
2934free_fence_ptr:
2935 hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_ptr,
2936 fence_dma_addr);
2937 return rc;
2938}
2939
2940int goya_test_cpu_queue(struct hl_device *hdev)
2941{
2942 struct goya_device *goya = hdev->asic_specific;
2943
2944 /*
2945 * Check the capability here because send_cpu_message() won't update
2946 * the result value if the capability is not initialized
2947 */
2948 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
2949 return 0;
2950
2951 return hl_fw_test_cpu_queue(hdev);
2952}
2953
2954static int goya_test_queues(struct hl_device *hdev)
2955{
2956 int i, rc, ret_val = 0;
2957
2958 for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
2959 rc = goya_test_queue(hdev, i);
2960 if (rc)
2961 ret_val = -EINVAL;
2962 }
2963
2964 if (hdev->cpu_queues_enable) {
2965 rc = goya_test_cpu_queue(hdev);
2966 if (rc)
2967 ret_val = -EINVAL;
2968 }
2969
2970 return ret_val;
2971}
2972
2973static void *goya_dma_pool_zalloc(struct hl_device *hdev, size_t size,
2974 gfp_t mem_flags, dma_addr_t *dma_handle)
2975{
2976 if (size > GOYA_DMA_POOL_BLK_SIZE)
2977 return NULL;
2978
2979 return dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle);
2980}
2981
2982static void goya_dma_pool_free(struct hl_device *hdev, void *vaddr,
2983 dma_addr_t dma_addr)
2984{
2985 dma_pool_free(hdev->dma_pool, vaddr, dma_addr);
2986}
2987
2988static void *goya_cpu_accessible_dma_pool_alloc(struct hl_device *hdev,
2989 size_t size, dma_addr_t *dma_handle)
2990{
2991 return hl_fw_cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
2992}
2993
2994static void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev,
2995 size_t size, void *vaddr)
2996{
2997 hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr);
2998}
2999
3000static int goya_dma_map_sg(struct hl_device *hdev, struct scatterlist *sg,
3001 int nents, enum dma_data_direction dir)
3002{
3003 if (!dma_map_sg(&hdev->pdev->dev, sg, nents, dir))
3004 return -ENOMEM;
3005
3006 return 0;
3007}
3008
3009static void goya_dma_unmap_sg(struct hl_device *hdev, struct scatterlist *sg,
3010 int nents, enum dma_data_direction dir)
3011{
3012 dma_unmap_sg(&hdev->pdev->dev, sg, nents, dir);
3013}
3014
3015u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt)
3016{
3017 struct scatterlist *sg, *sg_next_iter;
3018 u32 count, dma_desc_cnt;
3019 u64 len, len_next;
3020 dma_addr_t addr, addr_next;
3021
3022 dma_desc_cnt = 0;
3023
3024 for_each_sg(sgt->sgl, sg, sgt->nents, count) {
3025
3026 len = sg_dma_len(sg);
3027 addr = sg_dma_address(sg);
3028
3029 if (len == 0)
3030 break;
3031
3032 while ((count + 1) < sgt->nents) {
3033 sg_next_iter = sg_next(sg);
3034 len_next = sg_dma_len(sg_next_iter);
3035 addr_next = sg_dma_address(sg_next_iter);
3036
3037 if (len_next == 0)
3038 break;
3039
3040 if ((addr + len == addr_next) &&
3041 (len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
3042 len += len_next;
3043 count++;
3044 sg = sg_next_iter;
3045 } else {
3046 break;
3047 }
3048 }
3049
3050 dma_desc_cnt++;
3051 }
3052
3053 return dma_desc_cnt * sizeof(struct packet_lin_dma);
3054}
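
/*
 * Worked example for the coalescing loop above (hypothetical addresses):
 * three mapped chunks (0x1000, 4KB), (0x2000, 4KB) and (0x9000, 4KB) yield
 * two descriptors - the first two are contiguous (0x1000 + 0x1000 ==
 * 0x2000) and merge into one 8KB descriptor, while the third starts a new
 * one - so the function returns 2 * sizeof(struct packet_lin_dma).
 */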
3055
3056static int goya_pin_memory_before_cs(struct hl_device *hdev,
3057 struct hl_cs_parser *parser,
3058 struct packet_lin_dma *user_dma_pkt,
3059 u64 addr, enum dma_data_direction dir)
3060{
3061 struct hl_userptr *userptr;
3062 int rc;
3063
3064 if (hl_userptr_is_pinned(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
3065 parser->job_userptr_list, &userptr))
3066 goto already_pinned;
3067
3068 userptr = kzalloc(sizeof(*userptr), GFP_ATOMIC);
3069 if (!userptr)
3070 return -ENOMEM;
3071
3072 rc = hl_pin_host_memory(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
3073 userptr);
3074 if (rc)
3075 goto free_userptr;
3076
3077 list_add_tail(&userptr->job_node, parser->job_userptr_list);
3078
3079 rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
3080 userptr->sgt->nents, dir);
3081 if (rc) {
3082 dev_err(hdev->dev, "failed to map sgt with DMA region\n");
3083 goto unpin_memory;
3084 }
3085
3086 userptr->dma_mapped = true;
3087 userptr->dir = dir;
3088
3089already_pinned:
3090 parser->patched_cb_size +=
3091 goya_get_dma_desc_list_size(hdev, userptr->sgt);
3092
3093 return 0;
3094
3095unpin_memory:
3096 hl_unpin_host_memory(hdev, userptr);
3097free_userptr:
3098 kfree(userptr);
3099 return rc;
3100}
3101
3102static int goya_validate_dma_pkt_host(struct hl_device *hdev,
3103 struct hl_cs_parser *parser,
3104 struct packet_lin_dma *user_dma_pkt)
3105{
3106 u64 device_memory_addr, addr;
3107 enum dma_data_direction dir;
3108 enum goya_dma_direction user_dir;
3109 bool sram_addr = true;
3110 bool skip_host_mem_pin = false;
3111 bool user_memset;
3112 u32 ctl;
3113 int rc = 0;
3114
3115 ctl = le32_to_cpu(user_dma_pkt->ctl);
3116
3117 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3118 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3119
3120 user_memset = (ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
3121 GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
3122
3123 switch (user_dir) {
3124 case DMA_HOST_TO_DRAM:
3125 dev_dbg(hdev->dev, "DMA direction is HOST --> DRAM\n");
3126 dir = DMA_TO_DEVICE;
3127 sram_addr = false;
3128 addr = le64_to_cpu(user_dma_pkt->src_addr);
3129 device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3130 if (user_memset)
3131 skip_host_mem_pin = true;
3132 break;
3133
3134 case DMA_DRAM_TO_HOST:
3135 dev_dbg(hdev->dev, "DMA direction is DRAM --> HOST\n");
3136 dir = DMA_FROM_DEVICE;
3137 sram_addr = false;
3138 addr = le64_to_cpu(user_dma_pkt->dst_addr);
3139 device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3140 break;
3141
3142 case DMA_HOST_TO_SRAM:
3143 dev_dbg(hdev->dev, "DMA direction is HOST --> SRAM\n");
3144 dir = DMA_TO_DEVICE;
3145 addr = le64_to_cpu(user_dma_pkt->src_addr);
3146 device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3147 if (user_memset)
3148 skip_host_mem_pin = true;
3149 break;
3150
3151 case DMA_SRAM_TO_HOST:
3152 dev_dbg(hdev->dev, "DMA direction is SRAM --> HOST\n");
3153 dir = DMA_FROM_DEVICE;
3154 addr = le64_to_cpu(user_dma_pkt->dst_addr);
3155 device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3156 break;
3157 default:
3158 dev_err(hdev->dev, "DMA direction is undefined\n");
3159 return -EFAULT;
3160 }
3161
3162 if (parser->ctx_id != HL_KERNEL_ASID_ID) {
3163 if (sram_addr) {
3164 if (!hl_mem_area_inside_range(device_memory_addr,
3165 le32_to_cpu(user_dma_pkt->tsize),
3166 hdev->asic_prop.sram_user_base_address,
3167 hdev->asic_prop.sram_end_address)) {
3168
3169 dev_err(hdev->dev,
3170 "SRAM address 0x%llx + 0x%x is invalid\n",
3171 device_memory_addr,
3172 user_dma_pkt->tsize);
3173 return -EFAULT;
3174 }
3175 } else {
3176 if (!hl_mem_area_inside_range(device_memory_addr,
3177 le32_to_cpu(user_dma_pkt->tsize),
3178 hdev->asic_prop.dram_user_base_address,
3179 hdev->asic_prop.dram_end_address)) {
3180
3181 dev_err(hdev->dev,
3182 "DRAM address 0x%llx + 0x%x is invalid\n",
3183 device_memory_addr,
3184 user_dma_pkt->tsize);
3185 return -EFAULT;
3186 }
3187 }
3188 }
3189
3190 if (skip_host_mem_pin)
3191 parser->patched_cb_size += sizeof(*user_dma_pkt);
3192 else {
3193 if ((dir == DMA_TO_DEVICE) &&
3194 (parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1)) {
3195 dev_err(hdev->dev,
3196 "Can't DMA from host on queue other then 1\n");
3197 return -EFAULT;
3198 }
3199
3200 rc = goya_pin_memory_before_cs(hdev, parser, user_dma_pkt,
3201 addr, dir);
3202 }
3203
3204 return rc;
3205}
3206
3207static int goya_validate_dma_pkt_no_host(struct hl_device *hdev,
3208 struct hl_cs_parser *parser,
3209 struct packet_lin_dma *user_dma_pkt)
3210{
3211 u64 sram_memory_addr, dram_memory_addr;
3212 enum goya_dma_direction user_dir;
3213 u32 ctl;
3214
3215 ctl = le32_to_cpu(user_dma_pkt->ctl);
3216 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3217 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3218
3219 if (user_dir == DMA_DRAM_TO_SRAM) {
3220 dev_dbg(hdev->dev, "DMA direction is DRAM --> SRAM\n");
3221 dram_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3222 sram_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3223 } else {
3224 dev_dbg(hdev->dev, "DMA direction is SRAM --> DRAM\n");
3225 sram_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3226 dram_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3227 }
3228
3229 if (!hl_mem_area_inside_range(sram_memory_addr,
3230 le32_to_cpu(user_dma_pkt->tsize),
3231 hdev->asic_prop.sram_user_base_address,
3232 hdev->asic_prop.sram_end_address)) {
3233 dev_err(hdev->dev, "SRAM address 0x%llx + 0x%x is invalid\n",
3234 sram_memory_addr, user_dma_pkt->tsize);
3235 return -EFAULT;
3236 }
3237
3238 if (!hl_mem_area_inside_range(dram_memory_addr,
3239 le32_to_cpu(user_dma_pkt->tsize),
3240 hdev->asic_prop.dram_user_base_address,
3241 hdev->asic_prop.dram_end_address)) {
3242 dev_err(hdev->dev, "DRAM address 0x%llx + 0x%x is invalid\n",
3243 dram_memory_addr, user_dma_pkt->tsize);
3244 return -EFAULT;
3245 }
3246
3247 parser->patched_cb_size += sizeof(*user_dma_pkt);
3248
3249 return 0;
3250}
3251
3252static int goya_validate_dma_pkt_no_mmu(struct hl_device *hdev,
3253 struct hl_cs_parser *parser,
3254 struct packet_lin_dma *user_dma_pkt)
3255{
3256 enum goya_dma_direction user_dir;
3257 u32 ctl;
3258 int rc;
3259
3260 dev_dbg(hdev->dev, "DMA packet details:\n");
3261 dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr);
3262 dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr);
3263 dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize);
3264
3265 ctl = le32_to_cpu(user_dma_pkt->ctl);
3266 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3267 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3268
3269 /*
3270 * Special handling for DMA with size 0. The H/W has a bug where
3271 * this can cause the QMAN DMA to get stuck, so block it here.
3272 */
3273 if (user_dma_pkt->tsize == 0) {
3274 dev_err(hdev->dev,
3275 "Got DMA with size 0, might reset the device\n");
3276 return -EINVAL;
3277 }
3278
3279 if ((user_dir == DMA_DRAM_TO_SRAM) || (user_dir == DMA_SRAM_TO_DRAM))
3280 rc = goya_validate_dma_pkt_no_host(hdev, parser, user_dma_pkt);
3281 else
3282 rc = goya_validate_dma_pkt_host(hdev, parser, user_dma_pkt);
3283
3284 return rc;
3285}
3286
3287static int goya_validate_dma_pkt_mmu(struct hl_device *hdev,
3288 struct hl_cs_parser *parser,
3289 struct packet_lin_dma *user_dma_pkt)
3290{
3291 dev_dbg(hdev->dev, "DMA packet details:\n");
3292 dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr);
3293 dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr);
3294 dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize);
3295
3296 /*
3297 * WA for HW-23.
3298	 * We can't allow the user to read from the host using any QMAN other than 1.
3299 */
3300 if (parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1 &&
Tomer Tayardf697bc2019-02-28 10:46:22 +02003301 hl_mem_area_inside_range(le64_to_cpu(user_dma_pkt->src_addr),
3302 le32_to_cpu(user_dma_pkt->tsize),
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003303 hdev->asic_prop.va_space_host_start_address,
3304 hdev->asic_prop.va_space_host_end_address)) {
3305 dev_err(hdev->dev,
3306 "Can't DMA from host on queue other then 1\n");
3307 return -EFAULT;
3308 }
3309
3310 if (user_dma_pkt->tsize == 0) {
3311 dev_err(hdev->dev,
3312 "Got DMA with size 0, might reset the device\n");
3313 return -EINVAL;
3314 }
3315
3316 parser->patched_cb_size += sizeof(*user_dma_pkt);
3317
3318 return 0;
3319}
3320
3321static int goya_validate_wreg32(struct hl_device *hdev,
3322 struct hl_cs_parser *parser,
3323 struct packet_wreg32 *wreg_pkt)
3324{
3325 struct goya_device *goya = hdev->asic_specific;
3326 u32 sob_start_addr, sob_end_addr;
3327 u16 reg_offset;
3328
Tomer Tayardf697bc2019-02-28 10:46:22 +02003329 reg_offset = le32_to_cpu(wreg_pkt->ctl) &
3330 GOYA_PKT_WREG32_CTL_REG_OFFSET_MASK;
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003331
3332 dev_dbg(hdev->dev, "WREG32 packet details:\n");
3333 dev_dbg(hdev->dev, "reg_offset == 0x%x\n", reg_offset);
3334 dev_dbg(hdev->dev, "value == 0x%x\n", wreg_pkt->value);
3335
Oded Gabbay6765fda2019-02-28 10:46:14 +02003336 if (reg_offset != (mmDMA_CH_0_WR_COMP_ADDR_LO & 0x1FFF)) {
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003337 dev_err(hdev->dev, "WREG32 packet with illegal address 0x%x\n",
3338 reg_offset);
3339 return -EPERM;
3340 }
3341
3342 /*
3343 * With MMU, DMA channels are not secured, so it doesn't matter where
3344	 * the WR COMP is written, because the transaction goes out with a
3345	 * non-secured property
3346 */
3347 if (goya->hw_cap_initialized & HW_CAP_MMU)
3348 return 0;
3349
3350 sob_start_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
3351 sob_end_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1023);
3352
Tomer Tayardf697bc2019-02-28 10:46:22 +02003353 if ((le32_to_cpu(wreg_pkt->value) < sob_start_addr) ||
3354 (le32_to_cpu(wreg_pkt->value) > sob_end_addr)) {
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003355
3356 dev_err(hdev->dev, "WREG32 packet with illegal value 0x%x\n",
3357 wreg_pkt->value);
3358 return -EPERM;
3359 }
3360
3361 return 0;
3362}
3363
3364static int goya_validate_cb(struct hl_device *hdev,
3365 struct hl_cs_parser *parser, bool is_mmu)
3366{
3367 u32 cb_parsed_length = 0;
3368 int rc = 0;
3369
3370 parser->patched_cb_size = 0;
3371
3372	/* user_cb_size is greater than 0, so the loop always executes at least once */
3373 while (cb_parsed_length < parser->user_cb_size) {
3374 enum packet_id pkt_id;
3375 u16 pkt_size;
3376 void *user_pkt;
3377
3378 user_pkt = (void *) (uintptr_t)
3379 (parser->user_cb->kernel_address + cb_parsed_length);
3380
3381 pkt_id = (enum packet_id) (((*(u64 *) user_pkt) &
3382 PACKET_HEADER_PACKET_ID_MASK) >>
3383 PACKET_HEADER_PACKET_ID_SHIFT);
3384
3385 pkt_size = goya_packet_sizes[pkt_id];
3386 cb_parsed_length += pkt_size;
3387 if (cb_parsed_length > parser->user_cb_size) {
3388 dev_err(hdev->dev,
3389 "packet 0x%x is out of CB boundary\n", pkt_id);
3390 rc = -EINVAL;
3391 break;
3392 }
3393
3394 switch (pkt_id) {
3395 case PACKET_WREG_32:
3396 /*
3397 * Although it is validated after copy in patch_cb(),
3398 * need to validate here as well because patch_cb() is
3399 * not called in MMU path while this function is called
3400 */
3401 rc = goya_validate_wreg32(hdev, parser, user_pkt);
3402 break;
3403
3404 case PACKET_WREG_BULK:
3405 dev_err(hdev->dev,
3406 "User not allowed to use WREG_BULK\n");
3407 rc = -EPERM;
3408 break;
3409
3410 case PACKET_MSG_PROT:
3411 dev_err(hdev->dev,
3412 "User not allowed to use MSG_PROT\n");
3413 rc = -EPERM;
3414 break;
3415
3416 case PACKET_CP_DMA:
3417 dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
3418 rc = -EPERM;
3419 break;
3420
3421 case PACKET_STOP:
3422 dev_err(hdev->dev, "User not allowed to use STOP\n");
3423 rc = -EPERM;
3424 break;
3425
3426 case PACKET_LIN_DMA:
3427 if (is_mmu)
3428 rc = goya_validate_dma_pkt_mmu(hdev, parser,
3429 user_pkt);
3430 else
3431 rc = goya_validate_dma_pkt_no_mmu(hdev, parser,
3432 user_pkt);
3433 break;
3434
3435 case PACKET_MSG_LONG:
3436 case PACKET_MSG_SHORT:
3437 case PACKET_FENCE:
3438 case PACKET_NOP:
3439 parser->patched_cb_size += pkt_size;
3440 break;
3441
3442 default:
3443 dev_err(hdev->dev, "Invalid packet header 0x%x\n",
3444 pkt_id);
3445 rc = -EINVAL;
3446 break;
3447 }
3448
3449 if (rc)
3450 break;
3451 }
3452
3453 /*
3454 * The new CB should have space at the end for two MSG_PROT packets:
3455 * 1. A packet that will act as a completion packet
3456 * 2. A packet that will generate MSI-X interrupt
3457 */
3458 parser->patched_cb_size += sizeof(struct packet_msg_prot) * 2;
3459
3460 return rc;
3461}
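
/*
 * Editor's sketch: every Goya packet begins with a common header whose
 * opcode field is extracted the same way in goya_validate_cb() and
 * goya_patch_cb(). Hypothetical helper, assuming the header occupies the
 * first 64 bits of the packet.
 */
static inline enum packet_id goya_sketch_pkt_id(const void *pkt)
{
	return (enum packet_id) (((*(const u64 *) pkt) &
			PACKET_HEADER_PACKET_ID_MASK) >>
			PACKET_HEADER_PACKET_ID_SHIFT);
}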
3462
3463static int goya_patch_dma_packet(struct hl_device *hdev,
3464 struct hl_cs_parser *parser,
3465 struct packet_lin_dma *user_dma_pkt,
3466 struct packet_lin_dma *new_dma_pkt,
3467 u32 *new_dma_pkt_size)
3468{
3469 struct hl_userptr *userptr;
3470 struct scatterlist *sg, *sg_next_iter;
Oded Gabbaye99f16832019-02-24 11:55:26 +02003471 u32 count, dma_desc_cnt;
3472 u64 len, len_next;
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003473 dma_addr_t dma_addr, dma_addr_next;
3474 enum goya_dma_direction user_dir;
3475 u64 device_memory_addr, addr;
3476 enum dma_data_direction dir;
3477 struct sg_table *sgt;
3478 bool skip_host_mem_pin = false;
3479 bool user_memset;
Tomer Tayardf697bc2019-02-28 10:46:22 +02003480 u32 user_rdcomp_mask, user_wrcomp_mask, ctl;
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003481
Tomer Tayardf697bc2019-02-28 10:46:22 +02003482 ctl = le32_to_cpu(user_dma_pkt->ctl);
3483
3484 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003485 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3486
Tomer Tayardf697bc2019-02-28 10:46:22 +02003487 user_memset = (ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003488 GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
3489
3490 if ((user_dir == DMA_DRAM_TO_SRAM) || (user_dir == DMA_SRAM_TO_DRAM) ||
3491 (user_dma_pkt->tsize == 0)) {
3492 memcpy(new_dma_pkt, user_dma_pkt, sizeof(*new_dma_pkt));
3493 *new_dma_pkt_size = sizeof(*new_dma_pkt);
3494 return 0;
3495 }
3496
3497 if ((user_dir == DMA_HOST_TO_DRAM) || (user_dir == DMA_HOST_TO_SRAM)) {
Tomer Tayardf697bc2019-02-28 10:46:22 +02003498 addr = le64_to_cpu(user_dma_pkt->src_addr);
3499 device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003500 dir = DMA_TO_DEVICE;
3501 if (user_memset)
3502 skip_host_mem_pin = true;
3503 } else {
Tomer Tayardf697bc2019-02-28 10:46:22 +02003504 addr = le64_to_cpu(user_dma_pkt->dst_addr);
3505 device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003506 dir = DMA_FROM_DEVICE;
3507 }
3508
3509 if ((!skip_host_mem_pin) &&
Tomer Tayardf697bc2019-02-28 10:46:22 +02003510 (hl_userptr_is_pinned(hdev, addr,
3511 le32_to_cpu(user_dma_pkt->tsize),
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003512 parser->job_userptr_list, &userptr) == false)) {
3513 dev_err(hdev->dev, "Userptr 0x%llx + 0x%x NOT mapped\n",
3514 addr, user_dma_pkt->tsize);
3515 return -EFAULT;
3516 }
3517
3518 if ((user_memset) && (dir == DMA_TO_DEVICE)) {
3519 memcpy(new_dma_pkt, user_dma_pkt, sizeof(*user_dma_pkt));
3520 *new_dma_pkt_size = sizeof(*user_dma_pkt);
3521 return 0;
3522 }
3523
Tomer Tayardf697bc2019-02-28 10:46:22 +02003524 user_rdcomp_mask = ctl & GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK;
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003525
Tomer Tayardf697bc2019-02-28 10:46:22 +02003526 user_wrcomp_mask = ctl & GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK;
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003527
3528 sgt = userptr->sgt;
3529 dma_desc_cnt = 0;
3530
3531 for_each_sg(sgt->sgl, sg, sgt->nents, count) {
3532 len = sg_dma_len(sg);
3533 dma_addr = sg_dma_address(sg);
3534
3535 if (len == 0)
3536 break;
3537
3538 while ((count + 1) < sgt->nents) {
3539 sg_next_iter = sg_next(sg);
3540 len_next = sg_dma_len(sg_next_iter);
3541 dma_addr_next = sg_dma_address(sg_next_iter);
3542
3543 if (len_next == 0)
3544 break;
3545
3546 if ((dma_addr + len == dma_addr_next) &&
3547 (len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
3548 len += len_next;
3549 count++;
3550 sg = sg_next_iter;
3551 } else {
3552 break;
3553 }
3554 }
3555
Tomer Tayardf697bc2019-02-28 10:46:22 +02003556 ctl = le32_to_cpu(user_dma_pkt->ctl);
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003557 if (likely(dma_desc_cnt))
Tomer Tayardf697bc2019-02-28 10:46:22 +02003558 ctl &= ~GOYA_PKT_CTL_EB_MASK;
3559 ctl &= ~(GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK |
3560 GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK);
3561 new_dma_pkt->ctl = cpu_to_le32(ctl);
3562 new_dma_pkt->tsize = cpu_to_le32((u32) len);
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003563
3564 dma_addr += hdev->asic_prop.host_phys_base_address;
3565
3566 if (dir == DMA_TO_DEVICE) {
Tomer Tayardf697bc2019-02-28 10:46:22 +02003567 new_dma_pkt->src_addr = cpu_to_le64(dma_addr);
3568 new_dma_pkt->dst_addr = cpu_to_le64(device_memory_addr);
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003569 } else {
Tomer Tayardf697bc2019-02-28 10:46:22 +02003570 new_dma_pkt->src_addr = cpu_to_le64(device_memory_addr);
3571 new_dma_pkt->dst_addr = cpu_to_le64(dma_addr);
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003572 }
3573
3574 if (!user_memset)
3575 device_memory_addr += len;
3576 dma_desc_cnt++;
3577 new_dma_pkt++;
3578 }
3579
3580 if (!dma_desc_cnt) {
3581 dev_err(hdev->dev,
3582 "Error of 0 SG entries when patching DMA packet\n");
3583 return -EFAULT;
3584 }
3585
3586	/* Fix the last DMA packet - rdcomp/wrcomp must be as the user set them */
3587 new_dma_pkt--;
Tomer Tayardf697bc2019-02-28 10:46:22 +02003588 new_dma_pkt->ctl |= cpu_to_le32(user_rdcomp_mask | user_wrcomp_mask);
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003589
3590 *new_dma_pkt_size = dma_desc_cnt * sizeof(struct packet_lin_dma);
3591
3592 return 0;
3593}
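
/*
 * Editor's sketch: the merge rule used in the SG loop above. Two
 * consecutive SG entries are folded into one LIN_DMA descriptor only when
 * they are physically contiguous and the combined length still fits in a
 * single transfer. Hypothetical helper for illustration.
 */
static inline bool goya_sketch_can_merge_sg(dma_addr_t addr, u64 len,
				dma_addr_t next_addr, u64 next_len)
{
	return (addr + len == next_addr) &&
		(len + next_len <= DMA_MAX_TRANSFER_SIZE);
}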
3594
3595static int goya_patch_cb(struct hl_device *hdev,
3596 struct hl_cs_parser *parser)
3597{
3598 u32 cb_parsed_length = 0;
3599 u32 cb_patched_cur_length = 0;
3600 int rc = 0;
3601
3602	/* user_cb_size is greater than 0, so the loop always executes at least once */
3603 while (cb_parsed_length < parser->user_cb_size) {
3604 enum packet_id pkt_id;
3605 u16 pkt_size;
3606 u32 new_pkt_size = 0;
3607 void *user_pkt, *kernel_pkt;
3608
3609 user_pkt = (void *) (uintptr_t)
3610 (parser->user_cb->kernel_address + cb_parsed_length);
3611 kernel_pkt = (void *) (uintptr_t)
3612 (parser->patched_cb->kernel_address +
3613 cb_patched_cur_length);
3614
3615 pkt_id = (enum packet_id) (((*(u64 *) user_pkt) &
3616 PACKET_HEADER_PACKET_ID_MASK) >>
3617 PACKET_HEADER_PACKET_ID_SHIFT);
3618
3619 pkt_size = goya_packet_sizes[pkt_id];
3620 cb_parsed_length += pkt_size;
3621 if (cb_parsed_length > parser->user_cb_size) {
3622 dev_err(hdev->dev,
3623 "packet 0x%x is out of CB boundary\n", pkt_id);
3624 rc = -EINVAL;
3625 break;
3626 }
3627
3628 switch (pkt_id) {
3629 case PACKET_LIN_DMA:
3630 rc = goya_patch_dma_packet(hdev, parser, user_pkt,
3631 kernel_pkt, &new_pkt_size);
3632 cb_patched_cur_length += new_pkt_size;
3633 break;
3634
3635 case PACKET_WREG_32:
3636 memcpy(kernel_pkt, user_pkt, pkt_size);
3637 cb_patched_cur_length += pkt_size;
3638 rc = goya_validate_wreg32(hdev, parser, kernel_pkt);
3639 break;
3640
3641 case PACKET_WREG_BULK:
3642 dev_err(hdev->dev,
3643 "User not allowed to use WREG_BULK\n");
3644 rc = -EPERM;
3645 break;
3646
3647 case PACKET_MSG_PROT:
3648 dev_err(hdev->dev,
3649 "User not allowed to use MSG_PROT\n");
3650 rc = -EPERM;
3651 break;
3652
3653 case PACKET_CP_DMA:
3654 dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
3655 rc = -EPERM;
3656 break;
3657
3658 case PACKET_STOP:
3659 dev_err(hdev->dev, "User not allowed to use STOP\n");
3660 rc = -EPERM;
3661 break;
3662
3663 case PACKET_MSG_LONG:
3664 case PACKET_MSG_SHORT:
3665 case PACKET_FENCE:
3666 case PACKET_NOP:
3667 memcpy(kernel_pkt, user_pkt, pkt_size);
3668 cb_patched_cur_length += pkt_size;
3669 break;
3670
3671 default:
3672 dev_err(hdev->dev, "Invalid packet header 0x%x\n",
3673 pkt_id);
3674 rc = -EINVAL;
3675 break;
3676 }
3677
3678 if (rc)
3679 break;
3680 }
3681
3682 return rc;
3683}
3684
3685static int goya_parse_cb_mmu(struct hl_device *hdev,
3686 struct hl_cs_parser *parser)
3687{
3688 u64 patched_cb_handle;
3689 u32 patched_cb_size;
3690 struct hl_cb *user_cb;
3691 int rc;
3692
3693 /*
3694	 * The new CB should have space at the end for two MSG_PROT packets:
3695 * 1. A packet that will act as a completion packet
3696 * 2. A packet that will generate MSI-X interrupt
3697 */
3698 parser->patched_cb_size = parser->user_cb_size +
3699 sizeof(struct packet_msg_prot) * 2;
3700
3701 rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
3702 parser->patched_cb_size,
3703 &patched_cb_handle, HL_KERNEL_ASID_ID);
3704
3705 if (rc) {
3706 dev_err(hdev->dev,
3707 "Failed to allocate patched CB for DMA CS %d\n",
3708 rc);
3709 return rc;
3710 }
3711
3712 patched_cb_handle >>= PAGE_SHIFT;
3713 parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
3714 (u32) patched_cb_handle);
3715 /* hl_cb_get should never fail here so use kernel WARN */
3716 WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
3717 (u32) patched_cb_handle);
3718 if (!parser->patched_cb) {
3719 rc = -EFAULT;
3720 goto out;
3721 }
3722
3723 /*
3724 * The check that parser->user_cb_size <= parser->user_cb->size was done
3725 * in validate_queue_index().
3726 */
3727 memcpy((void *) (uintptr_t) parser->patched_cb->kernel_address,
3728 (void *) (uintptr_t) parser->user_cb->kernel_address,
3729 parser->user_cb_size);
3730
3731 patched_cb_size = parser->patched_cb_size;
3732
3733 /* validate patched CB instead of user CB */
3734 user_cb = parser->user_cb;
3735 parser->user_cb = parser->patched_cb;
3736 rc = goya_validate_cb(hdev, parser, true);
3737 parser->user_cb = user_cb;
3738
3739 if (rc) {
3740 hl_cb_put(parser->patched_cb);
3741 goto out;
3742 }
3743
3744 if (patched_cb_size != parser->patched_cb_size) {
3745 dev_err(hdev->dev, "user CB size mismatch\n");
3746 hl_cb_put(parser->patched_cb);
3747 rc = -EINVAL;
3748 goto out;
3749 }
3750
3751out:
3752 /*
3753 * Always call cb destroy here because we still have 1 reference
3754	 * to it from the earlier cb_get call. After the job completes,
3755 * cb_put will release it, but here we want to remove it from the
3756 * idr
3757 */
3758 hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
3759 patched_cb_handle << PAGE_SHIFT);
3760
3761 return rc;
3762}
3763
Oded Gabbay5e6e0232019-02-27 12:15:16 +02003764static int goya_parse_cb_no_mmu(struct hl_device *hdev,
3765 struct hl_cs_parser *parser)
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003766{
3767 u64 patched_cb_handle;
3768 int rc;
3769
3770 rc = goya_validate_cb(hdev, parser, false);
3771
3772 if (rc)
3773 goto free_userptr;
3774
3775 rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
3776 parser->patched_cb_size,
3777 &patched_cb_handle, HL_KERNEL_ASID_ID);
3778 if (rc) {
3779 dev_err(hdev->dev,
3780 "Failed to allocate patched CB for DMA CS %d\n", rc);
3781 goto free_userptr;
3782 }
3783
3784 patched_cb_handle >>= PAGE_SHIFT;
3785 parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
3786 (u32) patched_cb_handle);
3787 /* hl_cb_get should never fail here so use kernel WARN */
3788 WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
3789 (u32) patched_cb_handle);
3790 if (!parser->patched_cb) {
3791 rc = -EFAULT;
3792 goto out;
3793 }
3794
3795 rc = goya_patch_cb(hdev, parser);
3796
3797 if (rc)
3798 hl_cb_put(parser->patched_cb);
3799
3800out:
3801 /*
3802 * Always call cb destroy here because we still have 1 reference
3803	 * to it from the earlier cb_get call. After the job completes,
3804 * cb_put will release it, but here we want to remove it from the
3805 * idr
3806 */
3807 hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
3808 patched_cb_handle << PAGE_SHIFT);
3809
3810free_userptr:
3811 if (rc)
3812 hl_userptr_delete_list(hdev, parser->job_userptr_list);
3813 return rc;
3814}
3815
Oded Gabbay5e6e0232019-02-27 12:15:16 +02003816static int goya_parse_cb_no_ext_queue(struct hl_device *hdev,
3817 struct hl_cs_parser *parser)
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003818{
3819 struct asic_fixed_properties *asic_prop = &hdev->asic_prop;
3820 struct goya_device *goya = hdev->asic_specific;
3821
3822 if (!(goya->hw_cap_initialized & HW_CAP_MMU)) {
3823 /* For internal queue jobs, just check if cb address is valid */
3824 if (hl_mem_area_inside_range(
3825 (u64) (uintptr_t) parser->user_cb,
3826 parser->user_cb_size,
3827 asic_prop->sram_user_base_address,
3828 asic_prop->sram_end_address))
3829 return 0;
3830
3831 if (hl_mem_area_inside_range(
3832 (u64) (uintptr_t) parser->user_cb,
3833 parser->user_cb_size,
3834 asic_prop->dram_user_base_address,
3835 asic_prop->dram_end_address))
3836 return 0;
3837
3838 dev_err(hdev->dev,
Oded Gabbaydbbe358b2019-03-02 11:43:12 +02003839		"Internal CB address %px + 0x%x is neither in SRAM nor in DRAM\n",
Oded Gabbayefaa2812019-02-28 11:55:45 +02003840 parser->user_cb, parser->user_cb_size);
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003841
3842 return -EFAULT;
3843 }
3844
3845 return 0;
3846}
3847
3848int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
3849{
3850 struct goya_device *goya = hdev->asic_specific;
3851
3852 if (!parser->ext_queue)
3853		return goya_parse_cb_no_ext_queue(hdev, parser);
3854
3855 if ((goya->hw_cap_initialized & HW_CAP_MMU) && parser->use_virt_addr)
3856 return goya_parse_cb_mmu(hdev, parser);
3857 else
3858 return goya_parse_cb_no_mmu(hdev, parser);
3859}
3860
3861void goya_add_end_of_cb_packets(u64 kernel_address, u32 len, u64 cq_addr,
3862 u32 cq_val, u32 msix_vec)
3863{
3864 struct packet_msg_prot *cq_pkt;
Tomer Tayardf697bc2019-02-28 10:46:22 +02003865 u32 tmp;
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003866
3867 cq_pkt = (struct packet_msg_prot *) (uintptr_t)
3868 (kernel_address + len - (sizeof(struct packet_msg_prot) * 2));
3869
Tomer Tayardf697bc2019-02-28 10:46:22 +02003870 tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003871 (1 << GOYA_PKT_CTL_EB_SHIFT) |
3872 (1 << GOYA_PKT_CTL_MB_SHIFT);
Tomer Tayardf697bc2019-02-28 10:46:22 +02003873 cq_pkt->ctl = cpu_to_le32(tmp);
3874 cq_pkt->value = cpu_to_le32(cq_val);
3875 cq_pkt->addr = cpu_to_le64(cq_addr);
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003876
3877 cq_pkt++;
3878
Tomer Tayardf697bc2019-02-28 10:46:22 +02003879 tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003880 (1 << GOYA_PKT_CTL_MB_SHIFT);
Tomer Tayardf697bc2019-02-28 10:46:22 +02003881 cq_pkt->ctl = cpu_to_le32(tmp);
3882 cq_pkt->value = cpu_to_le32(msix_vec & 0x7FF);
3883 cq_pkt->addr = cpu_to_le64(CFG_BASE + mmPCIE_DBI_MSIX_DOORBELL_OFF);
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003884}
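
/*
 * Editor's sketch: the ctl words built above are a plain OR of the
 * MSG_PROT opcode and the barrier bits; only the first (completion)
 * packet sets the engine barrier. Hypothetical helper.
 */
static inline u32 goya_sketch_msg_prot_ctl(bool eng_barrier)
{
	u32 ctl = PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT;

	if (eng_barrier)
		ctl |= 1 << GOYA_PKT_CTL_EB_SHIFT;

	return ctl | (1 << GOYA_PKT_CTL_MB_SHIFT);
}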
3885
Oded Gabbay1251f232019-02-16 00:39:18 +02003886static void goya_update_eq_ci(struct hl_device *hdev, u32 val)
3887{
3888 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, val);
3889}
3890
Oded Gabbay5e6e0232019-02-27 12:15:16 +02003891static void goya_restore_phase_topology(struct hl_device *hdev)
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003892{
3893 int i, num_of_sob_in_longs, num_of_mon_in_longs;
3894
3895 num_of_sob_in_longs =
3896 ((mmSYNC_MNGR_SOB_OBJ_1023 - mmSYNC_MNGR_SOB_OBJ_0) + 4);
3897
3898 num_of_mon_in_longs =
3899 ((mmSYNC_MNGR_MON_STATUS_255 - mmSYNC_MNGR_MON_STATUS_0) + 4);
3900
3901 for (i = 0 ; i < num_of_sob_in_longs ; i += 4)
3902 WREG32(mmSYNC_MNGR_SOB_OBJ_0 + i, 0);
3903
3904 for (i = 0 ; i < num_of_mon_in_longs ; i += 4)
3905 WREG32(mmSYNC_MNGR_MON_STATUS_0 + i, 0);
3906
3907 /* Flush all WREG to prevent race */
3908 i = RREG32(mmSYNC_MNGR_SOB_OBJ_0);
3909}
3910
Oded Gabbayc2164772019-02-16 00:39:24 +02003911/*
3912 * goya_debugfs_read32 - read a 32bit value from a given device address
3913 *
3914 * @hdev: pointer to hl_device structure
3915 * @addr: address in device
3916 * @val: returned value
3917 *
3918 * For a DDR address that is not mapped into the default aperture exposed by
3919 * the DDR bar, the function configures the iATU so that the DDR bar is
3920 * positioned at a base address that allows reading from the required
3921 * address. Configuring the iATU during normal operation can lead to
3922 * undefined behavior and therefore should be done with extreme care.
3923 *
3924 */
Oded Gabbay5e6e0232019-02-27 12:15:16 +02003925static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
Oded Gabbayc2164772019-02-16 00:39:24 +02003926{
3927 struct asic_fixed_properties *prop = &hdev->asic_prop;
3928 int rc = 0;
3929
3930 if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
3931 *val = RREG32(addr - CFG_BASE);
3932
3933 } else if ((addr >= SRAM_BASE_ADDR) &&
3934 (addr < SRAM_BASE_ADDR + SRAM_SIZE)) {
3935
3936 *val = readl(hdev->pcie_bar[SRAM_CFG_BAR_ID] +
3937 (addr - SRAM_BASE_ADDR));
3938
3939 } else if ((addr >= DRAM_PHYS_BASE) &&
3940 (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size)) {
3941
3942 u64 bar_base_addr = DRAM_PHYS_BASE +
3943 (addr & ~(prop->dram_pci_bar_size - 0x1ull));
3944
3945 rc = goya_set_ddr_bar_base(hdev, bar_base_addr);
3946 if (!rc) {
3947 *val = readl(hdev->pcie_bar[DDR_BAR_ID] +
3948 (addr - bar_base_addr));
3949
3950 rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE +
3951 (MMU_PAGE_TABLES_ADDR &
3952 ~(prop->dram_pci_bar_size - 0x1ull)));
3953 }
3954 } else {
3955 rc = -EFAULT;
3956 }
3957
3958 return rc;
3959}
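
/*
 * Editor's sketch: the DDR bar is repositioned in aligned windows of
 * dram_pci_bar_size bytes, so the base computed above is the requested
 * address rounded down to a window boundary. Hypothetical helper,
 * assuming bar_size is a power of 2 as in the driver.
 */
static inline u64 goya_sketch_ddr_bar_base(u64 addr, u64 bar_size)
{
	return DRAM_PHYS_BASE + (addr & ~(bar_size - 0x1ull));
}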
3960
3961/*
3962 * goya_debugfs_write32 - write a 32bit value to a given device address
3963 *
3964 * @hdev: pointer to hl_device structure
3965 * @addr: address in device
3966 * @val: value to write
3967 *
3968 * For a DDR address that is not mapped into the default aperture exposed by
3969 * the DDR bar, the function configures the iATU so that the DDR bar is
3970 * positioned at a base address that allows writing to the required
3971 * address. Configuring the iATU during normal operation can lead to
3972 * undefined behavior and therefore should be done with extreme care.
3973 *
3974 */
Oded Gabbay5e6e0232019-02-27 12:15:16 +02003975static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
Oded Gabbayc2164772019-02-16 00:39:24 +02003976{
3977 struct asic_fixed_properties *prop = &hdev->asic_prop;
3978 int rc = 0;
3979
3980 if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
3981 WREG32(addr - CFG_BASE, val);
3982
3983 } else if ((addr >= SRAM_BASE_ADDR) &&
3984 (addr < SRAM_BASE_ADDR + SRAM_SIZE)) {
3985
3986 writel(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] +
3987 (addr - SRAM_BASE_ADDR));
3988
3989 } else if ((addr >= DRAM_PHYS_BASE) &&
3990 (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size)) {
3991
3992 u64 bar_base_addr = DRAM_PHYS_BASE +
3993 (addr & ~(prop->dram_pci_bar_size - 0x1ull));
3994
3995 rc = goya_set_ddr_bar_base(hdev, bar_base_addr);
3996 if (!rc) {
3997 writel(val, hdev->pcie_bar[DDR_BAR_ID] +
3998 (addr - bar_base_addr));
3999
4000 rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE +
4001 (MMU_PAGE_TABLES_ADDR &
4002 ~(prop->dram_pci_bar_size - 0x1ull)));
4003 }
4004 } else {
4005 rc = -EFAULT;
4006 }
4007
4008 return rc;
4009}
4010
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004011static u64 goya_read_pte(struct hl_device *hdev, u64 addr)
4012{
4013 struct goya_device *goya = hdev->asic_specific;
4014
4015 return readq(hdev->pcie_bar[DDR_BAR_ID] +
4016 (addr - goya->ddr_bar_cur_addr));
4017}
4018
4019static void goya_write_pte(struct hl_device *hdev, u64 addr, u64 val)
4020{
4021 struct goya_device *goya = hdev->asic_specific;
4022
4023 writeq(val, hdev->pcie_bar[DDR_BAR_ID] +
4024 (addr - goya->ddr_bar_cur_addr));
4025}
4026
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004027static const char *_goya_get_event_desc(u16 event_type)
Oded Gabbay1251f232019-02-16 00:39:18 +02004028{
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004029 switch (event_type) {
4030 case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
4031 return "PCIe_dec";
4032 case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
4033 case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
4034 case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
4035 case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
4036 case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
4037 case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
4038 case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
4039 case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
4040 return "TPC%d_dec";
4041 case GOYA_ASYNC_EVENT_ID_MME_WACS:
4042 return "MME_wacs";
4043 case GOYA_ASYNC_EVENT_ID_MME_WACSD:
4044 return "MME_wacsd";
4045 case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER:
4046 return "CPU_axi_splitter";
4047 case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC:
4048 return "PSOC_axi_dec";
4049 case GOYA_ASYNC_EVENT_ID_PSOC:
4050 return "PSOC";
4051 case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
4052 case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
4053 case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
4054 case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
4055 case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
4056 case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
4057 case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
4058 case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
4059 return "TPC%d_krn_err";
4060 case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ:
4061 return "TPC%d_cq";
4062 case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
4063 return "TPC%d_qm";
4064 case GOYA_ASYNC_EVENT_ID_MME_QM:
4065 return "MME_qm";
4066 case GOYA_ASYNC_EVENT_ID_MME_CMDQ:
4067 return "MME_cq";
4068 case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
4069 return "DMA%d_qm";
4070 case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
4071 return "DMA%d_ch";
4072 default:
4073 return "N/A";
4074 }
Oded Gabbay1251f232019-02-16 00:39:18 +02004075}
4076
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004077static void goya_get_event_desc(u16 event_type, char *desc, size_t size)
Oded Gabbay1251f232019-02-16 00:39:18 +02004078{
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004079 u8 index;
Oded Gabbay1251f232019-02-16 00:39:18 +02004080
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004081 switch (event_type) {
4082 case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
4083 case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
4084 case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
4085 case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
4086 case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
4087 case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
4088 case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
4089 case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
4090 index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_DEC) / 3;
4091 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4092 break;
4093 case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
4094 case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
4095 case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
4096 case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
4097 case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
4098 case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
4099 case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
4100 case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
4101 index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR) / 10;
4102 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4103 break;
4104 case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ:
4105 index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_CMDQ;
4106 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4107 break;
4108 case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
4109 index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_QM;
4110 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4111 break;
4112 case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
4113 index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_QM;
4114 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4115 break;
4116 case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
4117 index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_CH;
4118 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4119 break;
4120 default:
4121 snprintf(desc, size, _goya_get_event_desc(event_type));
4122 break;
4123 }
4124}
Oded Gabbay1251f232019-02-16 00:39:18 +02004125
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004126static void goya_print_razwi_info(struct hl_device *hdev)
4127{
4128 if (RREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD)) {
4129 dev_err(hdev->dev, "Illegal write to LBW\n");
4130 WREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD, 0);
4131 }
Oded Gabbay1251f232019-02-16 00:39:18 +02004132
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004133 if (RREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD)) {
4134 dev_err(hdev->dev, "Illegal read from LBW\n");
4135 WREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD, 0);
4136 }
4137
4138 if (RREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD)) {
4139 dev_err(hdev->dev, "Illegal write to HBW\n");
4140 WREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD, 0);
4141 }
4142
4143 if (RREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD)) {
4144 dev_err(hdev->dev, "Illegal read from HBW\n");
4145 WREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD, 0);
4146 }
4147}
4148
4149static void goya_print_mmu_error_info(struct hl_device *hdev)
4150{
4151 struct goya_device *goya = hdev->asic_specific;
4152 u64 addr;
4153 u32 val;
4154
4155 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4156 return;
4157
4158 val = RREG32(mmMMU_PAGE_ERROR_CAPTURE);
4159 if (val & MMU_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK) {
4160 addr = val & MMU_PAGE_ERROR_CAPTURE_VA_49_32_MASK;
4161 addr <<= 32;
4162 addr |= RREG32(mmMMU_PAGE_ERROR_CAPTURE_VA);
4163
4164 dev_err(hdev->dev, "MMU page fault on va 0x%llx\n", addr);
4165
4166 WREG32(mmMMU_PAGE_ERROR_CAPTURE, 0);
Oded Gabbay1251f232019-02-16 00:39:18 +02004167 }
4168}
4169
4170static void goya_print_irq_info(struct hl_device *hdev, u16 event_type)
4171{
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004172 char desc[20] = "";
Oded Gabbay1251f232019-02-16 00:39:18 +02004173
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004174 goya_get_event_desc(event_type, desc, sizeof(desc));
4175 dev_err(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
4176 event_type, desc);
Oded Gabbay1251f232019-02-16 00:39:18 +02004177
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004178 goya_print_razwi_info(hdev);
4179 goya_print_mmu_error_info(hdev);
Oded Gabbay1251f232019-02-16 00:39:18 +02004180}
4181
Oded Gabbayf8c8c7d2019-02-16 00:39:20 +02004182static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
4183 size_t irq_arr_size)
4184{
4185 struct armcp_unmask_irq_arr_packet *pkt;
4186 size_t total_pkt_size;
4187 long result;
4188 int rc;
4189
4190 total_pkt_size = sizeof(struct armcp_unmask_irq_arr_packet) +
4191 irq_arr_size;
4192
4193	/* data should be aligned to 8 bytes so that ArmCP can copy it */
4194 total_pkt_size = (total_pkt_size + 0x7) & ~0x7;
4195
4196	/* total_pkt_size is cast to u16 later on */
4197 if (total_pkt_size > USHRT_MAX) {
4198 dev_err(hdev->dev, "too many elements in IRQ array\n");
4199 return -EINVAL;
4200 }
4201
4202 pkt = kzalloc(total_pkt_size, GFP_KERNEL);
4203 if (!pkt)
4204 return -ENOMEM;
4205
Tomer Tayardf697bc2019-02-28 10:46:22 +02004206 pkt->length = cpu_to_le32(irq_arr_size / sizeof(irq_arr[0]));
Oded Gabbayf8c8c7d2019-02-16 00:39:20 +02004207 memcpy(&pkt->irqs, irq_arr, irq_arr_size);
4208
Tomer Tayardf697bc2019-02-28 10:46:22 +02004209 pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
4210 ARMCP_PKT_CTL_OPCODE_SHIFT);
Oded Gabbayf8c8c7d2019-02-16 00:39:20 +02004211
4212 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
4213 total_pkt_size, HL_DEVICE_TIMEOUT_USEC, &result);
4214
4215 if (rc)
4216 dev_err(hdev->dev, "failed to unmask IRQ array\n");
4217
4218 kfree(pkt);
4219
4220 return rc;
4221}
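
/*
 * Editor's sketch: the round-up used above, (x + 0x7) & ~0x7, aligns the
 * packet size to the next 8-byte boundary (e.g. 13 -> 16, 16 -> 16). The
 * kernel's ALIGN(x, 8) macro expresses the same computation.
 */
static inline size_t goya_sketch_align8(size_t size)
{
	return (size + 0x7) & ~(size_t)0x7;
}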
4222
4223static int goya_soft_reset_late_init(struct hl_device *hdev)
4224{
4225 /*
4226 * Unmask all IRQs since some could have been received
4227 * during the soft reset
4228 */
Oded Gabbayb24ca452019-02-24 15:50:53 +02004229 return goya_unmask_irq_arr(hdev, goya_all_events,
4230 sizeof(goya_all_events));
Oded Gabbayf8c8c7d2019-02-16 00:39:20 +02004231}
4232
Oded Gabbay1251f232019-02-16 00:39:18 +02004233static int goya_unmask_irq(struct hl_device *hdev, u16 event_type)
4234{
4235 struct armcp_packet pkt;
4236 long result;
4237 int rc;
4238
4239 memset(&pkt, 0, sizeof(pkt));
4240
Tomer Tayardf697bc2019-02-28 10:46:22 +02004241 pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ <<
4242 ARMCP_PKT_CTL_OPCODE_SHIFT);
4243 pkt.value = cpu_to_le64(event_type);
Oded Gabbay1251f232019-02-16 00:39:18 +02004244
4245 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
4246 HL_DEVICE_TIMEOUT_USEC, &result);
4247
4248 if (rc)
4249 dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);
4250
4251 return rc;
4252}
4253
4254void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
4255{
Tomer Tayardf697bc2019-02-28 10:46:22 +02004256 u32 ctl = le32_to_cpu(eq_entry->hdr.ctl);
4257 u16 event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK)
4258 >> EQ_CTL_EVENT_TYPE_SHIFT);
Oded Gabbay1251f232019-02-16 00:39:18 +02004259 struct goya_device *goya = hdev->asic_specific;
4260
4261 goya->events_stat[event_type]++;
4262
4263 switch (event_type) {
4264 case GOYA_ASYNC_EVENT_ID_PCIE_IF:
4265 case GOYA_ASYNC_EVENT_ID_TPC0_ECC:
4266 case GOYA_ASYNC_EVENT_ID_TPC1_ECC:
4267 case GOYA_ASYNC_EVENT_ID_TPC2_ECC:
4268 case GOYA_ASYNC_EVENT_ID_TPC3_ECC:
4269 case GOYA_ASYNC_EVENT_ID_TPC4_ECC:
4270 case GOYA_ASYNC_EVENT_ID_TPC5_ECC:
4271 case GOYA_ASYNC_EVENT_ID_TPC6_ECC:
4272 case GOYA_ASYNC_EVENT_ID_TPC7_ECC:
4273 case GOYA_ASYNC_EVENT_ID_MME_ECC:
4274 case GOYA_ASYNC_EVENT_ID_MME_ECC_EXT:
4275 case GOYA_ASYNC_EVENT_ID_MMU_ECC:
4276 case GOYA_ASYNC_EVENT_ID_DMA_MACRO:
4277 case GOYA_ASYNC_EVENT_ID_DMA_ECC:
4278 case GOYA_ASYNC_EVENT_ID_CPU_IF_ECC:
4279 case GOYA_ASYNC_EVENT_ID_PSOC_MEM:
4280 case GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT:
4281 case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
4282 case GOYA_ASYNC_EVENT_ID_GIC500:
4283 case GOYA_ASYNC_EVENT_ID_PLL0:
4284 case GOYA_ASYNC_EVENT_ID_PLL1:
4285 case GOYA_ASYNC_EVENT_ID_PLL3:
4286 case GOYA_ASYNC_EVENT_ID_PLL4:
4287 case GOYA_ASYNC_EVENT_ID_PLL5:
4288 case GOYA_ASYNC_EVENT_ID_PLL6:
4289 case GOYA_ASYNC_EVENT_ID_AXI_ECC:
4290 case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC:
4291 case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
4292 case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT:
4293 dev_err(hdev->dev,
4294 "Received H/W interrupt %d, reset the chip\n",
4295 event_type);
Oded Gabbayf8c8c7d2019-02-16 00:39:20 +02004296 hl_device_reset(hdev, true, false);
Oded Gabbay1251f232019-02-16 00:39:18 +02004297 break;
4298
4299 case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
4300 case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
4301 case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
4302 case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
4303 case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
4304 case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
4305 case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
4306 case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
4307 case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
4308 case GOYA_ASYNC_EVENT_ID_MME_WACS:
4309 case GOYA_ASYNC_EVENT_ID_MME_WACSD:
4310 case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER:
4311 case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC:
4312 case GOYA_ASYNC_EVENT_ID_PSOC:
4313 case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
4314 case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
4315 case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
4316 case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
4317 case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
4318 case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
4319 case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
4320 case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
4321 case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
4322 case GOYA_ASYNC_EVENT_ID_MME_QM:
4323 case GOYA_ASYNC_EVENT_ID_MME_CMDQ:
4324 case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
4325 case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
4326 goya_print_irq_info(hdev, event_type);
4327 goya_unmask_irq(hdev, event_type);
4328 break;
4329
4330 case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
4331 case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
4332 case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
4333 case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU:
4334 case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU:
4335 case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
4336 case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
4337 case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
4338 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0:
4339 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH1:
4340 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH2:
4341 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH3:
4342 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
4343 dev_info(hdev->dev, "Received H/W interrupt %d\n", event_type);
4344 break;
4345
4346 default:
4347 dev_err(hdev->dev, "Received invalid H/W interrupt %d\n",
4348 event_type);
4349 break;
4350 }
4351}
4352
4353void *goya_get_events_stat(struct hl_device *hdev, u32 *size)
4354{
4355 struct goya_device *goya = hdev->asic_specific;
4356
4357 *size = (u32) sizeof(goya->events_stat);
4358
4359 return goya->events_stat;
4360}
4361
Omer Shpigelman27ca384c2019-02-28 10:46:11 +02004362static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u32 size,
4363 u64 val, bool is_dram)
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004364{
Omer Shpigelman27ca384c2019-02-28 10:46:11 +02004365 struct packet_lin_dma *lin_dma_pkt;
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004366 struct hl_cs_parser parser;
4367 struct hl_cs_job *job;
Tomer Tayardf697bc2019-02-28 10:46:22 +02004368 u32 cb_size, ctl;
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004369 struct hl_cb *cb;
4370 int rc;
4371
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004372 cb = hl_cb_kernel_create(hdev, PAGE_SIZE);
4373 if (!cb)
4374 return -EFAULT;
4375
Omer Shpigelman27ca384c2019-02-28 10:46:11 +02004376 lin_dma_pkt = (struct packet_lin_dma *) (uintptr_t) cb->kernel_address;
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004377
Omer Shpigelman27ca384c2019-02-28 10:46:11 +02004378 memset(lin_dma_pkt, 0, sizeof(*lin_dma_pkt));
4379 cb_size = sizeof(*lin_dma_pkt);
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004380
Tomer Tayardf697bc2019-02-28 10:46:22 +02004381 ctl = ((PACKET_LIN_DMA << GOYA_PKT_CTL_OPCODE_SHIFT) |
4382 (1 << GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT) |
4383 (1 << GOYA_PKT_LIN_DMA_CTL_WO_SHIFT) |
4384 (1 << GOYA_PKT_CTL_RB_SHIFT) |
4385 (1 << GOYA_PKT_CTL_MB_SHIFT));
4386 ctl |= (is_dram ? DMA_HOST_TO_DRAM : DMA_HOST_TO_SRAM) <<
4387 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
4388 lin_dma_pkt->ctl = cpu_to_le32(ctl);
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004389
Tomer Tayardf697bc2019-02-28 10:46:22 +02004390 lin_dma_pkt->src_addr = cpu_to_le64(val);
4391 lin_dma_pkt->dst_addr = cpu_to_le64(addr);
4392 lin_dma_pkt->tsize = cpu_to_le32(size);
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004393
4394 job = hl_cs_allocate_job(hdev, true);
4395 if (!job) {
4396 dev_err(hdev->dev, "Failed to allocate a new job\n");
4397 rc = -ENOMEM;
4398 goto release_cb;
4399 }
4400
4401 job->id = 0;
4402 job->user_cb = cb;
4403 job->user_cb->cs_cnt++;
4404 job->user_cb_size = cb_size;
4405 job->hw_queue_id = GOYA_QUEUE_ID_DMA_0;
4406
Oded Gabbayc2164772019-02-16 00:39:24 +02004407 hl_debugfs_add_job(hdev, job);
4408
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004409 parser.ctx_id = HL_KERNEL_ASID_ID;
4410 parser.cs_sequence = 0;
4411 parser.job_id = job->id;
4412 parser.hw_queue_id = job->hw_queue_id;
4413 parser.job_userptr_list = &job->userptr_list;
4414 parser.user_cb = job->user_cb;
4415 parser.user_cb_size = job->user_cb_size;
4416 parser.ext_queue = job->ext_queue;
4417 parser.use_virt_addr = hdev->mmu_enable;
4418
4419 rc = hdev->asic_funcs->cs_parser(hdev, &parser);
4420 if (rc) {
Omer Shpigelman27ca384c2019-02-28 10:46:11 +02004421 dev_err(hdev->dev, "Failed to parse kernel CB\n");
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004422 goto free_job;
4423 }
4424
4425 job->patched_cb = parser.patched_cb;
4426 job->job_cb_size = parser.patched_cb_size;
4427 job->patched_cb->cs_cnt++;
4428
4429 rc = goya_send_job_on_qman0(hdev, job);
4430
4431 job->patched_cb->cs_cnt--;
4432 hl_cb_put(job->patched_cb);
4433
4434free_job:
4435 hl_userptr_delete_list(hdev, &job->userptr_list);
Oded Gabbayc2164772019-02-16 00:39:24 +02004436 hl_debugfs_remove_job(hdev, job);
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004437 kfree(job);
4438 cb->cs_cnt--;
4439
4440release_cb:
4441 hl_cb_put(cb);
4442 hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
4443
4444 return rc;
4445}
4446
Omer Shpigelman27ca384c2019-02-28 10:46:11 +02004447static int goya_context_switch(struct hl_device *hdev, u32 asid)
4448{
4449 struct asic_fixed_properties *prop = &hdev->asic_prop;
4450 u64 addr = prop->sram_base_address;
4451 u32 size = hdev->pldm ? 0x10000 : prop->sram_size;
4452 u64 val = 0x7777777777777777ull;
4453 int rc;
4454
4455 rc = goya_memset_device_memory(hdev, addr, size, val, false);
4456 if (rc) {
4457 dev_err(hdev->dev, "Failed to clear SRAM in context switch\n");
4458 return rc;
4459 }
4460
4461 goya_mmu_prepare(hdev, asid);
4462
4463 return 0;
4464}
4465
4466static int goya_mmu_clear_pgt_range(struct hl_device *hdev)
4467{
4468 struct asic_fixed_properties *prop = &hdev->asic_prop;
4469 struct goya_device *goya = hdev->asic_specific;
4470 u64 addr = prop->mmu_pgt_addr;
4471 u32 size = prop->mmu_pgt_size + MMU_DRAM_DEFAULT_PAGE_SIZE +
4472 MMU_CACHE_MNG_SIZE;
4473
4474 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4475 return 0;
4476
4477 return goya_memset_device_memory(hdev, addr, size, 0, true);
4478}
4479
4480static int goya_mmu_set_dram_default_page(struct hl_device *hdev)
4481{
4482 struct goya_device *goya = hdev->asic_specific;
4483 u64 addr = hdev->asic_prop.mmu_dram_default_page_addr;
4484 u32 size = MMU_DRAM_DEFAULT_PAGE_SIZE;
4485 u64 val = 0x9999999999999999ull;
4486
4487 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4488 return 0;
4489
4490 return goya_memset_device_memory(hdev, addr, size, val, true);
4491}
4492
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004493static void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
4494{
4495 struct goya_device *goya = hdev->asic_specific;
4496 int i;
4497
4498 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4499 return;
4500
4501 if (asid & ~MME_QM_GLBL_SECURE_PROPS_ASID_MASK) {
4502 WARN(1, "asid %u is too big\n", asid);
4503 return;
4504 }
4505
4506 /* zero the MMBP and ASID bits and then set the ASID */
4507 for (i = 0 ; i < GOYA_MMU_REGS_NUM ; i++) {
4508 WREG32_AND(goya_mmu_regs[i], ~0x7FF);
4509 WREG32_OR(goya_mmu_regs[i], asid);
4510 }
4511}
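
/*
 * Editor's sketch: each secure-props register touched above keeps the MMU
 * bypass bit and the ASID in its low 11 bits, so preparing a context is a
 * clear-then-set read-modify-write. Hypothetical helper mirroring the
 * WREG32_AND/WREG32_OR pair.
 */
static inline u32 goya_sketch_secure_props(u32 reg_val, u32 asid)
{
	return (reg_val & ~0x7FF) | asid;
}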
4512
4513static void goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard)
4514{
4515 struct goya_device *goya = hdev->asic_specific;
4516 u32 status, timeout_usec;
4517 int rc;
4518
4519 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4520 return;
4521
4522	/* no need for an L1-only invalidation in Goya */
4523 if (!is_hard)
4524 return;
4525
4526 if (hdev->pldm)
4527 timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
4528 else
4529 timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
4530
4531 mutex_lock(&hdev->mmu_cache_lock);
4532
4533 /* L0 & L1 invalidation */
4534 WREG32(mmSTLB_INV_ALL_START, 1);
4535
4536 rc = hl_poll_timeout(
4537 hdev,
4538 mmSTLB_INV_ALL_START,
4539 status,
4540 !status,
4541 1000,
4542 timeout_usec);
4543
4544 mutex_unlock(&hdev->mmu_cache_lock);
4545
4546 if (rc)
4547 dev_notice_ratelimited(hdev->dev,
4548 "Timeout when waiting for MMU cache invalidation\n");
4549}
4550
4551static void goya_mmu_invalidate_cache_range(struct hl_device *hdev,
4552 bool is_hard, u32 asid, u64 va, u64 size)
4553{
4554 struct goya_device *goya = hdev->asic_specific;
4555 u32 status, timeout_usec, inv_data, pi;
4556 int rc;
4557
4558 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4559 return;
4560
4561	/* no need for an L1-only invalidation in Goya */
4562 if (!is_hard)
4563 return;
4564
4565 if (hdev->pldm)
4566 timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
4567 else
4568 timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
4569
4570 mutex_lock(&hdev->mmu_cache_lock);
4571
4572 /*
4573 * TODO: currently invalidate entire L0 & L1 as in regular hard
4574 * invalidation. Need to apply invalidation of specific cache lines with
4575 * mask of ASID & VA & size.
4576	 * Note that L1 will be flushed entirely in any case.
4577 */
4578
4579 /* L0 & L1 invalidation */
4580 inv_data = RREG32(mmSTLB_CACHE_INV);
4581 /* PI is 8 bit */
4582 pi = ((inv_data & STLB_CACHE_INV_PRODUCER_INDEX_MASK) + 1) & 0xFF;
4583 WREG32(mmSTLB_CACHE_INV,
4584 (inv_data & STLB_CACHE_INV_INDEX_MASK_MASK) | pi);
4585
4586 rc = hl_poll_timeout(
4587 hdev,
4588 mmSTLB_INV_CONSUMER_INDEX,
4589 status,
4590 status == pi,
4591 1000,
4592 timeout_usec);
4593
4594 mutex_unlock(&hdev->mmu_cache_lock);
4595
4596 if (rc)
4597 dev_notice_ratelimited(hdev->dev,
4598 "Timeout when waiting for MMU cache invalidation\n");
4599}
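
/*
 * Editor's sketch: the range invalidation above advances an 8-bit
 * producer index with wrap-around and then polls until the consumer
 * index catches up. Hypothetical helper showing the wrap arithmetic.
 */
static inline u32 goya_sketch_next_pi(u32 inv_data)
{
	return ((inv_data & STLB_CACHE_INV_PRODUCER_INDEX_MASK) + 1) & 0xFF;
}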
4600
4601static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
4602 u64 phys_addr)
4603{
4604 u32 status, timeout_usec;
4605 int rc;
4606
4607 if (hdev->pldm)
4608 timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
4609 else
4610 timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
4611
4612 WREG32(MMU_HOP0_PA43_12, phys_addr >> MMU_HOP0_PA43_12_SHIFT);
4613 WREG32(MMU_HOP0_PA49_44, phys_addr >> MMU_HOP0_PA49_44_SHIFT);
4614 WREG32(MMU_ASID_BUSY, 0x80000000 | asid);
4615
4616 rc = hl_poll_timeout(
4617 hdev,
4618 MMU_ASID_BUSY,
4619 status,
4620 !(status & 0x80000000),
4621 1000,
4622 timeout_usec);
4623
4624 if (rc) {
4625 dev_err(hdev->dev,
4626 "Timeout during MMU hop0 config of asid %d\n", asid);
4627 return rc;
4628 }
4629
4630 return 0;
4631}
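
/*
 * Editor's sketch: the hop0 page-table physical address is split across
 * two registers, one holding bits 43:12 and one holding bits 49:44, via
 * the shifts used above. Hypothetical helpers for illustration.
 */
static inline u32 goya_sketch_hop0_pa43_12(u64 phys_addr)
{
	return (u32) (phys_addr >> MMU_HOP0_PA43_12_SHIFT);
}

static inline u32 goya_sketch_hop0_pa49_44(u64 phys_addr)
{
	return (u32) (phys_addr >> MMU_HOP0_PA49_44_SHIFT);
}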
4632
Oded Gabbayf8c8c7d2019-02-16 00:39:20 +02004633int goya_send_heartbeat(struct hl_device *hdev)
4634{
4635 struct goya_device *goya = hdev->asic_specific;
Oded Gabbayf8c8c7d2019-02-16 00:39:20 +02004636
4637 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
4638 return 0;
4639
Tomer Tayar3110c602019-03-04 10:22:09 +02004640 return hl_fw_send_heartbeat(hdev);
Oded Gabbayf8c8c7d2019-02-16 00:39:20 +02004641}
4642
Tomer Tayar393e5b52019-03-06 14:30:26 +02004643int goya_armcp_info_get(struct hl_device *hdev)
Oded Gabbayd91389b2019-02-16 00:39:19 +02004644{
4645 struct goya_device *goya = hdev->asic_specific;
4646 struct asic_fixed_properties *prop = &hdev->asic_prop;
Oded Gabbayd91389b2019-02-16 00:39:19 +02004647 u64 dram_size;
Oded Gabbayd91389b2019-02-16 00:39:19 +02004648 int rc;
4649
4650 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
4651 return 0;
4652
Tomer Tayar3110c602019-03-04 10:22:09 +02004653 rc = hl_fw_armcp_info_get(hdev);
4654 if (rc)
4655 return rc;
Oded Gabbayd91389b2019-02-16 00:39:19 +02004656
Tomer Tayardf697bc2019-02-28 10:46:22 +02004657 dram_size = le64_to_cpu(prop->armcp_info.dram_size);
Oded Gabbayd91389b2019-02-16 00:39:19 +02004658 if (dram_size) {
4659 if ((!is_power_of_2(dram_size)) ||
4660 (dram_size < DRAM_PHYS_DEFAULT_SIZE)) {
4661 dev_err(hdev->dev,
4662 "F/W reported invalid DRAM size %llu. Trying to use default size\n",
4663 dram_size);
4664 dram_size = DRAM_PHYS_DEFAULT_SIZE;
4665 }
4666
4667 prop->dram_size = dram_size;
4668 prop->dram_end_address = prop->dram_base_address + dram_size;
4669 }
4670
Tomer Tayar3110c602019-03-04 10:22:09 +02004671 return 0;
Oded Gabbayd91389b2019-02-16 00:39:19 +02004672}
4673
4674static void goya_init_clock_gating(struct hl_device *hdev)
4675{
4676
4677}
4678
4679static void goya_disable_clock_gating(struct hl_device *hdev)
4680{
4681
4682}
Oded Gabbay9494a8d2019-02-16 00:39:17 +02004683
Tomer Tayarc811f7b2019-03-07 14:26:02 +02004684static bool goya_is_device_idle(struct hl_device *hdev, char *buf, size_t size)
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02004685{
4686 u64 offset, dma_qm_reg, tpc_qm_reg, tpc_cmdq_reg, tpc_cfg_reg;
4687 int i;
4688
4689 offset = mmDMA_QM_1_GLBL_STS0 - mmDMA_QM_0_GLBL_STS0;
4690
4691 for (i = 0 ; i < DMA_MAX_NUM ; i++) {
4692 dma_qm_reg = mmDMA_QM_0_GLBL_STS0 + i * offset;
4693
4694 if ((RREG32(dma_qm_reg) & DMA_QM_IDLE_MASK) !=
4695 DMA_QM_IDLE_MASK)
Tomer Tayarc811f7b2019-03-07 14:26:02 +02004696 return HL_ENG_BUSY(buf, size, "DMA%d_QM", i);
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02004697 }
4698
4699 offset = mmTPC1_QM_GLBL_STS0 - mmTPC0_QM_GLBL_STS0;
4700
4701 for (i = 0 ; i < TPC_MAX_NUM ; i++) {
4702 tpc_qm_reg = mmTPC0_QM_GLBL_STS0 + i * offset;
4703 tpc_cmdq_reg = mmTPC0_CMDQ_GLBL_STS0 + i * offset;
4704 tpc_cfg_reg = mmTPC0_CFG_STATUS + i * offset;
4705
4706 if ((RREG32(tpc_qm_reg) & TPC_QM_IDLE_MASK) !=
4707 TPC_QM_IDLE_MASK)
Tomer Tayarc811f7b2019-03-07 14:26:02 +02004708 return HL_ENG_BUSY(buf, size, "TPC%d_QM", i);
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02004709
4710 if ((RREG32(tpc_cmdq_reg) & TPC_CMDQ_IDLE_MASK) !=
4711 TPC_CMDQ_IDLE_MASK)
Tomer Tayarc811f7b2019-03-07 14:26:02 +02004712 return HL_ENG_BUSY(buf, size, "TPC%d_CMDQ", i);
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02004713
4714 if ((RREG32(tpc_cfg_reg) & TPC_CFG_IDLE_MASK) !=
4715 TPC_CFG_IDLE_MASK)
Tomer Tayarc811f7b2019-03-07 14:26:02 +02004716 return HL_ENG_BUSY(buf, size, "TPC%d_CFG", i);
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02004717 }
4718
4719 if ((RREG32(mmMME_QM_GLBL_STS0) & MME_QM_IDLE_MASK) !=
4720 MME_QM_IDLE_MASK)
Tomer Tayarc811f7b2019-03-07 14:26:02 +02004721 return HL_ENG_BUSY(buf, size, "MME_QM");
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02004722
4723 if ((RREG32(mmMME_CMDQ_GLBL_STS0) & MME_CMDQ_IDLE_MASK) !=
4724 MME_CMDQ_IDLE_MASK)
Tomer Tayarc811f7b2019-03-07 14:26:02 +02004725 return HL_ENG_BUSY(buf, size, "MME_CMDQ");
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02004726
4727 if ((RREG32(mmMME_ARCH_STATUS) & MME_ARCH_IDLE_MASK) !=
4728 MME_ARCH_IDLE_MASK)
Tomer Tayarc811f7b2019-03-07 14:26:02 +02004729 return HL_ENG_BUSY(buf, size, "MME_ARCH");
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02004730
4731 if (RREG32(mmMME_SHADOW_0_STATUS) & MME_SHADOW_IDLE_MASK)
Tomer Tayarc811f7b2019-03-07 14:26:02 +02004732 return HL_ENG_BUSY(buf, size, "MME");
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02004733
4734 return true;
4735}
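
/*
 * Editor's sketch: most engine checks in goya_is_device_idle() follow the
 * same pattern - an engine counts as idle only when all bits of its idle
 * mask are set in the status register. Hypothetical helper.
 */
static inline bool goya_sketch_engine_idle(u32 status, u32 idle_mask)
{
	return (status & idle_mask) == idle_mask;
}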
4736
Oded Gabbay9494a8d2019-02-16 00:39:17 +02004737static void goya_hw_queues_lock(struct hl_device *hdev)
4738{
4739 struct goya_device *goya = hdev->asic_specific;
4740
4741 spin_lock(&goya->hw_queues_lock);
4742}
4743
4744static void goya_hw_queues_unlock(struct hl_device *hdev)
4745{
4746 struct goya_device *goya = hdev->asic_specific;
4747
4748 spin_unlock(&goya->hw_queues_lock);
4749}
4750
Oded Gabbayd8dd7b02019-02-16 00:39:23 +02004751static u32 goya_get_pci_id(struct hl_device *hdev)
4752{
4753 return hdev->pdev->device;
4754}
4755
Oded Gabbay5e6e0232019-02-27 12:15:16 +02004756static int goya_get_eeprom_data(struct hl_device *hdev, void *data,
4757 size_t max_size)
Oded Gabbayd91389b2019-02-16 00:39:19 +02004758{
4759 struct goya_device *goya = hdev->asic_specific;
Oded Gabbayd91389b2019-02-16 00:39:19 +02004760
4761 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
4762 return 0;
4763
Tomer Tayar3110c602019-03-04 10:22:09 +02004764 return hl_fw_get_eeprom_data(hdev, data, max_size);
Oded Gabbayd91389b2019-02-16 00:39:19 +02004765}
4766
Oded Gabbayf8c8c7d2019-02-16 00:39:20 +02004767static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev)
4768{
4769 return RREG32(mmPSOC_GLOBAL_CONF_APP_STATUS);
4770}
4771
Oded Gabbay99b9d7b2019-02-16 00:39:13 +02004772static const struct hl_asic_funcs goya_funcs = {
4773 .early_init = goya_early_init,
4774 .early_fini = goya_early_fini,
Oded Gabbayd91389b2019-02-16 00:39:19 +02004775 .late_init = goya_late_init,
4776 .late_fini = goya_late_fini,
Oded Gabbay99b9d7b2019-02-16 00:39:13 +02004777 .sw_init = goya_sw_init,
4778 .sw_fini = goya_sw_fini,
Oded Gabbay839c4802019-02-16 00:39:16 +02004779 .hw_init = goya_hw_init,
4780 .hw_fini = goya_hw_fini,
Oded Gabbay1251f232019-02-16 00:39:18 +02004781 .halt_engines = goya_halt_engines,
Oded Gabbay99b9d7b2019-02-16 00:39:13 +02004782 .suspend = goya_suspend,
4783 .resume = goya_resume,
Oded Gabbaybe5d9262019-02-16 00:39:15 +02004784 .cb_mmap = goya_cb_mmap,
Oded Gabbay9494a8d2019-02-16 00:39:17 +02004785 .ring_doorbell = goya_ring_doorbell,
4786 .flush_pq_write = goya_flush_pq_write,
Oded Gabbay99b9d7b2019-02-16 00:39:13 +02004787 .dma_alloc_coherent = goya_dma_alloc_coherent,
4788 .dma_free_coherent = goya_dma_free_coherent,
Oded Gabbay9494a8d2019-02-16 00:39:17 +02004789 .get_int_queue_base = goya_get_int_queue_base,
4790 .test_queues = goya_test_queues,
4791 .dma_pool_zalloc = goya_dma_pool_zalloc,
4792 .dma_pool_free = goya_dma_pool_free,
4793 .cpu_accessible_dma_pool_alloc = goya_cpu_accessible_dma_pool_alloc,
4794 .cpu_accessible_dma_pool_free = goya_cpu_accessible_dma_pool_free,
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02004795 .hl_dma_unmap_sg = goya_dma_unmap_sg,
4796 .cs_parser = goya_cs_parser,
4797 .asic_dma_map_sg = goya_dma_map_sg,
4798 .get_dma_desc_list_size = goya_get_dma_desc_list_size,
4799 .add_end_of_cb_packets = goya_add_end_of_cb_packets,
Oded Gabbay1251f232019-02-16 00:39:18 +02004800 .update_eq_ci = goya_update_eq_ci,
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02004801 .context_switch = goya_context_switch,
4802 .restore_phase_topology = goya_restore_phase_topology,
Oded Gabbayc2164772019-02-16 00:39:24 +02004803 .debugfs_read32 = goya_debugfs_read32,
4804 .debugfs_write32 = goya_debugfs_write32,
Oded Gabbayd91389b2019-02-16 00:39:19 +02004805 .add_device_attr = goya_add_device_attr,
Oded Gabbay1251f232019-02-16 00:39:18 +02004806 .handle_eqe = goya_handle_eqe,
Oded Gabbayd91389b2019-02-16 00:39:19 +02004807 .set_pll_profile = goya_set_pll_profile,
Oded Gabbay1251f232019-02-16 00:39:18 +02004808 .get_events_stat = goya_get_events_stat,
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004809 .read_pte = goya_read_pte,
4810 .write_pte = goya_write_pte,
4811 .mmu_invalidate_cache = goya_mmu_invalidate_cache,
4812 .mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range,
Oded Gabbayf8c8c7d2019-02-16 00:39:20 +02004813 .send_heartbeat = goya_send_heartbeat,
Oded Gabbayd91389b2019-02-16 00:39:19 +02004814 .enable_clock_gating = goya_init_clock_gating,
4815 .disable_clock_gating = goya_disable_clock_gating,
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02004816 .is_device_idle = goya_is_device_idle,
Oded Gabbayf8c8c7d2019-02-16 00:39:20 +02004817 .soft_reset_late_init = goya_soft_reset_late_init,
Oded Gabbay9494a8d2019-02-16 00:39:17 +02004818 .hw_queues_lock = goya_hw_queues_lock,
4819 .hw_queues_unlock = goya_hw_queues_unlock,
Oded Gabbayd8dd7b02019-02-16 00:39:23 +02004820 .get_pci_id = goya_get_pci_id,
Oded Gabbayd91389b2019-02-16 00:39:19 +02004821 .get_eeprom_data = goya_get_eeprom_data,
Oded Gabbayf8c8c7d2019-02-16 00:39:20 +02004822 .send_cpu_message = goya_send_cpu_message,
Tomer Tayarb6f897d2019-03-05 16:48:42 +02004823 .get_hw_state = goya_get_hw_state,
4824 .pci_bars_map = goya_pci_bars_map,
4825 .set_dram_bar_base = goya_set_ddr_bar_base,
4826 .init_iatu = goya_init_iatu
Oded Gabbay99b9d7b2019-02-16 00:39:13 +02004827};
4828
4829/*
4830 * goya_set_asic_funcs - set Goya function pointers
4831 *
4832 * @*hdev: pointer to hl_device structure
4833 *
4834 */
4835void goya_set_asic_funcs(struct hl_device *hdev)
4836{
4837 hdev->asic_funcs = &goya_funcs;
4838}