// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "goyaP.h"
#include "include/hw_ip/mmu/mmu_general.h"
#include "include/hw_ip/mmu/mmu_v1_0.h"
#include "include/goya/asic_reg/goya_masks.h"

#include <linux/pci.h>
#include <linux/genalloc.h>
#include <linux/firmware.h>
#include <linux/hwmon.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/io-64-nonatomic-hi-lo.h>

/*
 * GOYA security scheme:
 *
 * 1. Host is protected by:
 *        - Range registers (when MMU is enabled, DMA RR does NOT protect host)
 *        - MMU
 *
 * 2. DRAM is protected by:
 *        - Range registers (protect the first 512MB)
 *        - MMU (isolation between users)
 *
 * 3. Configuration is protected by:
 *        - Range registers
 *        - Protection bits
 *
 * When MMU is disabled:
 *
 * QMAN DMA: PQ, CQ, CP and DMA are secured.
 * PQ, CB and the data are on the host.
 *
 * QMAN TPC/MME:
 * PQ, CQ and CP are not secured.
 * PQ, CB and the data are on the SRAM/DRAM.
 *
 * Since QMAN DMA is secured, KMD parses the DMA CB:
 *     - KMD checks the DMA pointer
 *     - WREG and MSG_PROT are not allowed.
 *     - MSG_LONG/SHORT are allowed.
 *
 * A read/write transaction by the QMAN to a protected area will succeed if
 * and only if the QMAN's CP is secured and MSG_PROT is used.
 *
 * When MMU is enabled:
 *
 * QMAN DMA: PQ, CQ and CP are secured.
 * MMU is set to bypass on the Secure props register of the QMAN.
 * The reasons we don't enable MMU for PQ, CQ and CP are:
 *     - PQ entry is in kernel address space and KMD doesn't map it.
 *     - CP writes to MSIX register and to kernel address space (completion
 *       queue).
 *
 * DMA is not secured, but because CP is secured, KMD still needs to parse
 * the CB. However, it doesn't need to check the DMA addresses.
 *
 * For QMAN DMA 0, DMA is also secured because only KMD uses this DMA and KMD
 * doesn't map memory in MMU.
 *
 * QMAN TPC/MME: PQ, CQ and CP aren't secured (no change from MMU disabled
 * mode).
 *
 * DMA RR does NOT protect the host because DMA is not secured.
 */

#define GOYA_MMU_REGS_NUM		61

#define GOYA_DMA_POOL_BLK_SIZE		0x100		/* 256 bytes */

#define GOYA_RESET_TIMEOUT_MSEC		500		/* 500ms */
#define GOYA_PLDM_RESET_TIMEOUT_MSEC	20000		/* 20s */
#define GOYA_RESET_WAIT_MSEC		1		/* 1ms */
#define GOYA_CPU_RESET_WAIT_MSEC	100		/* 100ms */
#define GOYA_PLDM_RESET_WAIT_MSEC	1000		/* 1s */
#define GOYA_CPU_TIMEOUT_USEC		10000000	/* 10s */
#define GOYA_TEST_QUEUE_WAIT_USEC	100000		/* 100ms */
#define GOYA_PLDM_MMU_TIMEOUT_USEC	(MMU_CONFIG_TIMEOUT_USEC * 100)

#define GOYA_QMAN0_FENCE_VAL		0xD169B243

#define GOYA_MAX_INITIATORS		20

#define GOYA_MAX_STRING_LEN		20

#define GOYA_CB_POOL_CB_CNT		512
#define GOYA_CB_POOL_CB_SIZE		0x20000		/* 128KB */

static const char goya_irq_name[GOYA_MSIX_ENTRIES][GOYA_MAX_STRING_LEN] = {
		"goya cq 0", "goya cq 1", "goya cq 2", "goya cq 3",
		"goya cq 4", "goya cpu eq"
};

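/*
 * Size in bytes of each QMAN packet type, indexed by packet ID. The driver
 * uses this table when walking a command buffer to advance from one packet
 * to the next.
 */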
static u16 goya_packet_sizes[MAX_PACKET_ID] = {
	[PACKET_WREG_32]	= sizeof(struct packet_wreg32),
	[PACKET_WREG_BULK]	= sizeof(struct packet_wreg_bulk),
	[PACKET_MSG_LONG]	= sizeof(struct packet_msg_long),
	[PACKET_MSG_SHORT]	= sizeof(struct packet_msg_short),
	[PACKET_CP_DMA]		= sizeof(struct packet_cp_dma),
	[PACKET_MSG_PROT]	= sizeof(struct packet_msg_prot),
	[PACKET_FENCE]		= sizeof(struct packet_fence),
	[PACKET_LIN_DMA]	= sizeof(struct packet_lin_dma),
	[PACKET_NOP]		= sizeof(struct packet_nop),
	[PACKET_STOP]		= sizeof(struct packet_stop)
};

static u64 goya_mmu_regs[GOYA_MMU_REGS_NUM] = {
	mmDMA_QM_0_GLBL_NON_SECURE_PROPS,
	mmDMA_QM_1_GLBL_NON_SECURE_PROPS,
	mmDMA_QM_2_GLBL_NON_SECURE_PROPS,
	mmDMA_QM_3_GLBL_NON_SECURE_PROPS,
	mmDMA_QM_4_GLBL_NON_SECURE_PROPS,
	mmTPC0_QM_GLBL_SECURE_PROPS,
	mmTPC0_QM_GLBL_NON_SECURE_PROPS,
	mmTPC0_CMDQ_GLBL_SECURE_PROPS,
	mmTPC0_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC0_CFG_ARUSER,
	mmTPC0_CFG_AWUSER,
	mmTPC1_QM_GLBL_SECURE_PROPS,
	mmTPC1_QM_GLBL_NON_SECURE_PROPS,
	mmTPC1_CMDQ_GLBL_SECURE_PROPS,
	mmTPC1_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC1_CFG_ARUSER,
	mmTPC1_CFG_AWUSER,
	mmTPC2_QM_GLBL_SECURE_PROPS,
	mmTPC2_QM_GLBL_NON_SECURE_PROPS,
	mmTPC2_CMDQ_GLBL_SECURE_PROPS,
	mmTPC2_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC2_CFG_ARUSER,
	mmTPC2_CFG_AWUSER,
	mmTPC3_QM_GLBL_SECURE_PROPS,
	mmTPC3_QM_GLBL_NON_SECURE_PROPS,
	mmTPC3_CMDQ_GLBL_SECURE_PROPS,
	mmTPC3_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC3_CFG_ARUSER,
	mmTPC3_CFG_AWUSER,
	mmTPC4_QM_GLBL_SECURE_PROPS,
	mmTPC4_QM_GLBL_NON_SECURE_PROPS,
	mmTPC4_CMDQ_GLBL_SECURE_PROPS,
	mmTPC4_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC4_CFG_ARUSER,
	mmTPC4_CFG_AWUSER,
	mmTPC5_QM_GLBL_SECURE_PROPS,
	mmTPC5_QM_GLBL_NON_SECURE_PROPS,
	mmTPC5_CMDQ_GLBL_SECURE_PROPS,
	mmTPC5_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC5_CFG_ARUSER,
	mmTPC5_CFG_AWUSER,
	mmTPC6_QM_GLBL_SECURE_PROPS,
	mmTPC6_QM_GLBL_NON_SECURE_PROPS,
	mmTPC6_CMDQ_GLBL_SECURE_PROPS,
	mmTPC6_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC6_CFG_ARUSER,
	mmTPC6_CFG_AWUSER,
	mmTPC7_QM_GLBL_SECURE_PROPS,
	mmTPC7_QM_GLBL_NON_SECURE_PROPS,
	mmTPC7_CMDQ_GLBL_SECURE_PROPS,
	mmTPC7_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC7_CFG_ARUSER,
	mmTPC7_CFG_AWUSER,
	mmMME_QM_GLBL_SECURE_PROPS,
	mmMME_QM_GLBL_NON_SECURE_PROPS,
	mmMME_CMDQ_GLBL_SECURE_PROPS,
	mmMME_CMDQ_GLBL_NON_SECURE_PROPS,
	mmMME_SBA_CONTROL_DATA,
	mmMME_SBB_CONTROL_DATA,
	mmMME_SBC_CONTROL_DATA,
	mmMME_WBC_CONTROL_DATA
};

#define GOYA_ASYNC_EVENT_GROUP_NON_FATAL_SIZE	121

static u32 goya_non_fatal_events[GOYA_ASYNC_EVENT_GROUP_NON_FATAL_SIZE] = {
	GOYA_ASYNC_EVENT_ID_PCIE_IF,
	GOYA_ASYNC_EVENT_ID_TPC0_ECC,
	GOYA_ASYNC_EVENT_ID_TPC1_ECC,
	GOYA_ASYNC_EVENT_ID_TPC2_ECC,
	GOYA_ASYNC_EVENT_ID_TPC3_ECC,
	GOYA_ASYNC_EVENT_ID_TPC4_ECC,
	GOYA_ASYNC_EVENT_ID_TPC5_ECC,
	GOYA_ASYNC_EVENT_ID_TPC6_ECC,
	GOYA_ASYNC_EVENT_ID_TPC7_ECC,
	GOYA_ASYNC_EVENT_ID_MME_ECC,
	GOYA_ASYNC_EVENT_ID_MME_ECC_EXT,
	GOYA_ASYNC_EVENT_ID_MMU_ECC,
	GOYA_ASYNC_EVENT_ID_DMA_MACRO,
	GOYA_ASYNC_EVENT_ID_DMA_ECC,
	GOYA_ASYNC_EVENT_ID_CPU_IF_ECC,
	GOYA_ASYNC_EVENT_ID_PSOC_MEM,
	GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT,
	GOYA_ASYNC_EVENT_ID_SRAM0,
	GOYA_ASYNC_EVENT_ID_SRAM1,
	GOYA_ASYNC_EVENT_ID_SRAM2,
	GOYA_ASYNC_EVENT_ID_SRAM3,
	GOYA_ASYNC_EVENT_ID_SRAM4,
	GOYA_ASYNC_EVENT_ID_SRAM5,
	GOYA_ASYNC_EVENT_ID_SRAM6,
	GOYA_ASYNC_EVENT_ID_SRAM7,
	GOYA_ASYNC_EVENT_ID_SRAM8,
	GOYA_ASYNC_EVENT_ID_SRAM9,
	GOYA_ASYNC_EVENT_ID_SRAM10,
	GOYA_ASYNC_EVENT_ID_SRAM11,
	GOYA_ASYNC_EVENT_ID_SRAM12,
	GOYA_ASYNC_EVENT_ID_SRAM13,
	GOYA_ASYNC_EVENT_ID_SRAM14,
	GOYA_ASYNC_EVENT_ID_SRAM15,
	GOYA_ASYNC_EVENT_ID_SRAM16,
	GOYA_ASYNC_EVENT_ID_SRAM17,
	GOYA_ASYNC_EVENT_ID_SRAM18,
	GOYA_ASYNC_EVENT_ID_SRAM19,
	GOYA_ASYNC_EVENT_ID_SRAM20,
	GOYA_ASYNC_EVENT_ID_SRAM21,
	GOYA_ASYNC_EVENT_ID_SRAM22,
	GOYA_ASYNC_EVENT_ID_SRAM23,
	GOYA_ASYNC_EVENT_ID_SRAM24,
	GOYA_ASYNC_EVENT_ID_SRAM25,
	GOYA_ASYNC_EVENT_ID_SRAM26,
	GOYA_ASYNC_EVENT_ID_SRAM27,
	GOYA_ASYNC_EVENT_ID_SRAM28,
	GOYA_ASYNC_EVENT_ID_SRAM29,
	GOYA_ASYNC_EVENT_ID_GIC500,
	GOYA_ASYNC_EVENT_ID_PLL0,
	GOYA_ASYNC_EVENT_ID_PLL1,
	GOYA_ASYNC_EVENT_ID_PLL3,
	GOYA_ASYNC_EVENT_ID_PLL4,
	GOYA_ASYNC_EVENT_ID_PLL5,
	GOYA_ASYNC_EVENT_ID_PLL6,
	GOYA_ASYNC_EVENT_ID_AXI_ECC,
	GOYA_ASYNC_EVENT_ID_L2_RAM_ECC,
	GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET,
	GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT,
	GOYA_ASYNC_EVENT_ID_PCIE_DEC,
	GOYA_ASYNC_EVENT_ID_TPC0_DEC,
	GOYA_ASYNC_EVENT_ID_TPC1_DEC,
	GOYA_ASYNC_EVENT_ID_TPC2_DEC,
	GOYA_ASYNC_EVENT_ID_TPC3_DEC,
	GOYA_ASYNC_EVENT_ID_TPC4_DEC,
	GOYA_ASYNC_EVENT_ID_TPC5_DEC,
	GOYA_ASYNC_EVENT_ID_TPC6_DEC,
	GOYA_ASYNC_EVENT_ID_TPC7_DEC,
	GOYA_ASYNC_EVENT_ID_MME_WACS,
	GOYA_ASYNC_EVENT_ID_MME_WACSD,
	GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER,
	GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC,
	GOYA_ASYNC_EVENT_ID_PSOC,
	GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC0_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC1_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC2_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC3_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC4_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC5_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC6_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC7_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC0_QM,
	GOYA_ASYNC_EVENT_ID_TPC1_QM,
	GOYA_ASYNC_EVENT_ID_TPC2_QM,
	GOYA_ASYNC_EVENT_ID_TPC3_QM,
	GOYA_ASYNC_EVENT_ID_TPC4_QM,
	GOYA_ASYNC_EVENT_ID_TPC5_QM,
	GOYA_ASYNC_EVENT_ID_TPC6_QM,
	GOYA_ASYNC_EVENT_ID_TPC7_QM,
	GOYA_ASYNC_EVENT_ID_MME_QM,
	GOYA_ASYNC_EVENT_ID_MME_CMDQ,
	GOYA_ASYNC_EVENT_ID_DMA0_QM,
	GOYA_ASYNC_EVENT_ID_DMA1_QM,
	GOYA_ASYNC_EVENT_ID_DMA2_QM,
	GOYA_ASYNC_EVENT_ID_DMA3_QM,
	GOYA_ASYNC_EVENT_ID_DMA4_QM,
	GOYA_ASYNC_EVENT_ID_DMA0_CH,
	GOYA_ASYNC_EVENT_ID_DMA1_CH,
	GOYA_ASYNC_EVENT_ID_DMA2_CH,
	GOYA_ASYNC_EVENT_ID_DMA3_CH,
	GOYA_ASYNC_EVENT_ID_DMA4_CH,
	GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH0,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH1,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH2,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH3,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH4
};

static int goya_armcp_info_get(struct hl_device *hdev);
static void goya_mmu_prepare(struct hl_device *hdev, u32 asid);
static int goya_mmu_clear_pgt_range(struct hl_device *hdev);
static int goya_mmu_set_dram_default_page(struct hl_device *hdev);
static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
					u64 phys_addr);

static void goya_get_fixed_properties(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int i;

	for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
		prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
		prop->hw_queues_props[i].kmd_only = 0;
	}

	for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES ; i++) {
		prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
		prop->hw_queues_props[i].kmd_only = 1;
	}

	for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES +
			NUMBER_OF_INT_HW_QUEUES; i++) {
		prop->hw_queues_props[i].type = QUEUE_TYPE_INT;
		prop->hw_queues_props[i].kmd_only = 0;
	}

	for (; i < HL_MAX_QUEUES; i++)
		prop->hw_queues_props[i].type = QUEUE_TYPE_NA;

	prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;

	prop->dram_base_address = DRAM_PHYS_BASE;
	prop->dram_size = DRAM_PHYS_DEFAULT_SIZE;
	prop->dram_end_address = prop->dram_base_address + prop->dram_size;
	prop->dram_user_base_address = DRAM_BASE_ADDR_USER;

	prop->sram_base_address = SRAM_BASE_ADDR;
	prop->sram_size = SRAM_SIZE;
	prop->sram_end_address = prop->sram_base_address + prop->sram_size;
	prop->sram_user_base_address = prop->sram_base_address +
						SRAM_USER_BASE_OFFSET;

	prop->mmu_pgt_addr = MMU_PAGE_TABLES_ADDR;
	prop->mmu_dram_default_page_addr = MMU_DRAM_DEFAULT_PAGE_ADDR;
	if (hdev->pldm)
		prop->mmu_pgt_size = 0x800000; /* 8MB */
	else
		prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE;
	prop->mmu_pte_size = HL_PTE_SIZE;
	prop->mmu_hop_table_size = HOP_TABLE_SIZE;
	prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE;
	prop->dram_page_size = PAGE_SIZE_2MB;

	prop->host_phys_base_address = HOST_PHYS_BASE;
	prop->va_space_host_start_address = VA_HOST_SPACE_START;
	prop->va_space_host_end_address = VA_HOST_SPACE_END;
	prop->va_space_dram_start_address = VA_DDR_SPACE_START;
	prop->va_space_dram_end_address = VA_DDR_SPACE_END;
	prop->dram_size_for_default_page_mapping =
			prop->va_space_dram_end_address;
	prop->cfg_size = CFG_SIZE;
	prop->max_asid = MAX_ASID;
	prop->num_of_events = GOYA_ASYNC_EVENT_ID_SIZE;
	prop->cb_pool_cb_cnt = GOYA_CB_POOL_CB_CNT;
	prop->cb_pool_cb_size = GOYA_CB_POOL_CB_SIZE;
	prop->max_power_default = MAX_POWER_DEFAULT;
	prop->tpc_enabled_mask = TPC_ENABLED_MASK;

	prop->high_pll = PLL_HIGH_DEFAULT;
}

int goya_send_pci_access_msg(struct hl_device *hdev, u32 opcode)
{
	struct armcp_packet pkt;

	memset(&pkt, 0, sizeof(pkt));

	pkt.ctl = opcode << ARMCP_PKT_CTL_OPCODE_SHIFT;

	return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt,
			sizeof(pkt), HL_DEVICE_TIMEOUT_USEC, NULL);
}

/*
 * goya_pci_bars_map - Map PCI BARs of the Goya device
 *
 * @hdev: pointer to hl_device structure
 *
 * Request PCI regions and map them to kernel virtual addresses.
 * Returns 0 on success
 */
static int goya_pci_bars_map(struct hl_device *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int rc;

	rc = pci_request_regions(pdev, HL_NAME);
	if (rc) {
		dev_err(hdev->dev, "Cannot obtain PCI resources\n");
		return rc;
	}

	hdev->pcie_bar[SRAM_CFG_BAR_ID] =
			pci_ioremap_bar(pdev, SRAM_CFG_BAR_ID);
	if (!hdev->pcie_bar[SRAM_CFG_BAR_ID]) {
		dev_err(hdev->dev, "pci_ioremap_bar failed for CFG\n");
		rc = -ENODEV;
		goto err_release_regions;
	}

	hdev->pcie_bar[MSIX_BAR_ID] = pci_ioremap_bar(pdev, MSIX_BAR_ID);
	if (!hdev->pcie_bar[MSIX_BAR_ID]) {
		dev_err(hdev->dev, "pci_ioremap_bar failed for MSIX\n");
		rc = -ENODEV;
		goto err_unmap_sram_cfg;
	}

	hdev->pcie_bar[DDR_BAR_ID] = pci_ioremap_wc_bar(pdev, DDR_BAR_ID);
	if (!hdev->pcie_bar[DDR_BAR_ID]) {
		dev_err(hdev->dev, "pci_ioremap_bar failed for DDR\n");
		rc = -ENODEV;
		goto err_unmap_msix;
	}

	hdev->rmmio = hdev->pcie_bar[SRAM_CFG_BAR_ID] +
			(CFG_BASE - SRAM_BASE_ADDR);

	return 0;

err_unmap_msix:
	iounmap(hdev->pcie_bar[MSIX_BAR_ID]);
err_unmap_sram_cfg:
	iounmap(hdev->pcie_bar[SRAM_CFG_BAR_ID]);
err_release_regions:
	pci_release_regions(pdev);

	return rc;
}

/*
 * goya_pci_bars_unmap - Unmap PCI BARs of the Goya device
 *
 * @hdev: pointer to hl_device structure
 *
 * Release all PCI BARs and unmap their virtual addresses
 */
static void goya_pci_bars_unmap(struct hl_device *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	iounmap(hdev->pcie_bar[DDR_BAR_ID]);
	iounmap(hdev->pcie_bar[MSIX_BAR_ID]);
	iounmap(hdev->pcie_bar[SRAM_CFG_BAR_ID]);
	pci_release_regions(pdev);
}

/*
 * goya_elbi_write - Write through the ELBI interface
 *
 * @hdev: pointer to hl_device structure
 *
 * Return 0 on success, negative value (-EIO) on failure
 */
static int goya_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
{
	struct pci_dev *pdev = hdev->pdev;
	ktime_t timeout;
	u32 val;

	/* Clear previous status */
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, 0);

	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_ADDR, (u32) addr);
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data);
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_CTRL,
				PCI_CONFIG_ELBI_CTRL_WRITE);

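	/* Give the transaction up to 10ms to report any status at all */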
	timeout = ktime_add_ms(ktime_get(), 10);
	for (;;) {
		pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, &val);
		if (val & PCI_CONFIG_ELBI_STS_MASK)
			break;
		if (ktime_compare(ktime_get(), timeout) > 0) {
			pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS,
						&val);
			break;
		}
		usleep_range(300, 500);
	}

	if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE)
		return 0;

	if (val & PCI_CONFIG_ELBI_STS_ERR) {
		dev_err(hdev->dev, "Error writing to ELBI\n");
		return -EIO;
	}

	if (!(val & PCI_CONFIG_ELBI_STS_MASK)) {
		dev_err(hdev->dev, "ELBI write didn't finish in time\n");
		return -EIO;
	}

	dev_err(hdev->dev, "ELBI write has undefined bits in status\n");
	return -EIO;
}

/*
 * goya_iatu_write - iATU write routine
 *
 * @hdev: pointer to hl_device structure
 */
static int goya_iatu_write(struct hl_device *hdev, u32 addr, u32 data)
{
	u32 dbi_offset;
	int rc;

	dbi_offset = addr & 0xFFF;

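	/*
	 * Move the DBI window to offset 0x00300000, presumably the unrolled
	 * iATU region of the controller, then write the register through the
	 * DBI access itself using the low 12 bits of the address.
	 */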
	rc = goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0x00300000);
	rc |= goya_elbi_write(hdev, mmPCIE_DBI_BASE + dbi_offset, data);

	if (rc)
		return -EIO;

	return 0;
}

static void goya_reset_link_through_bridge(struct hl_device *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct pci_dev *parent_port;
	u16 val;

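	/*
	 * Issue a secondary bus reset through the parent bridge: assert the
	 * bus-reset bit, wait, then de-assert it and give the link time to
	 * retrain before touching the device again.
	 */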
	parent_port = pdev->bus->self;
	pci_read_config_word(parent_port, PCI_BRIDGE_CONTROL, &val);
	val |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
	ssleep(1);

	val &= ~(PCI_BRIDGE_CTL_BUS_RESET);
	pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
	ssleep(3);
}

/*
 * goya_set_ddr_bar_base - set DDR BAR to map specific device address
 *
 * @hdev: pointer to hl_device structure
 * @addr: address in DDR. Must be aligned to DDR BAR size
 *
 * This function configures the iATU so that the DDR BAR will start at the
 * specified addr.
 */
static int goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr)
{
	struct goya_device *goya = hdev->asic_specific;
	int rc;

	if ((goya) && (goya->ddr_bar_cur_addr == addr))
		return 0;

	/* Inbound Region 1 - Bar 4 - Point to DDR */
	rc = goya_iatu_write(hdev, 0x314, lower_32_bits(addr));
	rc |= goya_iatu_write(hdev, 0x318, upper_32_bits(addr));
	rc |= goya_iatu_write(hdev, 0x300, 0);
	/* Enable + Bar match + match enable + Bar 4 */
	rc |= goya_iatu_write(hdev, 0x304, 0xC0080400);

	/* Return the DBI window to the default location */
	rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0);
	rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI_32, 0);

	if (rc) {
		dev_err(hdev->dev, "failed to map DDR bar to 0x%08llx\n", addr);
		return -EIO;
	}

	if (goya)
		goya->ddr_bar_cur_addr = addr;

	return 0;
}

/*
 * goya_init_iatu - Initialize the iATU unit inside the PCI controller
 *
 * @hdev: pointer to hl_device structure
 *
 * This is needed in case the firmware doesn't initialize the iATU
 */
static int goya_init_iatu(struct hl_device *hdev)
{
	int rc;

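	/*
	 * The raw offsets below appear to follow the unrolled iATU register
	 * layout (0x200 per region, inbound registers at +0x100), e.g.
	 * 0x114/0x118 would be the lower/upper target address of inbound
	 * region 0. This mapping is an assumption based on common DesignWare
	 * PCIe controller documentation, not something stated in this file.
	 */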
	/* Inbound Region 0 - Bar 0 - Point to SRAM_BASE_ADDR */
	rc = goya_iatu_write(hdev, 0x114, lower_32_bits(SRAM_BASE_ADDR));
	rc |= goya_iatu_write(hdev, 0x118, upper_32_bits(SRAM_BASE_ADDR));
	rc |= goya_iatu_write(hdev, 0x100, 0);
	/* Enable + Bar match + match enable */
	rc |= goya_iatu_write(hdev, 0x104, 0xC0080000);

	/* Inbound Region 1 - Bar 4 - Point to DDR */
	rc |= goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);

	/* Outbound Region 0 - Point to Host */
	rc |= goya_iatu_write(hdev, 0x008, lower_32_bits(HOST_PHYS_BASE));
	rc |= goya_iatu_write(hdev, 0x00C, upper_32_bits(HOST_PHYS_BASE));
	rc |= goya_iatu_write(hdev, 0x010,
			lower_32_bits(HOST_PHYS_BASE + HOST_PHYS_SIZE - 1));
	rc |= goya_iatu_write(hdev, 0x014, 0);
	rc |= goya_iatu_write(hdev, 0x018, 0);
	rc |= goya_iatu_write(hdev, 0x020,
			upper_32_bits(HOST_PHYS_BASE + HOST_PHYS_SIZE - 1));
	/* Increase region size */
	rc |= goya_iatu_write(hdev, 0x000, 0x00002000);
	/* Enable */
	rc |= goya_iatu_write(hdev, 0x004, 0x80000000);

	/* Return the DBI window to the default location */
	rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0);
	rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI_32, 0);

	if (rc)
		return -EIO;

	return 0;
}

/*
 * goya_early_init - GOYA early initialization code
 *
 * @hdev: pointer to hl_device structure
 *
 * Verify PCI bars
 * Set DMA masks
 * PCI controller initialization
 * Map PCI bars
 */
static int goya_early_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct pci_dev *pdev = hdev->pdev;
	u32 val;
	int rc;

	goya_get_fixed_properties(hdev);

	/* Check BAR sizes */
	if (pci_resource_len(pdev, SRAM_CFG_BAR_ID) != CFG_BAR_SIZE) {
		dev_err(hdev->dev,
			"Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
			SRAM_CFG_BAR_ID,
			(unsigned long long) pci_resource_len(pdev,
							SRAM_CFG_BAR_ID),
			CFG_BAR_SIZE);
		return -ENODEV;
	}

	if (pci_resource_len(pdev, MSIX_BAR_ID) != MSIX_BAR_SIZE) {
		dev_err(hdev->dev,
			"Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
			MSIX_BAR_ID,
			(unsigned long long) pci_resource_len(pdev,
								MSIX_BAR_ID),
			MSIX_BAR_SIZE);
		return -ENODEV;
	}

	prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID);

	/* Set DMA mask for GOYA, falling back to 32 bits if 39 fails */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
	if (rc) {
		dev_warn(hdev->dev, "Unable to set pci dma mask to 39 bits\n");
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(hdev->dev,
				"Unable to set pci dma mask to 32 bits\n");
			return rc;
		}
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
	if (rc) {
		dev_warn(hdev->dev,
			"Unable to set pci consistent dma mask to 39 bits\n");
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(hdev->dev,
				"Unable to set pci consistent dma mask to 32 bits\n");
			return rc;
		}
	}

	if (hdev->reset_pcilink)
		goya_reset_link_through_bridge(hdev);

	rc = pci_enable_device_mem(pdev);
	if (rc) {
		dev_err(hdev->dev, "can't enable PCI device\n");
		return rc;
	}

	pci_set_master(pdev);

	rc = goya_init_iatu(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to initialize iATU\n");
		goto disable_device;
	}

	rc = goya_pci_bars_map(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to map PCI BARs\n");
		goto disable_device;
	}

	if (!hdev->pldm) {
		val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
		if (val & PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK)
			dev_warn(hdev->dev,
				"PCI strap is not configured correctly, PCI bus errors may occur\n");
	}

	return 0;

disable_device:
	pci_clear_master(pdev);
	pci_disable_device(pdev);

	return rc;
}

/*
 * goya_early_fini - GOYA early finalization code
 *
 * @hdev: pointer to hl_device structure
 *
 * Unmap PCI bars
 */
static int goya_early_fini(struct hl_device *hdev)
{
	goya_pci_bars_unmap(hdev);

	pci_clear_master(hdev->pdev);
	pci_disable_device(hdev->pdev);

	return 0;
}

/*
 * goya_fetch_psoc_frequency - Fetch PSOC frequency values
 *
 * @hdev: pointer to hl_device structure
 */
static void goya_fetch_psoc_frequency(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	prop->psoc_pci_pll_nr = RREG32(mmPSOC_PCI_PLL_NR);
	prop->psoc_pci_pll_nf = RREG32(mmPSOC_PCI_PLL_NF);
	prop->psoc_pci_pll_od = RREG32(mmPSOC_PCI_PLL_OD);
	prop->psoc_pci_pll_div_factor = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
}

/*
 * goya_late_init - GOYA late initialization code
 *
 * @hdev: pointer to hl_device structure
 *
 * Get ArmCP info and send message to CPU to enable PCI access
 */
static int goya_late_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct goya_device *goya = hdev->asic_specific;
	int rc;

	rc = goya->armcp_info_get(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to get armcp info\n");
		return rc;
	}

	/* Now that we have the DRAM size in ASIC prop, we need to check
	 * its size and configure the DMA_IF DDR wrap protection (which is in
	 * the MMU block) accordingly. The value is the log2 of the DRAM size.
	 */
	WREG32(mmMMU_LOG2_DDR_SIZE, ilog2(prop->dram_size));

	rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
	if (rc) {
		dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
		return rc;
	}

	WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
			GOYA_ASYNC_EVENT_ID_INTS_REGISTER);

	goya_fetch_psoc_frequency(hdev);

	rc = goya_mmu_clear_pgt_range(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to clear MMU page tables range\n");
		goto disable_pci_access;
	}

	rc = goya_mmu_set_dram_default_page(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to set DRAM default page\n");
		goto disable_pci_access;
	}

	return 0;

disable_pci_access:
	goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);

	return rc;
}

/*
 * goya_late_fini - GOYA late tear-down code
 *
 * @hdev: pointer to hl_device structure
 *
 * Free sensors allocated structures
 */
void goya_late_fini(struct hl_device *hdev)
{
	const struct hwmon_channel_info **channel_info_arr;
	int i = 0;

	if (!hdev->hl_chip_info->info)
		return;

	channel_info_arr = hdev->hl_chip_info->info;

	while (channel_info_arr[i]) {
		kfree(channel_info_arr[i]->config);
		kfree(channel_info_arr[i]);
		i++;
	}

	kfree(channel_info_arr);

	hdev->hl_chip_info->info = NULL;
}

/*
 * goya_sw_init - Goya software initialization code
 *
 * @hdev: pointer to hl_device structure
 */
static int goya_sw_init(struct hl_device *hdev)
{
	struct goya_device *goya;
	int rc;

	/* Allocate device structure */
	goya = kzalloc(sizeof(*goya), GFP_KERNEL);
	if (!goya)
		return -ENOMEM;

	goya->test_cpu_queue = goya_test_cpu_queue;
	goya->armcp_info_get = goya_armcp_info_get;

	/* according to goya_init_iatu */
	goya->ddr_bar_cur_addr = DRAM_PHYS_BASE;

	goya->mme_clk = GOYA_PLL_FREQ_LOW;
	goya->tpc_clk = GOYA_PLL_FREQ_LOW;
	goya->ic_clk = GOYA_PLL_FREQ_LOW;

	hdev->asic_specific = goya;

	/* Create DMA pool for small allocations */
	hdev->dma_pool = dma_pool_create(dev_name(hdev->dev),
			&hdev->pdev->dev, GOYA_DMA_POOL_BLK_SIZE, 8, 0);
	if (!hdev->dma_pool) {
		dev_err(hdev->dev, "failed to create DMA pool\n");
		rc = -ENOMEM;
		goto free_goya_device;
	}

	hdev->cpu_accessible_dma_mem =
			hdev->asic_funcs->dma_alloc_coherent(hdev,
					CPU_ACCESSIBLE_MEM_SIZE,
					&hdev->cpu_accessible_dma_address,
					GFP_KERNEL | __GFP_ZERO);

	if (!hdev->cpu_accessible_dma_mem) {
		dev_err(hdev->dev,
			"failed to allocate %d bytes of dma memory for CPU accessible memory space\n",
			CPU_ACCESSIBLE_MEM_SIZE);
		rc = -ENOMEM;
		goto free_dma_pool;
	}

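	/*
	 * Carve the CPU-accessible coherent buffer into a gen_pool so small
	 * packets exchanged with the embedded CPU can be sub-allocated from
	 * it.
	 */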
	hdev->cpu_accessible_dma_pool = gen_pool_create(CPU_PKT_SHIFT, -1);
	if (!hdev->cpu_accessible_dma_pool) {
		dev_err(hdev->dev,
			"Failed to create CPU accessible DMA pool\n");
		rc = -ENOMEM;
		goto free_cpu_pq_dma_mem;
	}

	rc = gen_pool_add(hdev->cpu_accessible_dma_pool,
			(uintptr_t) hdev->cpu_accessible_dma_mem,
			CPU_ACCESSIBLE_MEM_SIZE, -1);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to add memory to CPU accessible DMA pool\n");
		rc = -EFAULT;
		goto free_cpu_pq_pool;
	}

	spin_lock_init(&goya->hw_queues_lock);

	return 0;

free_cpu_pq_pool:
	gen_pool_destroy(hdev->cpu_accessible_dma_pool);
free_cpu_pq_dma_mem:
	hdev->asic_funcs->dma_free_coherent(hdev, CPU_ACCESSIBLE_MEM_SIZE,
			hdev->cpu_accessible_dma_mem,
			hdev->cpu_accessible_dma_address);
free_dma_pool:
	dma_pool_destroy(hdev->dma_pool);
free_goya_device:
	kfree(goya);

	return rc;
}

/*
 * goya_sw_fini - Goya software tear-down code
 *
 * @hdev: pointer to hl_device structure
 */
static int goya_sw_fini(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;

	gen_pool_destroy(hdev->cpu_accessible_dma_pool);

	hdev->asic_funcs->dma_free_coherent(hdev, CPU_ACCESSIBLE_MEM_SIZE,
			hdev->cpu_accessible_dma_mem,
			hdev->cpu_accessible_dma_address);

	dma_pool_destroy(hdev->dma_pool);

	kfree(goya);

	return 0;
}

static void goya_init_dma_qman(struct hl_device *hdev, int dma_id,
				dma_addr_t bus_address)
{
	struct goya_device *goya = hdev->asic_specific;
	u32 mtr_base_lo, mtr_base_hi;
	u32 so_base_lo, so_base_hi;
	u32 gic_base_lo, gic_base_hi;
	u32 reg_off = dma_id * (mmDMA_QM_1_PQ_PI - mmDMA_QM_0_PQ_PI);

	mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
	mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
	so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
	so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);

	gic_base_lo =
		lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
	gic_base_hi =
		upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);

	WREG32(mmDMA_QM_0_PQ_BASE_LO + reg_off, lower_32_bits(bus_address));
	WREG32(mmDMA_QM_0_PQ_BASE_HI + reg_off, upper_32_bits(bus_address));

	WREG32(mmDMA_QM_0_PQ_SIZE + reg_off, ilog2(HL_QUEUE_LENGTH));
	WREG32(mmDMA_QM_0_PQ_PI + reg_off, 0);
	WREG32(mmDMA_QM_0_PQ_CI + reg_off, 0);

	WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
	WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
	WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
	WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
	WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
	WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
	WREG32(mmDMA_QM_0_GLBL_ERR_WDATA + reg_off,
			GOYA_ASYNC_EVENT_ID_DMA0_QM + dma_id);

	/* PQ has buffer of 2 cache lines, while CQ has 8 lines */
	WREG32(mmDMA_QM_0_PQ_CFG1 + reg_off, 0x00020002);
	WREG32(mmDMA_QM_0_CQ_CFG1 + reg_off, 0x00080008);

	if (goya->hw_cap_initialized & HW_CAP_MMU)
		WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_PARTLY_TRUSTED);
	else
		WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_FULLY_TRUSTED);

	WREG32(mmDMA_QM_0_GLBL_ERR_CFG + reg_off, QMAN_DMA_ERR_MSG_EN);
	WREG32(mmDMA_QM_0_GLBL_CFG0 + reg_off, QMAN_DMA_ENABLE);
}

static void goya_init_dma_ch(struct hl_device *hdev, int dma_id)
{
	u32 gic_base_lo, gic_base_hi;
	u64 sob_addr;
	u32 reg_off = dma_id * (mmDMA_CH_1_CFG1 - mmDMA_CH_0_CFG1);

	gic_base_lo =
		lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
	gic_base_hi =
		upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);

	WREG32(mmDMA_CH_0_ERRMSG_ADDR_LO + reg_off, gic_base_lo);
	WREG32(mmDMA_CH_0_ERRMSG_ADDR_HI + reg_off, gic_base_hi);
	WREG32(mmDMA_CH_0_ERRMSG_WDATA + reg_off,
			GOYA_ASYNC_EVENT_ID_DMA0_CH + dma_id);

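	/*
	 * Each user DMA channel (1-4) reports write completion to its own
	 * sync object; channel 0 is used only by KMD and is skipped here.
	 */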
	if (dma_id) {
		sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1000 +
				(dma_id - 1) * 4;
		WREG32(mmDMA_CH_0_WR_COMP_ADDR_LO + reg_off,
				lower_32_bits(sob_addr));
		WREG32(mmDMA_CH_0_WR_COMP_ADDR_HI + reg_off,
				upper_32_bits(sob_addr));
		WREG32(mmDMA_CH_0_WR_COMP_WDATA + reg_off, 0x80000001);
	}
}

/*
 * goya_init_dma_qmans - Initialize QMAN DMA registers
 *
 * @hdev: pointer to hl_device structure
 *
 * Initialize the H/W registers of the QMAN DMA channels
 */
static void goya_init_dma_qmans(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	struct hl_hw_queue *q;
	dma_addr_t bus_address;
	int i;

	if (goya->hw_cap_initialized & HW_CAP_DMA)
		return;

	q = &hdev->kernel_queues[0];

	for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++, q++) {
		bus_address = q->bus_address +
				hdev->asic_prop.host_phys_base_address;

		goya_init_dma_qman(hdev, i, bus_address);
		goya_init_dma_ch(hdev, i);
	}

	goya->hw_cap_initialized |= HW_CAP_DMA;
}

/*
 * goya_disable_external_queues - Disable external queues
 *
 * @hdev: pointer to hl_device structure
 */
static void goya_disable_external_queues(struct hl_device *hdev)
{
	WREG32(mmDMA_QM_0_GLBL_CFG0, 0);
	WREG32(mmDMA_QM_1_GLBL_CFG0, 0);
	WREG32(mmDMA_QM_2_GLBL_CFG0, 0);
	WREG32(mmDMA_QM_3_GLBL_CFG0, 0);
	WREG32(mmDMA_QM_4_GLBL_CFG0, 0);
}

static int goya_stop_queue(struct hl_device *hdev, u32 cfg_reg,
				u32 cp_sts_reg, u32 glbl_sts0_reg)
{
	int rc;
	u32 status;

	/* Use the values of TPC0 as they are all the same */

	WREG32(cfg_reg, 1 << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);

	status = RREG32(cp_sts_reg);
	if (status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK) {
		rc = hl_poll_timeout(
			hdev,
			cp_sts_reg,
			status,
			!(status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK),
			1000,
			QMAN_FENCE_TIMEOUT_USEC);

		/* if QMAN is stuck in fence no need to check for stop */
		if (rc)
			return 0;
	}

	rc = hl_poll_timeout(
		hdev,
		glbl_sts0_reg,
		status,
		(status & TPC0_QM_GLBL_STS0_CP_IS_STOP_MASK),
		1000,
		QMAN_STOP_TIMEOUT_USEC);

	if (rc) {
		dev_err(hdev->dev,
			"Timeout while waiting for QMAN to stop\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * goya_stop_external_queues - Stop external queues
 *
 * @hdev: pointer to hl_device structure
 *
 * Returns 0 on success
 */
static int goya_stop_external_queues(struct hl_device *hdev)
{
	int rc, retval = 0;

	rc = goya_stop_queue(hdev,
			mmDMA_QM_0_GLBL_CFG1,
			mmDMA_QM_0_CP_STS,
			mmDMA_QM_0_GLBL_STS0);

	if (rc) {
		dev_err(hdev->dev, "failed to stop DMA QMAN 0\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev,
			mmDMA_QM_1_GLBL_CFG1,
			mmDMA_QM_1_CP_STS,
			mmDMA_QM_1_GLBL_STS0);

	if (rc) {
		dev_err(hdev->dev, "failed to stop DMA QMAN 1\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev,
			mmDMA_QM_2_GLBL_CFG1,
			mmDMA_QM_2_CP_STS,
			mmDMA_QM_2_GLBL_STS0);

	if (rc) {
		dev_err(hdev->dev, "failed to stop DMA QMAN 2\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev,
			mmDMA_QM_3_GLBL_CFG1,
			mmDMA_QM_3_CP_STS,
			mmDMA_QM_3_GLBL_STS0);

	if (rc) {
		dev_err(hdev->dev, "failed to stop DMA QMAN 3\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev,
			mmDMA_QM_4_GLBL_CFG1,
			mmDMA_QM_4_CP_STS,
			mmDMA_QM_4_GLBL_STS0);

	if (rc) {
		dev_err(hdev->dev, "failed to stop DMA QMAN 4\n");
		retval = -EIO;
	}

	return retval;
}

static void goya_resume_external_queues(struct hl_device *hdev)
{
	WREG32(mmDMA_QM_0_GLBL_CFG1, 0);
	WREG32(mmDMA_QM_1_GLBL_CFG1, 0);
	WREG32(mmDMA_QM_2_GLBL_CFG1, 0);
	WREG32(mmDMA_QM_3_GLBL_CFG1, 0);
	WREG32(mmDMA_QM_4_GLBL_CFG1, 0);
}

/*
 * goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU
 *
 * @hdev: pointer to hl_device structure
 *
 * Returns 0 on success
 */
static int goya_init_cpu_queues(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	struct hl_eq *eq;
	dma_addr_t bus_address;
	u32 status;
	struct hl_hw_queue *cpu_pq = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ];
	int err;

	if (!hdev->cpu_queues_enable)
		return 0;

	if (goya->hw_cap_initialized & HW_CAP_CPU_Q)
		return 0;

	eq = &hdev->event_queue;

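	/*
	 * Publish the PQ, EQ and CPU-accessible memory addresses and sizes
	 * to the embedded CPU through the scratchpad registers, so its
	 * firmware knows where to find them.
	 */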
	bus_address = cpu_pq->bus_address +
			hdev->asic_prop.host_phys_base_address;
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_0, lower_32_bits(bus_address));
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_1, upper_32_bits(bus_address));

	bus_address = eq->bus_address + hdev->asic_prop.host_phys_base_address;
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_2, lower_32_bits(bus_address));
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_3, upper_32_bits(bus_address));

	bus_address = hdev->cpu_accessible_dma_address +
			hdev->asic_prop.host_phys_base_address;
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_8, lower_32_bits(bus_address));
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_9, upper_32_bits(bus_address));

	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_5, HL_QUEUE_SIZE_IN_BYTES);
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_4, HL_EQ_SIZE_IN_BYTES);
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_10, CPU_ACCESSIBLE_MEM_SIZE);

	/* Used for EQ CI */
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, 0);

	WREG32(mmCPU_IF_PF_PQ_PI, 0);

	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_7, PQ_INIT_STATUS_READY_FOR_CP);

	WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
			GOYA_ASYNC_EVENT_ID_PI_UPDATE);

	err = hl_poll_timeout(
		hdev,
		mmPSOC_GLOBAL_CONF_SCRATCHPAD_7,
		status,
		(status == PQ_INIT_STATUS_READY_FOR_HOST),
		1000,
		GOYA_CPU_TIMEOUT_USEC);

	if (err) {
		dev_err(hdev->dev,
			"Failed to communicate with ARM CPU (ArmCP timeout)\n");
		return -EIO;
	}

	goya->hw_cap_initialized |= HW_CAP_CPU_Q;
	return 0;
}

static void goya_set_pll_refclk(struct hl_device *hdev)
{
	WREG32(mmCPU_PLL_DIV_SEL_0, 0x0);
	WREG32(mmCPU_PLL_DIV_SEL_1, 0x0);
	WREG32(mmCPU_PLL_DIV_SEL_2, 0x0);
	WREG32(mmCPU_PLL_DIV_SEL_3, 0x0);

	WREG32(mmIC_PLL_DIV_SEL_0, 0x0);
	WREG32(mmIC_PLL_DIV_SEL_1, 0x0);
	WREG32(mmIC_PLL_DIV_SEL_2, 0x0);
	WREG32(mmIC_PLL_DIV_SEL_3, 0x0);

	WREG32(mmMC_PLL_DIV_SEL_0, 0x0);
	WREG32(mmMC_PLL_DIV_SEL_1, 0x0);
	WREG32(mmMC_PLL_DIV_SEL_2, 0x0);
	WREG32(mmMC_PLL_DIV_SEL_3, 0x0);

	WREG32(mmPSOC_MME_PLL_DIV_SEL_0, 0x0);
	WREG32(mmPSOC_MME_PLL_DIV_SEL_1, 0x0);
	WREG32(mmPSOC_MME_PLL_DIV_SEL_2, 0x0);
	WREG32(mmPSOC_MME_PLL_DIV_SEL_3, 0x0);

	WREG32(mmPSOC_PCI_PLL_DIV_SEL_0, 0x0);
	WREG32(mmPSOC_PCI_PLL_DIV_SEL_1, 0x0);
	WREG32(mmPSOC_PCI_PLL_DIV_SEL_2, 0x0);
	WREG32(mmPSOC_PCI_PLL_DIV_SEL_3, 0x0);

	WREG32(mmPSOC_EMMC_PLL_DIV_SEL_0, 0x0);
	WREG32(mmPSOC_EMMC_PLL_DIV_SEL_1, 0x0);
	WREG32(mmPSOC_EMMC_PLL_DIV_SEL_2, 0x0);
	WREG32(mmPSOC_EMMC_PLL_DIV_SEL_3, 0x0);

	WREG32(mmTPC_PLL_DIV_SEL_0, 0x0);
	WREG32(mmTPC_PLL_DIV_SEL_1, 0x0);
	WREG32(mmTPC_PLL_DIV_SEL_2, 0x0);
	WREG32(mmTPC_PLL_DIV_SEL_3, 0x0);
}

static void goya_disable_clk_rlx(struct hl_device *hdev)
{
	WREG32(mmPSOC_MME_PLL_CLK_RLX_0, 0x100010);
	WREG32(mmIC_PLL_CLK_RLX_0, 0x100010);
}

static void _goya_tpc_mbist_workaround(struct hl_device *hdev, u8 tpc_id)
{
	u64 tpc_eml_address;
	u32 val, tpc_offset, tpc_eml_offset, tpc_slm_offset;
	int err, slm_index;

	tpc_offset = tpc_id * 0x40000;
	tpc_eml_offset = tpc_id * 0x200000;
	tpc_eml_address = (mmTPC0_EML_CFG_BASE + tpc_eml_offset - CFG_BASE);
	tpc_slm_offset = tpc_eml_address + 0x100000;

	/*
	 * Workaround for Bug H2 #2443 :
	 * "TPC SB is not initialized on chip reset"
	 */

	val = RREG32(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset);
	if (val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_ACTIVE_MASK)
		dev_warn(hdev->dev, "TPC%d MBIST ACTIVE is not cleared\n",
				tpc_id);

	WREG32(mmTPC0_CFG_FUNC_MBIST_PAT + tpc_offset, val & 0xFFFFF000);

	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_0 + tpc_offset, 0x37FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_1 + tpc_offset, 0x303F);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_2 + tpc_offset, 0x71FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_3 + tpc_offset, 0x71FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_4 + tpc_offset, 0x70FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_5 + tpc_offset, 0x70FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_6 + tpc_offset, 0x70FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_7 + tpc_offset, 0x70FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_8 + tpc_offset, 0x70FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_9 + tpc_offset, 0x70FF);

	WREG32_OR(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset,
			1 << TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_START_SHIFT);

	err = hl_poll_timeout(
		hdev,
		mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset,
		val,
		(val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_DONE_MASK),
		1000,
		HL_DEVICE_TIMEOUT_USEC);

	if (err)
		dev_err(hdev->dev,
			"Timeout while waiting for TPC%d MBIST DONE\n", tpc_id);

	WREG32_OR(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset,
			1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT);

	msleep(GOYA_RESET_WAIT_MSEC);

	WREG32_AND(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset,
			~(1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT));

	msleep(GOYA_RESET_WAIT_MSEC);

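	/* Zero the first 256 words of the TPC SLM; the final read-back is
	 * presumably there to ensure the posted writes have landed before
	 * returning.
	 */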
	for (slm_index = 0 ; slm_index < 256 ; slm_index++)
		WREG32(tpc_slm_offset + (slm_index << 2), 0);

	val = RREG32(tpc_slm_offset);
}

static void goya_tpc_mbist_workaround(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	int i;

	if (hdev->pldm)
		return;

	if (goya->hw_cap_initialized & HW_CAP_TPC_MBIST)
		return;

	/* Workaround for H2 #2443 */

	for (i = 0 ; i < TPC_MAX_NUM ; i++)
		_goya_tpc_mbist_workaround(hdev, i);

	goya->hw_cap_initialized |= HW_CAP_TPC_MBIST;
}

/*
 * goya_init_golden_registers - Initialize golden registers
 *
 * @hdev: pointer to hl_device structure
 *
 * Initialize the H/W registers of the device
 */
static void goya_init_golden_registers(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	u32 polynom[10], tpc_intr_mask, offset;
	int i;

	if (goya->hw_cap_initialized & HW_CAP_GOLDEN)
		return;

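	/*
	 * Polynomials programmed (shifted right by 7) into the routers'
	 * SPLIT_COEF registers further down; the values are presumably part
	 * of the hardware golden configuration and are taken as-is.
	 */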
1428 polynom[0] = 0x00020080;
1429 polynom[1] = 0x00401000;
1430 polynom[2] = 0x00200800;
1431 polynom[3] = 0x00002000;
1432 polynom[4] = 0x00080200;
1433 polynom[5] = 0x00040100;
1434 polynom[6] = 0x00100400;
1435 polynom[7] = 0x00004000;
1436 polynom[8] = 0x00010000;
1437 polynom[9] = 0x00008000;
1438
1439 /* Mask all arithmetic interrupts from TPC */
1440 tpc_intr_mask = 0x7FFF;
1441
1442 for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x20000) {
1443 WREG32(mmSRAM_Y0_X0_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1444 WREG32(mmSRAM_Y0_X1_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1445 WREG32(mmSRAM_Y0_X2_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1446 WREG32(mmSRAM_Y0_X3_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1447 WREG32(mmSRAM_Y0_X4_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1448
1449 WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_L_ARB + offset, 0x204);
1450 WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_L_ARB + offset, 0x204);
1451 WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_L_ARB + offset, 0x204);
1452 WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_L_ARB + offset, 0x204);
1453 WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_L_ARB + offset, 0x204);
1454
1455
1456 WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_E_ARB + offset, 0x206);
1457 WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_E_ARB + offset, 0x206);
1458 WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_E_ARB + offset, 0x206);
1459 WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_E_ARB + offset, 0x207);
1460 WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_E_ARB + offset, 0x207);
1461
1462 WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_W_ARB + offset, 0x207);
1463 WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_W_ARB + offset, 0x207);
1464 WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_W_ARB + offset, 0x206);
1465 WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_W_ARB + offset, 0x206);
1466 WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_W_ARB + offset, 0x206);
1467
1468 WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_E_ARB + offset, 0x101);
1469 WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_E_ARB + offset, 0x102);
1470 WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_E_ARB + offset, 0x103);
1471 WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_E_ARB + offset, 0x104);
1472 WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_E_ARB + offset, 0x105);
1473
1474 WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_W_ARB + offset, 0x105);
1475 WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_W_ARB + offset, 0x104);
1476 WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_W_ARB + offset, 0x103);
1477 WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_W_ARB + offset, 0x102);
1478 WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_W_ARB + offset, 0x101);
1479 }
1480
1481 WREG32(mmMME_STORE_MAX_CREDIT, 0x21);
1482 WREG32(mmMME_AGU, 0x0f0f0f10);
1483 WREG32(mmMME_SEI_MASK, ~0x0);
1484
1485 WREG32(mmMME6_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1486 WREG32(mmMME5_RTR_HBW_RD_RQ_N_ARB, 0x01040101);
1487 WREG32(mmMME4_RTR_HBW_RD_RQ_N_ARB, 0x01030101);
1488 WREG32(mmMME3_RTR_HBW_RD_RQ_N_ARB, 0x01020101);
1489 WREG32(mmMME2_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1490 WREG32(mmMME1_RTR_HBW_RD_RQ_N_ARB, 0x07010701);
1491 WREG32(mmMME6_RTR_HBW_RD_RQ_S_ARB, 0x04010401);
1492 WREG32(mmMME5_RTR_HBW_RD_RQ_S_ARB, 0x04050401);
1493 WREG32(mmMME4_RTR_HBW_RD_RQ_S_ARB, 0x03070301);
1494 WREG32(mmMME3_RTR_HBW_RD_RQ_S_ARB, 0x01030101);
1495 WREG32(mmMME2_RTR_HBW_RD_RQ_S_ARB, 0x01040101);
1496 WREG32(mmMME1_RTR_HBW_RD_RQ_S_ARB, 0x01050105);
1497 WREG32(mmMME6_RTR_HBW_RD_RQ_W_ARB, 0x01010501);
1498 WREG32(mmMME5_RTR_HBW_RD_RQ_W_ARB, 0x01010501);
1499 WREG32(mmMME4_RTR_HBW_RD_RQ_W_ARB, 0x01040301);
1500 WREG32(mmMME3_RTR_HBW_RD_RQ_W_ARB, 0x01030401);
1501 WREG32(mmMME2_RTR_HBW_RD_RQ_W_ARB, 0x01040101);
1502 WREG32(mmMME1_RTR_HBW_RD_RQ_W_ARB, 0x01050101);
1503 WREG32(mmMME6_RTR_HBW_WR_RQ_N_ARB, 0x02020202);
1504 WREG32(mmMME5_RTR_HBW_WR_RQ_N_ARB, 0x01070101);
1505 WREG32(mmMME4_RTR_HBW_WR_RQ_N_ARB, 0x02020201);
1506 WREG32(mmMME3_RTR_HBW_WR_RQ_N_ARB, 0x07020701);
1507 WREG32(mmMME2_RTR_HBW_WR_RQ_N_ARB, 0x01020101);
1508 WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
1509 WREG32(mmMME6_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
1510 WREG32(mmMME5_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
1511 WREG32(mmMME4_RTR_HBW_WR_RQ_S_ARB, 0x07020701);
1512 WREG32(mmMME3_RTR_HBW_WR_RQ_S_ARB, 0x02020201);
1513 WREG32(mmMME2_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
1514 WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01020102);
1515 WREG32(mmMME6_RTR_HBW_WR_RQ_W_ARB, 0x01020701);
1516 WREG32(mmMME5_RTR_HBW_WR_RQ_W_ARB, 0x01020701);
1517 WREG32(mmMME4_RTR_HBW_WR_RQ_W_ARB, 0x07020707);
1518 WREG32(mmMME3_RTR_HBW_WR_RQ_W_ARB, 0x01020201);
1519 WREG32(mmMME2_RTR_HBW_WR_RQ_W_ARB, 0x01070201);
1520 WREG32(mmMME1_RTR_HBW_WR_RQ_W_ARB, 0x01070201);
1521 WREG32(mmMME6_RTR_HBW_RD_RS_N_ARB, 0x01070102);
1522 WREG32(mmMME5_RTR_HBW_RD_RS_N_ARB, 0x01070102);
1523 WREG32(mmMME4_RTR_HBW_RD_RS_N_ARB, 0x01060102);
1524 WREG32(mmMME3_RTR_HBW_RD_RS_N_ARB, 0x01040102);
1525 WREG32(mmMME2_RTR_HBW_RD_RS_N_ARB, 0x01020102);
1526 WREG32(mmMME1_RTR_HBW_RD_RS_N_ARB, 0x01020107);
1527 WREG32(mmMME6_RTR_HBW_RD_RS_S_ARB, 0x01020106);
1528 WREG32(mmMME5_RTR_HBW_RD_RS_S_ARB, 0x01020102);
1529 WREG32(mmMME4_RTR_HBW_RD_RS_S_ARB, 0x01040102);
1530 WREG32(mmMME3_RTR_HBW_RD_RS_S_ARB, 0x01060102);
1531 WREG32(mmMME2_RTR_HBW_RD_RS_S_ARB, 0x01070102);
1532 WREG32(mmMME1_RTR_HBW_RD_RS_S_ARB, 0x01070102);
1533 WREG32(mmMME6_RTR_HBW_RD_RS_E_ARB, 0x01020702);
1534 WREG32(mmMME5_RTR_HBW_RD_RS_E_ARB, 0x01020702);
1535 WREG32(mmMME4_RTR_HBW_RD_RS_E_ARB, 0x01040602);
1536 WREG32(mmMME3_RTR_HBW_RD_RS_E_ARB, 0x01060402);
1537 WREG32(mmMME2_RTR_HBW_RD_RS_E_ARB, 0x01070202);
1538 WREG32(mmMME1_RTR_HBW_RD_RS_E_ARB, 0x01070102);
1539 WREG32(mmMME6_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1540 WREG32(mmMME5_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1541 WREG32(mmMME4_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1542 WREG32(mmMME3_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1543 WREG32(mmMME2_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1544 WREG32(mmMME1_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1545 WREG32(mmMME6_RTR_HBW_WR_RS_N_ARB, 0x01050101);
1546 WREG32(mmMME5_RTR_HBW_WR_RS_N_ARB, 0x01040101);
1547 WREG32(mmMME4_RTR_HBW_WR_RS_N_ARB, 0x01030101);
1548 WREG32(mmMME3_RTR_HBW_WR_RS_N_ARB, 0x01020101);
1549 WREG32(mmMME2_RTR_HBW_WR_RS_N_ARB, 0x01010101);
1550 WREG32(mmMME1_RTR_HBW_WR_RS_N_ARB, 0x01010107);
1551 WREG32(mmMME6_RTR_HBW_WR_RS_S_ARB, 0x01010107);
1552 WREG32(mmMME5_RTR_HBW_WR_RS_S_ARB, 0x01010101);
1553 WREG32(mmMME4_RTR_HBW_WR_RS_S_ARB, 0x01020101);
1554 WREG32(mmMME3_RTR_HBW_WR_RS_S_ARB, 0x01030101);
1555 WREG32(mmMME2_RTR_HBW_WR_RS_S_ARB, 0x01040101);
1556 WREG32(mmMME1_RTR_HBW_WR_RS_S_ARB, 0x01050101);
1557 WREG32(mmMME6_RTR_HBW_WR_RS_E_ARB, 0x01010501);
1558 WREG32(mmMME5_RTR_HBW_WR_RS_E_ARB, 0x01010501);
1559 WREG32(mmMME4_RTR_HBW_WR_RS_E_ARB, 0x01040301);
1560 WREG32(mmMME3_RTR_HBW_WR_RS_E_ARB, 0x01030401);
1561 WREG32(mmMME2_RTR_HBW_WR_RS_E_ARB, 0x01040101);
1562 WREG32(mmMME1_RTR_HBW_WR_RS_E_ARB, 0x01050101);
1563 WREG32(mmMME6_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1564 WREG32(mmMME5_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1565 WREG32(mmMME4_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1566 WREG32(mmMME3_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1567 WREG32(mmMME2_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1568 WREG32(mmMME1_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1569
1570 WREG32(mmTPC1_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1571 WREG32(mmTPC1_RTR_HBW_RD_RQ_S_ARB, 0x01010101);
1572 WREG32(mmTPC1_RTR_HBW_RD_RQ_E_ARB, 0x01060101);
1573 WREG32(mmTPC1_RTR_HBW_WR_RQ_N_ARB, 0x02020102);
1574 WREG32(mmTPC1_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
1575 WREG32(mmTPC1_RTR_HBW_WR_RQ_E_ARB, 0x02070202);
1576 WREG32(mmTPC1_RTR_HBW_RD_RS_N_ARB, 0x01020201);
1577 WREG32(mmTPC1_RTR_HBW_RD_RS_S_ARB, 0x01070201);
1578 WREG32(mmTPC1_RTR_HBW_RD_RS_W_ARB, 0x01070202);
1579 WREG32(mmTPC1_RTR_HBW_WR_RS_N_ARB, 0x01010101);
1580 WREG32(mmTPC1_RTR_HBW_WR_RS_S_ARB, 0x01050101);
1581 WREG32(mmTPC1_RTR_HBW_WR_RS_W_ARB, 0x01050101);
1582
1583 WREG32(mmTPC2_RTR_HBW_RD_RQ_N_ARB, 0x01020101);
1584 WREG32(mmTPC2_RTR_HBW_RD_RQ_S_ARB, 0x01050101);
1585 WREG32(mmTPC2_RTR_HBW_RD_RQ_E_ARB, 0x01010201);
1586 WREG32(mmTPC2_RTR_HBW_WR_RQ_N_ARB, 0x02040102);
1587 WREG32(mmTPC2_RTR_HBW_WR_RQ_S_ARB, 0x01050101);
1588 WREG32(mmTPC2_RTR_HBW_WR_RQ_E_ARB, 0x02060202);
1589 WREG32(mmTPC2_RTR_HBW_RD_RS_N_ARB, 0x01020201);
1590 WREG32(mmTPC2_RTR_HBW_RD_RS_S_ARB, 0x01070201);
1591 WREG32(mmTPC2_RTR_HBW_RD_RS_W_ARB, 0x01070202);
1592 WREG32(mmTPC2_RTR_HBW_WR_RS_N_ARB, 0x01010101);
1593 WREG32(mmTPC2_RTR_HBW_WR_RS_S_ARB, 0x01040101);
1594 WREG32(mmTPC2_RTR_HBW_WR_RS_W_ARB, 0x01040101);
1595
1596 WREG32(mmTPC3_RTR_HBW_RD_RQ_N_ARB, 0x01030101);
1597 WREG32(mmTPC3_RTR_HBW_RD_RQ_S_ARB, 0x01040101);
1598 WREG32(mmTPC3_RTR_HBW_RD_RQ_E_ARB, 0x01040301);
1599 WREG32(mmTPC3_RTR_HBW_WR_RQ_N_ARB, 0x02060102);
1600 WREG32(mmTPC3_RTR_HBW_WR_RQ_S_ARB, 0x01040101);
1601 WREG32(mmTPC3_RTR_HBW_WR_RQ_E_ARB, 0x01040301);
1602 WREG32(mmTPC3_RTR_HBW_RD_RS_N_ARB, 0x01040201);
1603 WREG32(mmTPC3_RTR_HBW_RD_RS_S_ARB, 0x01060201);
1604 WREG32(mmTPC3_RTR_HBW_RD_RS_W_ARB, 0x01060402);
1605 WREG32(mmTPC3_RTR_HBW_WR_RS_N_ARB, 0x01020101);
1606 WREG32(mmTPC3_RTR_HBW_WR_RS_S_ARB, 0x01030101);
1607 WREG32(mmTPC3_RTR_HBW_WR_RS_W_ARB, 0x01030401);
1608
1609 WREG32(mmTPC4_RTR_HBW_RD_RQ_N_ARB, 0x01040101);
1610 WREG32(mmTPC4_RTR_HBW_RD_RQ_S_ARB, 0x01030101);
1611 WREG32(mmTPC4_RTR_HBW_RD_RQ_E_ARB, 0x01030401);
1612 WREG32(mmTPC4_RTR_HBW_WR_RQ_N_ARB, 0x02070102);
1613 WREG32(mmTPC4_RTR_HBW_WR_RQ_S_ARB, 0x01030101);
1614 WREG32(mmTPC4_RTR_HBW_WR_RQ_E_ARB, 0x02060702);
1615 WREG32(mmTPC4_RTR_HBW_RD_RS_N_ARB, 0x01060201);
1616 WREG32(mmTPC4_RTR_HBW_RD_RS_S_ARB, 0x01040201);
1617 WREG32(mmTPC4_RTR_HBW_RD_RS_W_ARB, 0x01040602);
1618 WREG32(mmTPC4_RTR_HBW_WR_RS_N_ARB, 0x01030101);
1619 WREG32(mmTPC4_RTR_HBW_WR_RS_S_ARB, 0x01020101);
1620 WREG32(mmTPC4_RTR_HBW_WR_RS_W_ARB, 0x01040301);
1621
1622 WREG32(mmTPC5_RTR_HBW_RD_RQ_N_ARB, 0x01050101);
1623 WREG32(mmTPC5_RTR_HBW_RD_RQ_S_ARB, 0x01020101);
1624 WREG32(mmTPC5_RTR_HBW_RD_RQ_E_ARB, 0x01200501);
1625 WREG32(mmTPC5_RTR_HBW_WR_RQ_N_ARB, 0x02070102);
1626 WREG32(mmTPC5_RTR_HBW_WR_RQ_S_ARB, 0x01020101);
1627 WREG32(mmTPC5_RTR_HBW_WR_RQ_E_ARB, 0x02020602);
1628 WREG32(mmTPC5_RTR_HBW_RD_RS_N_ARB, 0x01070201);
1629 WREG32(mmTPC5_RTR_HBW_RD_RS_S_ARB, 0x01020201);
1630 WREG32(mmTPC5_RTR_HBW_RD_RS_W_ARB, 0x01020702);
1631 WREG32(mmTPC5_RTR_HBW_WR_RS_N_ARB, 0x01040101);
1632 WREG32(mmTPC5_RTR_HBW_WR_RS_S_ARB, 0x01010101);
1633 WREG32(mmTPC5_RTR_HBW_WR_RS_W_ARB, 0x01010501);
1634
1635 WREG32(mmTPC6_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1636 WREG32(mmTPC6_RTR_HBW_RD_RQ_S_ARB, 0x01010101);
1637 WREG32(mmTPC6_RTR_HBW_RD_RQ_E_ARB, 0x01010601);
1638 WREG32(mmTPC6_RTR_HBW_WR_RQ_N_ARB, 0x01010101);
1639 WREG32(mmTPC6_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
1640 WREG32(mmTPC6_RTR_HBW_WR_RQ_E_ARB, 0x02020702);
1641 WREG32(mmTPC6_RTR_HBW_RD_RS_N_ARB, 0x01010101);
1642 WREG32(mmTPC6_RTR_HBW_RD_RS_S_ARB, 0x01010101);
1643 WREG32(mmTPC6_RTR_HBW_RD_RS_W_ARB, 0x01020702);
1644 WREG32(mmTPC6_RTR_HBW_WR_RS_N_ARB, 0x01050101);
1645 WREG32(mmTPC6_RTR_HBW_WR_RS_S_ARB, 0x01010101);
1646 WREG32(mmTPC6_RTR_HBW_WR_RS_W_ARB, 0x01010501);
1647
1648 for (i = 0, offset = 0 ; i < 10 ; i++, offset += 4) {
1649 WREG32(mmMME1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1650 WREG32(mmMME2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1651 WREG32(mmMME3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1652 WREG32(mmMME4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1653 WREG32(mmMME5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1654 WREG32(mmMME6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1655
1656 WREG32(mmTPC0_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1657 WREG32(mmTPC1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1658 WREG32(mmTPC2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1659 WREG32(mmTPC3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1660 WREG32(mmTPC4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1661 WREG32(mmTPC5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1662 WREG32(mmTPC6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1663 WREG32(mmTPC7_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1664
1665 WREG32(mmPCI_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1666 WREG32(mmDMA_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1667 }
1668
1669 for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x40000) {
1670 WREG32(mmMME1_RTR_SCRAMB_EN + offset,
1671 1 << MME1_RTR_SCRAMB_EN_VAL_SHIFT);
1672 WREG32(mmMME1_RTR_NON_LIN_SCRAMB + offset,
1673 1 << MME1_RTR_NON_LIN_SCRAMB_EN_SHIFT);
1674 }
1675
1676 for (i = 0, offset = 0 ; i < 8 ; i++, offset += 0x40000) {
1677 /*
1678 * Workaround for H2 bug #2441:
1679 * "ST.NOP set trace event illegal opcode"
1680 */
1681 WREG32(mmTPC0_CFG_TPC_INTR_MASK + offset, tpc_intr_mask);
1682
1683 WREG32(mmTPC0_NRTR_SCRAMB_EN + offset,
1684 1 << TPC0_NRTR_SCRAMB_EN_VAL_SHIFT);
1685 WREG32(mmTPC0_NRTR_NON_LIN_SCRAMB + offset,
1686 1 << TPC0_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
1687 }
1688
1689 WREG32(mmDMA_NRTR_SCRAMB_EN, 1 << DMA_NRTR_SCRAMB_EN_VAL_SHIFT);
1690 WREG32(mmDMA_NRTR_NON_LIN_SCRAMB,
1691 1 << DMA_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
1692
1693 WREG32(mmPCI_NRTR_SCRAMB_EN, 1 << PCI_NRTR_SCRAMB_EN_VAL_SHIFT);
1694 WREG32(mmPCI_NRTR_NON_LIN_SCRAMB,
1695 1 << PCI_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
1696
1697 /*
1698 * Workaround for H2 bug #HW-23:
1699 * Set the DMA max outstanding read requests to 240 on DMA CH 1 and
1700 * to 16 on the KMD DMA.
1701 * We need to limit only these DMAs because the user can only read
1702 * from the host using DMA CH 1
1703 */
1704 WREG32(mmDMA_CH_0_CFG0, 0x0fff0010);
1705 WREG32(mmDMA_CH_1_CFG0, 0x0fff00F0);
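	/*
	 * The low byte of DMA_CH_n_CFG0 appears to encode the outstanding
	 * read limit mentioned above: 0x10 = 16 for the KMD channel (CH 0)
	 * and 0xF0 = 240 for the user channel (CH 1).
	 */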
1706
1707 goya->hw_cap_initialized |= HW_CAP_GOLDEN;
1708}
1709
1710static void goya_init_mme_qman(struct hl_device *hdev)
1711{
1712 u32 mtr_base_lo, mtr_base_hi;
1713 u32 so_base_lo, so_base_hi;
1714 u32 gic_base_lo, gic_base_hi;
1715 u64 qman_base_addr;
1716
1717 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1718 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1719 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1720 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1721
1722 gic_base_lo =
1723 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1724 gic_base_hi =
1725 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1726
1727 qman_base_addr = hdev->asic_prop.sram_base_address +
1728 MME_QMAN_BASE_OFFSET;
1729
1730 WREG32(mmMME_QM_PQ_BASE_LO, lower_32_bits(qman_base_addr));
1731 WREG32(mmMME_QM_PQ_BASE_HI, upper_32_bits(qman_base_addr));
1732 WREG32(mmMME_QM_PQ_SIZE, ilog2(MME_QMAN_LENGTH));
1733 WREG32(mmMME_QM_PQ_PI, 0);
1734 WREG32(mmMME_QM_PQ_CI, 0);
1735 WREG32(mmMME_QM_CP_LDMA_SRC_BASE_LO_OFFSET, 0x10C0);
1736 WREG32(mmMME_QM_CP_LDMA_SRC_BASE_HI_OFFSET, 0x10C4);
1737 WREG32(mmMME_QM_CP_LDMA_TSIZE_OFFSET, 0x10C8);
1738 WREG32(mmMME_QM_CP_LDMA_COMMIT_OFFSET, 0x10CC);
1739
1740 WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
1741 WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
1742 WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_LO, so_base_lo);
1743 WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_HI, so_base_hi);
1744
1745 /* QMAN CQ has 8 cache lines */
1746 WREG32(mmMME_QM_CQ_CFG1, 0x00080008);
1747
1748 WREG32(mmMME_QM_GLBL_ERR_ADDR_LO, gic_base_lo);
1749 WREG32(mmMME_QM_GLBL_ERR_ADDR_HI, gic_base_hi);
1750
1751 WREG32(mmMME_QM_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_QM);
1752
1753 WREG32(mmMME_QM_GLBL_ERR_CFG, QMAN_MME_ERR_MSG_EN);
1754
1755 WREG32(mmMME_QM_GLBL_PROT, QMAN_MME_ERR_PROT);
1756
1757 WREG32(mmMME_QM_GLBL_CFG0, QMAN_MME_ENABLE);
1758}
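
/*
 * Note: every QMAN instance on Goya is brought up with the same recipe as
 * above: program the PQ base/size and reset its PI/CI, point the CP message
 * bases at the sync manager (monitor payload and sync objects), size the CQ,
 * route errors to the GIC with a per-engine event ID, set the protection
 * bits and finally enable the queue in GLBL_CFG0.
 */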
1759
1760static void goya_init_mme_cmdq(struct hl_device *hdev)
1761{
1762 u32 mtr_base_lo, mtr_base_hi;
1763 u32 so_base_lo, so_base_hi;
1764 u32 gic_base_lo, gic_base_hi;
1765 u64 qman_base_addr;
1766
1767 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1768 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1769 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1770 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1771
1772 gic_base_lo =
1773 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1774 gic_base_hi =
1775 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1776
1777 qman_base_addr = hdev->asic_prop.sram_base_address +
1778 MME_QMAN_BASE_OFFSET;
1779
1780 WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
1781 WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
1782 WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_LO, so_base_lo);
1783 WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_HI, so_base_hi);
1784
1785 /* CMDQ CQ has 20 cache lines */
1786 WREG32(mmMME_CMDQ_CQ_CFG1, 0x00140014);
1787
1788 WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_LO, gic_base_lo);
1789 WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_HI, gic_base_hi);
1790
1791 WREG32(mmMME_CMDQ_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_CMDQ);
1792
1793 WREG32(mmMME_CMDQ_GLBL_ERR_CFG, CMDQ_MME_ERR_MSG_EN);
1794
1795 WREG32(mmMME_CMDQ_GLBL_PROT, CMDQ_MME_ERR_PROT);
1796
1797 WREG32(mmMME_CMDQ_GLBL_CFG0, CMDQ_MME_ENABLE);
1798}
1799
1800static void goya_init_mme_qmans(struct hl_device *hdev)
1801{
1802 struct goya_device *goya = hdev->asic_specific;
1803 u32 so_base_lo, so_base_hi;
1804
1805 if (goya->hw_cap_initialized & HW_CAP_MME)
1806 return;
1807
1808 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1809 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1810
1811 WREG32(mmMME_SM_BASE_ADDRESS_LOW, so_base_lo);
1812 WREG32(mmMME_SM_BASE_ADDRESS_HIGH, so_base_hi);
1813
1814 goya_init_mme_qman(hdev);
1815 goya_init_mme_cmdq(hdev);
1816
1817 goya->hw_cap_initialized |= HW_CAP_MME;
1818}
1819
1820static void goya_init_tpc_qman(struct hl_device *hdev, u32 base_off, int tpc_id)
1821{
1822 u32 mtr_base_lo, mtr_base_hi;
1823 u32 so_base_lo, so_base_hi;
1824 u32 gic_base_lo, gic_base_hi;
1825 u64 qman_base_addr;
1826 u32 reg_off = tpc_id * (mmTPC1_QM_PQ_PI - mmTPC0_QM_PQ_PI);
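	/*
	 * The per-TPC QMAN register blocks are laid out at a fixed stride,
	 * so the distance between the TPC1 and TPC0 registers serves as the
	 * per-engine offset for any TPC index.
	 */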
1827
1828 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1829 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1830 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1831 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1832
1833 gic_base_lo =
1834 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1835 gic_base_hi =
1836 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1837
1838 qman_base_addr = hdev->asic_prop.sram_base_address + base_off;
1839
1840 WREG32(mmTPC0_QM_PQ_BASE_LO + reg_off, lower_32_bits(qman_base_addr));
1841 WREG32(mmTPC0_QM_PQ_BASE_HI + reg_off, upper_32_bits(qman_base_addr));
1842 WREG32(mmTPC0_QM_PQ_SIZE + reg_off, ilog2(TPC_QMAN_LENGTH));
1843 WREG32(mmTPC0_QM_PQ_PI + reg_off, 0);
1844 WREG32(mmTPC0_QM_PQ_CI + reg_off, 0);
1845 WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET + reg_off, 0x10C0);
1846 WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET + reg_off, 0x10C4);
1847 WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET + reg_off, 0x10C8);
1848 WREG32(mmTPC0_QM_CP_LDMA_COMMIT_OFFSET + reg_off, 0x10CC);
1849
1850 WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
1851 WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
1852 WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
1853 WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
1854
1855 WREG32(mmTPC0_QM_CQ_CFG1 + reg_off, 0x00080008);
1856
1857 WREG32(mmTPC0_QM_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
1858 WREG32(mmTPC0_QM_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
1859
1860 WREG32(mmTPC0_QM_GLBL_ERR_WDATA + reg_off,
1861 GOYA_ASYNC_EVENT_ID_TPC0_QM + tpc_id);
1862
1863 WREG32(mmTPC0_QM_GLBL_ERR_CFG + reg_off, QMAN_TPC_ERR_MSG_EN);
1864
1865 WREG32(mmTPC0_QM_GLBL_PROT + reg_off, QMAN_TPC_ERR_PROT);
1866
1867 WREG32(mmTPC0_QM_GLBL_CFG0 + reg_off, QMAN_TPC_ENABLE);
1868}
1869
1870static void goya_init_tpc_cmdq(struct hl_device *hdev, int tpc_id)
1871{
1872 u32 mtr_base_lo, mtr_base_hi;
1873 u32 so_base_lo, so_base_hi;
1874 u32 gic_base_lo, gic_base_hi;
1875 u32 reg_off = tpc_id * (mmTPC1_CMDQ_CQ_CFG1 - mmTPC0_CMDQ_CQ_CFG1);
1876
1877 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1878 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1879 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1880 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1881
1882 gic_base_lo =
1883 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1884 gic_base_hi =
1885 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1886
1887 WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
1888 WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
1889 WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
1890 WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
1891
1892 WREG32(mmTPC0_CMDQ_CQ_CFG1 + reg_off, 0x00140014);
1893
1894 WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
1895 WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
1896
1897 WREG32(mmTPC0_CMDQ_GLBL_ERR_WDATA + reg_off,
1898 GOYA_ASYNC_EVENT_ID_TPC0_CMDQ + tpc_id);
1899
1900 WREG32(mmTPC0_CMDQ_GLBL_ERR_CFG + reg_off, CMDQ_TPC_ERR_MSG_EN);
1901
1902 WREG32(mmTPC0_CMDQ_GLBL_PROT + reg_off, CMDQ_TPC_ERR_PROT);
1903
1904 WREG32(mmTPC0_CMDQ_GLBL_CFG0 + reg_off, CMDQ_TPC_ENABLE);
1905}
1906
1907static void goya_init_tpc_qmans(struct hl_device *hdev)
1908{
1909 struct goya_device *goya = hdev->asic_specific;
1910 u32 so_base_lo, so_base_hi;
1911 u32 cfg_off = mmTPC1_CFG_SM_BASE_ADDRESS_LOW -
1912 mmTPC0_CFG_SM_BASE_ADDRESS_LOW;
1913 int i;
1914
1915 if (goya->hw_cap_initialized & HW_CAP_TPC)
1916 return;
1917
1918 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1919 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1920
1921 for (i = 0 ; i < TPC_MAX_NUM ; i++) {
1922 WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_LOW + i * cfg_off,
1923 so_base_lo);
1924 WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_HIGH + i * cfg_off,
1925 so_base_hi);
1926 }
1927
1928 goya_init_tpc_qman(hdev, TPC0_QMAN_BASE_OFFSET, 0);
1929 goya_init_tpc_qman(hdev, TPC1_QMAN_BASE_OFFSET, 1);
1930 goya_init_tpc_qman(hdev, TPC2_QMAN_BASE_OFFSET, 2);
1931 goya_init_tpc_qman(hdev, TPC3_QMAN_BASE_OFFSET, 3);
1932 goya_init_tpc_qman(hdev, TPC4_QMAN_BASE_OFFSET, 4);
1933 goya_init_tpc_qman(hdev, TPC5_QMAN_BASE_OFFSET, 5);
1934 goya_init_tpc_qman(hdev, TPC6_QMAN_BASE_OFFSET, 6);
1935 goya_init_tpc_qman(hdev, TPC7_QMAN_BASE_OFFSET, 7);
1936
1937 for (i = 0 ; i < TPC_MAX_NUM ; i++)
1938 goya_init_tpc_cmdq(hdev, i);
1939
1940 goya->hw_cap_initialized |= HW_CAP_TPC;
1941}
1942
1943/*
1944 * goya_disable_internal_queues - Disable internal queues
1945 *
1946 * @hdev: pointer to hl_device structure
1947 *
1948 */
1949static void goya_disable_internal_queues(struct hl_device *hdev)
1950{
1951 WREG32(mmMME_QM_GLBL_CFG0, 0);
1952 WREG32(mmMME_CMDQ_GLBL_CFG0, 0);
1953
1954 WREG32(mmTPC0_QM_GLBL_CFG0, 0);
1955 WREG32(mmTPC0_CMDQ_GLBL_CFG0, 0);
1956
1957 WREG32(mmTPC1_QM_GLBL_CFG0, 0);
1958 WREG32(mmTPC1_CMDQ_GLBL_CFG0, 0);
1959
1960 WREG32(mmTPC2_QM_GLBL_CFG0, 0);
1961 WREG32(mmTPC2_CMDQ_GLBL_CFG0, 0);
1962
1963 WREG32(mmTPC3_QM_GLBL_CFG0, 0);
1964 WREG32(mmTPC3_CMDQ_GLBL_CFG0, 0);
1965
1966 WREG32(mmTPC4_QM_GLBL_CFG0, 0);
1967 WREG32(mmTPC4_CMDQ_GLBL_CFG0, 0);
1968
1969 WREG32(mmTPC5_QM_GLBL_CFG0, 0);
1970 WREG32(mmTPC5_CMDQ_GLBL_CFG0, 0);
1971
1972 WREG32(mmTPC6_QM_GLBL_CFG0, 0);
1973 WREG32(mmTPC6_CMDQ_GLBL_CFG0, 0);
1974
1975 WREG32(mmTPC7_QM_GLBL_CFG0, 0);
1976 WREG32(mmTPC7_CMDQ_GLBL_CFG0, 0);
1977}
1978
1979/*
1980 * goya_stop_internal_queues - Stop internal queues
1981 *
1982 * @hdev: pointer to hl_device structure
1983 *
1984 * Returns 0 on success
1985 *
1986 */
1987static int goya_stop_internal_queues(struct hl_device *hdev)
1988{
1989 int rc, retval = 0;
1990
1991 /*
1992 * Each queue (QMAN) is a separate H/W logic. That means that each
1993 * QMAN can be stopped independently and failure to stop one does NOT
1994 * prevent us from trying to stop the other QMANs
1995 */
1996
1997 rc = goya_stop_queue(hdev,
1998 mmMME_QM_GLBL_CFG1,
1999 mmMME_QM_CP_STS,
2000 mmMME_QM_GLBL_STS0);
2001
2002 if (rc) {
2003 dev_err(hdev->dev, "failed to stop MME QMAN\n");
2004 retval = -EIO;
2005 }
2006
2007 rc = goya_stop_queue(hdev,
2008 mmMME_CMDQ_GLBL_CFG1,
2009 mmMME_CMDQ_CP_STS,
2010 mmMME_CMDQ_GLBL_STS0);
2011
2012 if (rc) {
2013 dev_err(hdev->dev, "failed to stop MME CMDQ\n");
2014 retval = -EIO;
2015 }
2016
2017 rc = goya_stop_queue(hdev,
2018 mmTPC0_QM_GLBL_CFG1,
2019 mmTPC0_QM_CP_STS,
2020 mmTPC0_QM_GLBL_STS0);
2021
2022 if (rc) {
2023 dev_err(hdev->dev, "failed to stop TPC 0 QMAN\n");
2024 retval = -EIO;
2025 }
2026
2027 rc = goya_stop_queue(hdev,
2028 mmTPC0_CMDQ_GLBL_CFG1,
2029 mmTPC0_CMDQ_CP_STS,
2030 mmTPC0_CMDQ_GLBL_STS0);
2031
2032 if (rc) {
2033 dev_err(hdev->dev, "failed to stop TPC 0 CMDQ\n");
2034 retval = -EIO;
2035 }
2036
2037 rc = goya_stop_queue(hdev,
2038 mmTPC1_QM_GLBL_CFG1,
2039 mmTPC1_QM_CP_STS,
2040 mmTPC1_QM_GLBL_STS0);
2041
2042 if (rc) {
2043 dev_err(hdev->dev, "failed to stop TPC 1 QMAN\n");
2044 retval = -EIO;
2045 }
2046
2047 rc = goya_stop_queue(hdev,
2048 mmTPC1_CMDQ_GLBL_CFG1,
2049 mmTPC1_CMDQ_CP_STS,
2050 mmTPC1_CMDQ_GLBL_STS0);
2051
2052 if (rc) {
2053 dev_err(hdev->dev, "failed to stop TPC 1 CMDQ\n");
2054 retval = -EIO;
2055 }
2056
2057 rc = goya_stop_queue(hdev,
2058 mmTPC2_QM_GLBL_CFG1,
2059 mmTPC2_QM_CP_STS,
2060 mmTPC2_QM_GLBL_STS0);
2061
2062 if (rc) {
2063 dev_err(hdev->dev, "failed to stop TPC 2 QMAN\n");
2064 retval = -EIO;
2065 }
2066
2067 rc = goya_stop_queue(hdev,
2068 mmTPC2_CMDQ_GLBL_CFG1,
2069 mmTPC2_CMDQ_CP_STS,
2070 mmTPC2_CMDQ_GLBL_STS0);
2071
2072 if (rc) {
2073 dev_err(hdev->dev, "failed to stop TPC 2 CMDQ\n");
2074 retval = -EIO;
2075 }
2076
2077 rc = goya_stop_queue(hdev,
2078 mmTPC3_QM_GLBL_CFG1,
2079 mmTPC3_QM_CP_STS,
2080 mmTPC3_QM_GLBL_STS0);
2081
2082 if (rc) {
2083 dev_err(hdev->dev, "failed to stop TPC 3 QMAN\n");
2084 retval = -EIO;
2085 }
2086
2087 rc = goya_stop_queue(hdev,
2088 mmTPC3_CMDQ_GLBL_CFG1,
2089 mmTPC3_CMDQ_CP_STS,
2090 mmTPC3_CMDQ_GLBL_STS0);
2091
2092 if (rc) {
2093 dev_err(hdev->dev, "failed to stop TPC 3 CMDQ\n");
2094 retval = -EIO;
2095 }
2096
2097 rc = goya_stop_queue(hdev,
2098 mmTPC4_QM_GLBL_CFG1,
2099 mmTPC4_QM_CP_STS,
2100 mmTPC4_QM_GLBL_STS0);
2101
2102 if (rc) {
2103 dev_err(hdev->dev, "failed to stop TPC 4 QMAN\n");
2104 retval = -EIO;
2105 }
2106
2107 rc = goya_stop_queue(hdev,
2108 mmTPC4_CMDQ_GLBL_CFG1,
2109 mmTPC4_CMDQ_CP_STS,
2110 mmTPC4_CMDQ_GLBL_STS0);
2111
2112 if (rc) {
2113 dev_err(hdev->dev, "failed to stop TPC 4 CMDQ\n");
2114 retval = -EIO;
2115 }
2116
2117 rc = goya_stop_queue(hdev,
2118 mmTPC5_QM_GLBL_CFG1,
2119 mmTPC5_QM_CP_STS,
2120 mmTPC5_QM_GLBL_STS0);
2121
2122 if (rc) {
2123 dev_err(hdev->dev, "failed to stop TPC 5 QMAN\n");
2124 retval = -EIO;
2125 }
2126
2127 rc = goya_stop_queue(hdev,
2128 mmTPC5_CMDQ_GLBL_CFG1,
2129 mmTPC5_CMDQ_CP_STS,
2130 mmTPC5_CMDQ_GLBL_STS0);
2131
2132 if (rc) {
2133 dev_err(hdev->dev, "failed to stop TPC 5 CMDQ\n");
2134 retval = -EIO;
2135 }
2136
2137 rc = goya_stop_queue(hdev,
2138 mmTPC6_QM_GLBL_CFG1,
2139 mmTPC6_QM_CP_STS,
2140 mmTPC6_QM_GLBL_STS0);
2141
2142 if (rc) {
2143 dev_err(hdev->dev, "failed to stop TPC 6 QMAN\n");
2144 retval = -EIO;
2145 }
2146
2147 rc = goya_stop_queue(hdev,
2148 mmTPC6_CMDQ_GLBL_CFG1,
2149 mmTPC6_CMDQ_CP_STS,
2150 mmTPC6_CMDQ_GLBL_STS0);
2151
2152 if (rc) {
2153 dev_err(hdev->dev, "failed to stop TPC 6 CMDQ\n");
2154 retval = -EIO;
2155 }
2156
2157 rc = goya_stop_queue(hdev,
2158 mmTPC7_QM_GLBL_CFG1,
2159 mmTPC7_QM_CP_STS,
2160 mmTPC7_QM_GLBL_STS0);
2161
2162 if (rc) {
2163 dev_err(hdev->dev, "failed to stop TPC 7 QMAN\n");
2164 retval = -EIO;
2165 }
2166
2167 rc = goya_stop_queue(hdev,
2168 mmTPC7_CMDQ_GLBL_CFG1,
2169 mmTPC7_CMDQ_CP_STS,
2170 mmTPC7_CMDQ_GLBL_STS0);
2171
2172 if (rc) {
2173 dev_err(hdev->dev, "failed to stop TPC 7 CMDQ\n");
2174 retval = -EIO;
2175 }
2176
2177 return retval;
2178}
2179
2180static void goya_resume_internal_queues(struct hl_device *hdev)
2181{
2182 WREG32(mmMME_QM_GLBL_CFG1, 0);
2183 WREG32(mmMME_CMDQ_GLBL_CFG1, 0);
2184
2185 WREG32(mmTPC0_QM_GLBL_CFG1, 0);
2186 WREG32(mmTPC0_CMDQ_GLBL_CFG1, 0);
2187
2188 WREG32(mmTPC1_QM_GLBL_CFG1, 0);
2189 WREG32(mmTPC1_CMDQ_GLBL_CFG1, 0);
2190
2191 WREG32(mmTPC2_QM_GLBL_CFG1, 0);
2192 WREG32(mmTPC2_CMDQ_GLBL_CFG1, 0);
2193
2194 WREG32(mmTPC3_QM_GLBL_CFG1, 0);
2195 WREG32(mmTPC3_CMDQ_GLBL_CFG1, 0);
2196
2197 WREG32(mmTPC4_QM_GLBL_CFG1, 0);
2198 WREG32(mmTPC4_CMDQ_GLBL_CFG1, 0);
2199
2200 WREG32(mmTPC5_QM_GLBL_CFG1, 0);
2201 WREG32(mmTPC5_CMDQ_GLBL_CFG1, 0);
2202
2203 WREG32(mmTPC6_QM_GLBL_CFG1, 0);
2204 WREG32(mmTPC6_CMDQ_GLBL_CFG1, 0);
2205
2206 WREG32(mmTPC7_QM_GLBL_CFG1, 0);
2207 WREG32(mmTPC7_CMDQ_GLBL_CFG1, 0);
2208}
2209
2210static void goya_dma_stall(struct hl_device *hdev)
2211{
2212 WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT);
2213 WREG32(mmDMA_QM_1_GLBL_CFG1, 1 << DMA_QM_1_GLBL_CFG1_DMA_STOP_SHIFT);
2214 WREG32(mmDMA_QM_2_GLBL_CFG1, 1 << DMA_QM_2_GLBL_CFG1_DMA_STOP_SHIFT);
2215 WREG32(mmDMA_QM_3_GLBL_CFG1, 1 << DMA_QM_3_GLBL_CFG1_DMA_STOP_SHIFT);
2216 WREG32(mmDMA_QM_4_GLBL_CFG1, 1 << DMA_QM_4_GLBL_CFG1_DMA_STOP_SHIFT);
2217}
2218
2219static void goya_tpc_stall(struct hl_device *hdev)
2220{
2221 WREG32(mmTPC0_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
2222 WREG32(mmTPC1_CFG_TPC_STALL, 1 << TPC1_CFG_TPC_STALL_V_SHIFT);
2223 WREG32(mmTPC2_CFG_TPC_STALL, 1 << TPC2_CFG_TPC_STALL_V_SHIFT);
2224 WREG32(mmTPC3_CFG_TPC_STALL, 1 << TPC3_CFG_TPC_STALL_V_SHIFT);
2225 WREG32(mmTPC4_CFG_TPC_STALL, 1 << TPC4_CFG_TPC_STALL_V_SHIFT);
2226 WREG32(mmTPC5_CFG_TPC_STALL, 1 << TPC5_CFG_TPC_STALL_V_SHIFT);
2227 WREG32(mmTPC6_CFG_TPC_STALL, 1 << TPC6_CFG_TPC_STALL_V_SHIFT);
2228 WREG32(mmTPC7_CFG_TPC_STALL, 1 << TPC7_CFG_TPC_STALL_V_SHIFT);
2229}
2230
2231static void goya_mme_stall(struct hl_device *hdev)
2232{
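	/*
	 * Unlike the DMA/TPC stall registers above, which each take a single
	 * bit, here we write all-ones; setting every bit of MME_STALL is
	 * presumably what stalls the whole MME pipe.
	 */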
2233 WREG32(mmMME_STALL, 0xFFFFFFFF);
2234}
2235
2236static int goya_enable_msix(struct hl_device *hdev)
2237{
2238 struct goya_device *goya = hdev->asic_specific;
2239 int cq_cnt = hdev->asic_prop.completion_queues_count;
2240 int rc, i, irq_cnt_init, irq;
2241
2242 if (goya->hw_cap_initialized & HW_CAP_MSIX)
2243 return 0;
2244
2245 rc = pci_alloc_irq_vectors(hdev->pdev, GOYA_MSIX_ENTRIES,
2246 GOYA_MSIX_ENTRIES, PCI_IRQ_MSIX);
2247 if (rc < 0) {
2248 dev_err(hdev->dev,
2249 "MSI-X: Failed to enable support -- %d/%d\n",
2250 GOYA_MSIX_ENTRIES, rc);
2251 return rc;
2252 }
2253
2254 for (i = 0, irq_cnt_init = 0 ; i < cq_cnt ; i++, irq_cnt_init++) {
2255 irq = pci_irq_vector(hdev->pdev, i);
2256 rc = request_irq(irq, hl_irq_handler_cq, 0, goya_irq_name[i],
2257 &hdev->completion_queue[i]);
2258 if (rc) {
2259 dev_err(hdev->dev, "Failed to request IRQ %d", irq);
2260 goto free_irqs;
2261 }
2262 }
2263
2264 irq = pci_irq_vector(hdev->pdev, EVENT_QUEUE_MSIX_IDX);
2265
2266 rc = request_irq(irq, hl_irq_handler_eq, 0,
2267 goya_irq_name[EVENT_QUEUE_MSIX_IDX],
2268 &hdev->event_queue);
2269 if (rc) {
2270 dev_err(hdev->dev, "Failed to request IRQ %d", irq);
2271 goto free_irqs;
2272 }
2273
2274 goya->hw_cap_initialized |= HW_CAP_MSIX;
2275 return 0;
2276
2277free_irqs:
2278 for (i = 0 ; i < irq_cnt_init ; i++)
2279 free_irq(pci_irq_vector(hdev->pdev, i),
2280 &hdev->completion_queue[i]);
2281
2282 pci_free_irq_vectors(hdev->pdev);
2283 return rc;
2284}
2285
2286static void goya_sync_irqs(struct hl_device *hdev)
2287{
2288 struct goya_device *goya = hdev->asic_specific;
2289 int i;
2290
2291 if (!(goya->hw_cap_initialized & HW_CAP_MSIX))
2292 return;
2293
2294 /* Wait for all pending IRQs to finish */
2295 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
2296 synchronize_irq(pci_irq_vector(hdev->pdev, i));
2297
2298 synchronize_irq(pci_irq_vector(hdev->pdev, EVENT_QUEUE_MSIX_IDX));
2299}
2300
2301static void goya_disable_msix(struct hl_device *hdev)
2302{
2303 struct goya_device *goya = hdev->asic_specific;
2304 int i, irq;
2305
2306 if (!(goya->hw_cap_initialized & HW_CAP_MSIX))
2307 return;
2308
2309 goya_sync_irqs(hdev);
2310
2311 irq = pci_irq_vector(hdev->pdev, EVENT_QUEUE_MSIX_IDX);
2312 free_irq(irq, &hdev->event_queue);
2313
2314 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
2315 irq = pci_irq_vector(hdev->pdev, i);
2316 free_irq(irq, &hdev->completion_queue[i]);
2317 }
2318
2319 pci_free_irq_vectors(hdev->pdev);
2320
2321 goya->hw_cap_initialized &= ~HW_CAP_MSIX;
2322}
2323
2324static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
2325{
2326 u32 wait_timeout_ms, cpu_timeout_ms;
2327
2328 dev_info(hdev->dev,
2329 "Halting compute engines and disabling interrupts\n");
2330
2331 if (hdev->pldm) {
2332 wait_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
2333 cpu_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
2334 } else {
2335 wait_timeout_ms = GOYA_RESET_WAIT_MSEC;
2336 cpu_timeout_ms = GOYA_CPU_RESET_WAIT_MSEC;
2337 }
2338
2339 if (hard_reset) {
2340 /*
2341 * We don't know what the state of the CPU is, so make sure it is
2342 * stopped by any means necessary
2343 */
2344 WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_GOTO_WFE);
2345 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
2346 GOYA_ASYNC_EVENT_ID_HALT_MACHINE);
2347 msleep(cpu_timeout_ms);
2348 }
2349
2350 goya_stop_external_queues(hdev);
2351 goya_stop_internal_queues(hdev);
2352
2353 msleep(wait_timeout_ms);
2354
2355 goya_dma_stall(hdev);
2356 goya_tpc_stall(hdev);
2357 goya_mme_stall(hdev);
2358
2359 msleep(wait_timeout_ms);
2360
2361 goya_disable_external_queues(hdev);
2362 goya_disable_internal_queues(hdev);
2363
2364 if (hard_reset)
2365 goya_disable_msix(hdev);
2366 else
2367 goya_sync_irqs(hdev);
2368}
2369
2370/*
2371 * goya_push_fw_to_device - Push FW code to device
2372 *
2373 * @hdev: pointer to hl_device structure
2374 *
2375 * Copy the F/W code from the firmware file to the device memory.
2376 * Returns 0 on success
2377 *
2378 */
2379static int goya_push_fw_to_device(struct hl_device *hdev, const char *fw_name,
2380 void __iomem *dst)
2381{
2382 const struct firmware *fw;
2383 const u64 *fw_data;
2384 size_t fw_size, i;
2385 int rc;
2386
2387 rc = request_firmware(&fw, fw_name, hdev->dev);
2388
2389 if (rc) {
2390 dev_err(hdev->dev, "Failed to request %s\n", fw_name);
2391 goto out;
2392 }
2393
2394 fw_size = fw->size;
2395 if ((fw_size % 4) != 0) {
2396 dev_err(hdev->dev, "illegal %s firmware size %zu\n",
2397 fw_name, fw_size);
2398 rc = -EINVAL;
2399 goto out;
2400 }
2401
2402 dev_dbg(hdev->dev, "%s firmware size == %zu\n", fw_name, fw_size);
2403
2404 fw_data = (const u64 *) fw->data;
2405
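	/*
	 * The image was validated above to be a multiple of 4 bytes. Copy
	 * the bulk of it in 64-bit quadwords; if the size is not a multiple
	 * of 8, the trailing 4 bytes are written separately with a 32-bit
	 * access after the loop.
	 */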
2406 if ((fw->size % 8) != 0)
2407 fw_size -= 8;
2408
2409 for (i = 0 ; i < fw_size ; i += 8, fw_data++, dst += 8) {
2410 if (!(i & (0x80000 - 1))) {
2411 dev_dbg(hdev->dev,
2412 "copied so far %zu out of %zu for %s firmware",
2413 i, fw_size, fw_name);
2414 usleep_range(20, 100);
2415 }
2416
2417 writeq(*fw_data, dst);
2418 }
2419
2420 if ((fw->size % 8) != 0)
2421 writel(*(const u32 *) fw_data, dst);
2422
2423out:
2424 release_firmware(fw);
2425 return rc;
2426}
2427
2428static int goya_pldm_init_cpu(struct hl_device *hdev)
2429{
2430 char fw_name[200];
2431 void __iomem *dst;
2432 u32 val, unit_rst_val;
2433 int rc;
2434
2435 /* Must initialize SRAM scrambler before pushing u-boot to SRAM */
2436 goya_init_golden_registers(hdev);
2437
2438 /* Put ARM cores into reset */
2439 WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL, CPU_RESET_ASSERT);
2440 val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);
2441
2442 /* Reset the CA53 MACRO */
2443 unit_rst_val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
2444 WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, CA53_RESET);
2445 val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
2446 WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, unit_rst_val);
2447 val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
2448
2449 snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-u-boot.bin");
2450 dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + UBOOT_FW_OFFSET;
2451 rc = goya_push_fw_to_device(hdev, fw_name, dst);
2452 if (rc)
2453 return rc;
2454
2455 snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-fit.itb");
2456 dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
2457 rc = goya_push_fw_to_device(hdev, fw_name, dst);
2458 if (rc)
2459 return rc;
2460
2461 WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_FIT_RDY);
2462 WREG32(mmPSOC_GLOBAL_CONF_WARM_REBOOT, CPU_BOOT_STATUS_NA);
2463
2464 WREG32(mmCPU_CA53_CFG_RST_ADDR_LSB_0,
2465 lower_32_bits(SRAM_BASE_ADDR + UBOOT_FW_OFFSET));
2466 WREG32(mmCPU_CA53_CFG_RST_ADDR_MSB_0,
2467 upper_32_bits(SRAM_BASE_ADDR + UBOOT_FW_OFFSET));
2468
2469 /* Release ARM core 0 from reset */
2470 WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL,
2471 CPU_RESET_CORE0_DEASSERT);
2472 val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);
2473
2474 return 0;
2475}
2476
2477/*
2478 * FW component passes an offset from SRAM_BASE_ADDR in SCRATCHPAD_xx.
2479 * The version string should be located by that offset.
2480 */
2481static void goya_read_device_fw_version(struct hl_device *hdev,
2482 enum goya_fw_component fwc)
2483{
2484 const char *name;
2485 u32 ver_off;
2486 char *dest;
2487
2488 switch (fwc) {
2489 case FW_COMP_UBOOT:
2490 ver_off = RREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_29);
2491 dest = hdev->asic_prop.uboot_ver;
2492 name = "U-Boot";
2493 break;
2494 case FW_COMP_PREBOOT:
2495 ver_off = RREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_28);
2496 dest = hdev->asic_prop.preboot_ver;
2497 name = "Preboot";
2498 break;
2499 default:
2500 dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
2501 return;
2502 }
2503
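	/*
	 * The scratchpad holds an SRAM-relative location; strip the SRAM
	 * base so ver_off can be used as an offset into the SRAM/CFG BAR,
	 * and bounds-check it before copying the version string out.
	 */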
2504 ver_off &= ~((u32)SRAM_BASE_ADDR);
2505
2506 if (ver_off < SRAM_SIZE - VERSION_MAX_LEN) {
2507 memcpy_fromio(dest, hdev->pcie_bar[SRAM_CFG_BAR_ID] + ver_off,
2508 VERSION_MAX_LEN);
2509 } else {
2510 dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n",
2511 name, ver_off);
2512 strcpy(dest, "unavailable");
2513 }
2514}
2515
2516static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
2517{
2518 struct goya_device *goya = hdev->asic_specific;
2519 char fw_name[200];
2520 void __iomem *dst;
2521 u32 status;
2522 int rc;
2523
2524 if (!hdev->cpu_enable)
2525 return 0;
2526
2527 if (goya->hw_cap_initialized & HW_CAP_CPU)
2528 return 0;
2529
2530 /*
2531 * Before pushing u-boot/linux to the device, we need to set the DDR
2532 * BAR to the base address of DRAM
2533 */
2534 rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
2535 if (rc) {
2536 dev_err(hdev->dev,
2537 "failed to map DDR bar to DRAM base address\n");
2538 return rc;
2539 }
2540
2541 if (hdev->pldm) {
2542 rc = goya_pldm_init_cpu(hdev);
2543 if (rc)
2544 return rc;
2545
2546 goto out;
2547 }
2548
2549 /* Make sure CPU boot-loader is running */
2550 rc = hl_poll_timeout(
2551 hdev,
2552 mmPSOC_GLOBAL_CONF_WARM_REBOOT,
2553 status,
2554 (status == CPU_BOOT_STATUS_DRAM_RDY) ||
2555 (status == CPU_BOOT_STATUS_SRAM_AVAIL),
2556 10000,
2557 cpu_timeout);
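	/*
	 * hl_poll_timeout() above is assumed to sample the register roughly
	 * every 10000us until the condition holds or cpu_timeout expires,
	 * leaving the last sampled value in status for the error dump below.
	 */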
2558
2559 if (rc) {
2560 dev_err(hdev->dev, "Error in ARM u-boot!");
2561 switch (status) {
2562 case CPU_BOOT_STATUS_NA:
2563 dev_err(hdev->dev,
2564 "ARM status %d - BTL did NOT run\n", status);
2565 break;
2566 case CPU_BOOT_STATUS_IN_WFE:
2567 dev_err(hdev->dev,
2568 "ARM status %d - Inside WFE loop\n", status);
2569 break;
2570 case CPU_BOOT_STATUS_IN_BTL:
2571 dev_err(hdev->dev,
2572 "ARM status %d - Stuck in BTL\n", status);
2573 break;
2574 case CPU_BOOT_STATUS_IN_PREBOOT:
2575 dev_err(hdev->dev,
2576 "ARM status %d - Stuck in Preboot\n", status);
2577 break;
2578 case CPU_BOOT_STATUS_IN_SPL:
2579 dev_err(hdev->dev,
2580 "ARM status %d - Stuck in SPL\n", status);
2581 break;
2582 case CPU_BOOT_STATUS_IN_UBOOT:
2583 dev_err(hdev->dev,
2584 "ARM status %d - Stuck in u-boot\n", status);
2585 break;
2586 case CPU_BOOT_STATUS_DRAM_INIT_FAIL:
2587 dev_err(hdev->dev,
2588 "ARM status %d - DDR initialization failed\n",
2589 status);
2590 break;
2591 default:
2592 dev_err(hdev->dev,
2593 "ARM status %d - Invalid status code\n",
2594 status);
2595 break;
2596 }
2597 return -EIO;
2598 }
2599
2600 /* Read U-Boot version now in case we will later fail */
2601 goya_read_device_fw_version(hdev, FW_COMP_UBOOT);
2602 goya_read_device_fw_version(hdev, FW_COMP_PREBOOT);
2603
2604 if (status == CPU_BOOT_STATUS_SRAM_AVAIL)
2605 goto out;
2606
2607 if (!hdev->fw_loading) {
2608 dev_info(hdev->dev, "Skip loading FW\n");
2609 goto out;
2610 }
2611
2612 snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-fit.itb");
2613 dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
2614 rc = goya_push_fw_to_device(hdev, fw_name, dst);
2615 if (rc)
2616 return rc;
2617
2618 WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_FIT_RDY);
2619
2620 rc = hl_poll_timeout(
2621 hdev,
2622 mmPSOC_GLOBAL_CONF_WARM_REBOOT,
2623 status,
2624 (status == CPU_BOOT_STATUS_SRAM_AVAIL),
2625 10000,
2626 cpu_timeout);
2627
2628 if (rc) {
2629 if (status == CPU_BOOT_STATUS_FIT_CORRUPTED)
2630 dev_err(hdev->dev,
2631 "ARM u-boot reports FIT image is corrupted\n");
2632 else
2633 dev_err(hdev->dev,
2634 "ARM Linux failed to load, %d\n", status);
2635 WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_NA);
2636 return -EIO;
2637 }
2638
2639 dev_info(hdev->dev, "Successfully loaded firmware to device\n");
2640
2641out:
2642 goya->hw_cap_initialized |= HW_CAP_CPU;
2643
2644 return 0;
2645}
2646
2647static int goya_mmu_init(struct hl_device *hdev)
2648{
2649 struct asic_fixed_properties *prop = &hdev->asic_prop;
2650 struct goya_device *goya = hdev->asic_specific;
2651 u64 hop0_addr;
2652 int rc, i;
2653
2654 if (!hdev->mmu_enable)
2655 return 0;
2656
2657 if (goya->hw_cap_initialized & HW_CAP_MMU)
2658 return 0;
2659
2660 hdev->dram_supports_virtual_memory = true;
2661 hdev->dram_default_page_mapping = true;
2662
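	/*
	 * The MMU page tables live in a reserved DRAM region: each ASID
	 * gets its own hop-0 table at a fixed stride from mmu_pgt_addr,
	 * and the address of that table is registered with the MMU below.
	 */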
2663 for (i = 0 ; i < prop->max_asid ; i++) {
2664 hop0_addr = prop->mmu_pgt_addr +
2665 (i * prop->mmu_hop_table_size);
2666
2667 rc = goya_mmu_update_asid_hop0_addr(hdev, i, hop0_addr);
2668 if (rc) {
2669 dev_err(hdev->dev,
2670 "failed to set hop0 addr for asid %d\n", i);
2671 goto err;
2672 }
2673 }
2674
2675 goya->hw_cap_initialized |= HW_CAP_MMU;
2676
2677 /* init MMU cache manage page */
2678 WREG32(mmSTLB_CACHE_INV_BASE_39_8,
2679 lower_32_bits(MMU_CACHE_MNG_ADDR >> 8));
2680 WREG32(mmSTLB_CACHE_INV_BASE_49_40, MMU_CACHE_MNG_ADDR >> 40);
2681
2682 /* Remove follower feature due to performance bug */
2683 WREG32_AND(mmSTLB_STLB_FEATURE_EN,
2684 (~STLB_STLB_FEATURE_EN_FOLLOWER_EN_MASK));
2685
2686 hdev->asic_funcs->mmu_invalidate_cache(hdev, true);
2687
2688 WREG32(mmMMU_MMU_ENABLE, 1);
2689 WREG32(mmMMU_SPI_MASK, 0xF);
2690
2691 return 0;
2692
2693err:
2694 return rc;
2695}
2696
2697/*
2698 * goya_hw_init - Goya hardware initialization code
2699 *
2700 * @hdev: pointer to hl_device structure
2701 *
2702 * Returns 0 on success
2703 *
2704 */
2705static int goya_hw_init(struct hl_device *hdev)
2706{
2707 struct asic_fixed_properties *prop = &hdev->asic_prop;
2708 u32 val;
2709 int rc;
2710
2711 dev_info(hdev->dev, "Starting initialization of H/W\n");
2712
2713 /* Perform read from the device to make sure device is up */
2714 val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
2715
2716 /*
2717 * Let's mark in the H/W that we have reached this point. We check
2718 * this value in the reset_before_init function to understand whether
2719 * we need to reset the chip before doing H/W init. This register is
2720 * cleared by the H/W upon H/W reset
2721 */
2722 WREG32(mmPSOC_GLOBAL_CONF_APP_STATUS, HL_DEVICE_HW_STATE_DIRTY);
2723
2724 rc = goya_init_cpu(hdev, GOYA_CPU_TIMEOUT_USEC);
2725 if (rc) {
2726 dev_err(hdev->dev, "failed to initialize CPU\n");
2727 return rc;
2728 }
2729
2730 goya_tpc_mbist_workaround(hdev);
2731
2732 goya_init_golden_registers(hdev);
2733
2734 /*
2735 * After CPU initialization is finished, change DDR bar mapping inside
2736 * iATU to point to the start address of the MMU page tables
2737 */
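	/*
	 * The mask arithmetic below suggests the DDR BAR can only be mapped
	 * at a granularity of its own size, so the page-tables address is
	 * rounded down to the nearest BAR-size boundary.
	 */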
2738 rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE +
2739 (MMU_PAGE_TABLES_ADDR & ~(prop->dram_pci_bar_size - 0x1ull)));
2740 if (rc) {
2741 dev_err(hdev->dev,
2742 "failed to map DDR bar to MMU page tables\n");
2743 return rc;
2744 }
2745
2746 rc = goya_mmu_init(hdev);
2747 if (rc)
2748 return rc;
2749
2750 goya_init_security(hdev);
2751
2752 goya_init_dma_qmans(hdev);
2753
2754 goya_init_mme_qmans(hdev);
2755
2756 goya_init_tpc_qmans(hdev);
2757
2758 /* MSI-X must be enabled before CPU queues are initialized */
2759 rc = goya_enable_msix(hdev);
2760 if (rc)
2761 goto disable_queues;
2762
2763 rc = goya_init_cpu_queues(hdev);
2764 if (rc) {
2765 dev_err(hdev->dev, "failed to initialize CPU H/W queues %d\n",
2766 rc);
2767 goto disable_msix;
2768 }
2769
2770 /* CPU initialization is finished, we can now move to 48 bit DMA mask */
2771 rc = pci_set_dma_mask(hdev->pdev, DMA_BIT_MASK(48));
2772 if (rc) {
2773 dev_warn(hdev->dev, "Unable to set pci dma mask to 48 bits\n");
2774 rc = pci_set_dma_mask(hdev->pdev, DMA_BIT_MASK(32));
2775 if (rc) {
2776 dev_err(hdev->dev,
2777 "Unable to set pci dma mask to 32 bits\n");
2778 goto disable_pci_access;
2779 }
2780 }
2781
2782 rc = pci_set_consistent_dma_mask(hdev->pdev, DMA_BIT_MASK(48));
2783 if (rc) {
2784 dev_warn(hdev->dev,
2785 "Unable to set pci consistent dma mask to 48 bits\n");
2786 rc = pci_set_consistent_dma_mask(hdev->pdev, DMA_BIT_MASK(32));
2787 if (rc) {
2788 dev_err(hdev->dev,
2789 "Unable to set pci consistent dma mask to 32 bits\n");
2790 goto disable_pci_access;
2791 }
2792 }
2793
2794 /* Perform read from the device to flush all MSI-X configuration */
2795 val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
2796
2797 return 0;
2798
2799disable_pci_access:
2800 goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
2801disable_msix:
2802 goya_disable_msix(hdev);
2803disable_queues:
2804 goya_disable_internal_queues(hdev);
2805 goya_disable_external_queues(hdev);
2806
2807 return rc;
2808}
2809
2810/*
2811 * goya_hw_fini - Goya hardware tear-down code
2812 *
2813 * @hdev: pointer to hl_device structure
2814 * @hard_reset: should we do hard reset to all engines or just reset the
2815 * compute/dma engines
2816 */
2817static void goya_hw_fini(struct hl_device *hdev, bool hard_reset)
2818{
2819 struct goya_device *goya = hdev->asic_specific;
2820 u32 reset_timeout_ms, status;
2821
2822 if (hdev->pldm)
2823 reset_timeout_ms = GOYA_PLDM_RESET_TIMEOUT_MSEC;
2824 else
2825 reset_timeout_ms = GOYA_RESET_TIMEOUT_MSEC;
2826
2827 if (hard_reset) {
2828 goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
2829 goya_disable_clk_rlx(hdev);
2830 goya_set_pll_refclk(hdev);
2831
2832 WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, RESET_ALL);
2833 dev_info(hdev->dev,
2834 "Issued HARD reset command, going to wait %dms\n",
2835 reset_timeout_ms);
2836 } else {
2837 WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, DMA_MME_TPC_RESET);
2838 dev_info(hdev->dev,
2839 "Issued SOFT reset command, going to wait %dms\n",
2840 reset_timeout_ms);
2841 }
2842
2843 /*
2844 * After hard reset, we can't poll the BTM_FSM register because the PSOC
2845 * itself is in reset. In both cases we need to wait until the reset
2846 * is deasserted
2847 */
2848 msleep(reset_timeout_ms);
2849
2850 status = RREG32(mmPSOC_GLOBAL_CONF_BTM_FSM);
2851 if (status & PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK)
2852 dev_err(hdev->dev,
2853 "Timeout while waiting for device to reset 0x%x\n",
2854 status);
2855
2856 if (!hard_reset) {
2857 goya->hw_cap_initialized &= ~(HW_CAP_DMA | HW_CAP_MME |
2858 HW_CAP_GOLDEN | HW_CAP_TPC);
2859 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
2860 GOYA_ASYNC_EVENT_ID_SOFT_RESET);
2861 return;
2862 }
2863
2864 /* Chicken bit to re-initiate boot sequencer flow */
2865 WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START,
2866 1 << PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_SHIFT);
2867 /* Move boot manager FSM to pre boot sequencer init state */
2868 WREG32(mmPSOC_GLOBAL_CONF_SW_BTM_FSM,
2869 0xA << PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_SHIFT);
2870
2871 goya->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q |
2872 HW_CAP_DDR_0 | HW_CAP_DDR_1 |
2873 HW_CAP_DMA | HW_CAP_MME |
2874 HW_CAP_MMU | HW_CAP_TPC_MBIST |
2875 HW_CAP_GOLDEN | HW_CAP_TPC);
2876 memset(goya->events_stat, 0, sizeof(goya->events_stat));
2877
2878 if (!hdev->pldm) {
2879 int rc;
2880 /* In case we are running inside a VM and the VM is
2881 * shutting down, we need to make sure the CPU boot-loader
2882 * is running before we can continue the VM shutdown.
2883 * That is because the VM will send an FLR signal that
2884 * we must answer.
2885 */
2886 dev_info(hdev->dev,
2887 "Going to wait up to %ds for CPU boot loader\n",
2888 GOYA_CPU_TIMEOUT_USEC / 1000 / 1000);
2889
2890 rc = hl_poll_timeout(
2891 hdev,
2892 mmPSOC_GLOBAL_CONF_WARM_REBOOT,
2893 status,
2894 (status == CPU_BOOT_STATUS_DRAM_RDY),
2895 10000,
2896 GOYA_CPU_TIMEOUT_USEC);
2897 if (rc)
2898 dev_err(hdev->dev,
2899 "failed to wait for CPU boot loader\n");
2900 }
2901}
2902
2903int goya_suspend(struct hl_device *hdev)
2904{
2905 int rc;
2906
2907 rc = goya_stop_internal_queues(hdev);
2908
2909 if (rc) {
2910 dev_err(hdev->dev, "failed to stop internal queues\n");
2911 return rc;
2912 }
2913
2914 rc = goya_stop_external_queues(hdev);
2915
2916 if (rc) {
2917 dev_err(hdev->dev, "failed to stop external queues\n");
2918 return rc;
2919 }
2920
2921 rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
2922 if (rc)
2923 dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
2924
2925 return rc;
2926}
2927
2928int goya_resume(struct hl_device *hdev)
2929{
2930 int rc;
2931
2932 goya_resume_external_queues(hdev);
2933 goya_resume_internal_queues(hdev);
2934
2935 rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
2936 if (rc)
2937 dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
2938 return rc;
2939}
2940
2941static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
2942 u64 kaddress, phys_addr_t paddress, u32 size)
2943{
2944 int rc;
2945
2946 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
2947 VM_DONTCOPY | VM_NORESERVE;
2948
2949 rc = remap_pfn_range(vma, vma->vm_start, paddress >> PAGE_SHIFT,
2950 size, vma->vm_page_prot);
2951 if (rc)
2952 dev_err(hdev->dev, "remap_pfn_range error %d", rc);
2953
2954 return rc;
2955}
2956
2957static void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
2958{
2959 u32 db_reg_offset, db_value;
2960 bool invalid_queue = false;
2961
2962 switch (hw_queue_id) {
2963 case GOYA_QUEUE_ID_DMA_0:
2964 db_reg_offset = mmDMA_QM_0_PQ_PI;
2965 break;
2966
2967 case GOYA_QUEUE_ID_DMA_1:
2968 db_reg_offset = mmDMA_QM_1_PQ_PI;
2969 break;
2970
2971 case GOYA_QUEUE_ID_DMA_2:
2972 db_reg_offset = mmDMA_QM_2_PQ_PI;
2973 break;
2974
2975 case GOYA_QUEUE_ID_DMA_3:
2976 db_reg_offset = mmDMA_QM_3_PQ_PI;
2977 break;
2978
2979 case GOYA_QUEUE_ID_DMA_4:
2980 db_reg_offset = mmDMA_QM_4_PQ_PI;
2981 break;
2982
2983 case GOYA_QUEUE_ID_CPU_PQ:
2984 if (hdev->cpu_queues_enable)
2985 db_reg_offset = mmCPU_IF_PF_PQ_PI;
2986 else
2987 invalid_queue = true;
2988 break;
2989
2990 case GOYA_QUEUE_ID_MME:
2991 db_reg_offset = mmMME_QM_PQ_PI;
2992 break;
2993
2994 case GOYA_QUEUE_ID_TPC0:
2995 db_reg_offset = mmTPC0_QM_PQ_PI;
2996 break;
2997
2998 case GOYA_QUEUE_ID_TPC1:
2999 db_reg_offset = mmTPC1_QM_PQ_PI;
3000 break;
3001
3002 case GOYA_QUEUE_ID_TPC2:
3003 db_reg_offset = mmTPC2_QM_PQ_PI;
3004 break;
3005
3006 case GOYA_QUEUE_ID_TPC3:
3007 db_reg_offset = mmTPC3_QM_PQ_PI;
3008 break;
3009
3010 case GOYA_QUEUE_ID_TPC4:
3011 db_reg_offset = mmTPC4_QM_PQ_PI;
3012 break;
3013
3014 case GOYA_QUEUE_ID_TPC5:
3015 db_reg_offset = mmTPC5_QM_PQ_PI;
3016 break;
3017
3018 case GOYA_QUEUE_ID_TPC6:
3019 db_reg_offset = mmTPC6_QM_PQ_PI;
3020 break;
3021
3022 case GOYA_QUEUE_ID_TPC7:
3023 db_reg_offset = mmTPC7_QM_PQ_PI;
3024 break;
3025
3026 default:
3027 invalid_queue = true;
3028 }
3029
3030 if (invalid_queue) {
3031 /* Should never get here */
3032 dev_err(hdev->dev, "h/w queue %d is invalid. Can't set pi\n",
3033 hw_queue_id);
3034 return;
3035 }
3036
3037 db_value = pi;
3038
3039 /* ring the doorbell */
3040 WREG32(db_reg_offset, db_value);
3041
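	/*
	 * The CPU queue is consumed by the embedded CPU, which seemingly
	 * does not snoop its PQ PI register; raise a GIC event as well so
	 * the ARM cores notice the new PI value.
	 */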
3042 if (hw_queue_id == GOYA_QUEUE_ID_CPU_PQ)
3043 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
3044 GOYA_ASYNC_EVENT_ID_PI_UPDATE);
3045}
3046
3047void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val)
3048{
3049 /* Not needed in Goya */
3050}
3051
3052static void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size,
3053 dma_addr_t *dma_handle, gfp_t flags)
3054{
3055 return dma_alloc_coherent(&hdev->pdev->dev, size, dma_handle, flags);
3056}
3057
3058static void goya_dma_free_coherent(struct hl_device *hdev, size_t size,
3059 void *cpu_addr, dma_addr_t dma_handle)
3060{
3061 dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, dma_handle);
3062}
3063
3064void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
3065 dma_addr_t *dma_handle, u16 *queue_len)
3066{
3067 void *base;
3068 u32 offset;
3069
3070 *dma_handle = hdev->asic_prop.sram_base_address;
3071
3072 base = hdev->pcie_bar[SRAM_CFG_BAR_ID];
3073
3074 switch (queue_id) {
3075 case GOYA_QUEUE_ID_MME:
3076 offset = MME_QMAN_BASE_OFFSET;
3077 *queue_len = MME_QMAN_LENGTH;
3078 break;
3079 case GOYA_QUEUE_ID_TPC0:
3080 offset = TPC0_QMAN_BASE_OFFSET;
3081 *queue_len = TPC_QMAN_LENGTH;
3082 break;
3083 case GOYA_QUEUE_ID_TPC1:
3084 offset = TPC1_QMAN_BASE_OFFSET;
3085 *queue_len = TPC_QMAN_LENGTH;
3086 break;
3087 case GOYA_QUEUE_ID_TPC2:
3088 offset = TPC2_QMAN_BASE_OFFSET;
3089 *queue_len = TPC_QMAN_LENGTH;
3090 break;
3091 case GOYA_QUEUE_ID_TPC3:
3092 offset = TPC3_QMAN_BASE_OFFSET;
3093 *queue_len = TPC_QMAN_LENGTH;
3094 break;
3095 case GOYA_QUEUE_ID_TPC4:
3096 offset = TPC4_QMAN_BASE_OFFSET;
3097 *queue_len = TPC_QMAN_LENGTH;
3098 break;
3099 case GOYA_QUEUE_ID_TPC5:
3100 offset = TPC5_QMAN_BASE_OFFSET;
3101 *queue_len = TPC_QMAN_LENGTH;
3102 break;
3103 case GOYA_QUEUE_ID_TPC6:
3104 offset = TPC6_QMAN_BASE_OFFSET;
3105 *queue_len = TPC_QMAN_LENGTH;
3106 break;
3107 case GOYA_QUEUE_ID_TPC7:
3108 offset = TPC7_QMAN_BASE_OFFSET;
3109 *queue_len = TPC_QMAN_LENGTH;
3110 break;
3111 default:
3112 dev_err(hdev->dev, "Got invalid queue id %d\n", queue_id);
3113 return NULL;
3114 }
3115
3116 base += offset;
3117 *dma_handle += offset;
3118
3119 return base;
3120}
3121
3122static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
3123{
3124 struct goya_device *goya = hdev->asic_specific;
3125 struct packet_msg_prot *fence_pkt;
3126 u32 *fence_ptr;
3127 dma_addr_t fence_dma_addr;
3128 struct hl_cb *cb;
3129 u32 tmp;
3130 int rc;
3131
3132 if (!hdev->asic_funcs->is_device_idle(hdev)) {
3133 dev_err_ratelimited(hdev->dev,
3134 "Can't send KMD job on QMAN0 if device is not idle\n");
3135 return -EFAULT;
3136 }
3137
3138 fence_ptr = hdev->asic_funcs->dma_pool_zalloc(hdev, 4, GFP_KERNEL,
3139 &fence_dma_addr);
3140 if (!fence_ptr) {
3141 dev_err(hdev->dev,
3142 "Failed to allocate fence memory for QMAN0\n");
3143 return -ENOMEM;
3144 }
3145
3146 *fence_ptr = 0;
3147
3148 if (goya->hw_cap_initialized & HW_CAP_MMU) {
3149 WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_FULLY_TRUSTED);
3150 RREG32(mmDMA_QM_0_GLBL_PROT);
3151 }
3152
3153 /*
3154 * The Goya CS parser saves space for 2 x packet_msg_prot at the end of the
3155 * CB. For synchronized kernel jobs we only need space for 1 packet_msg_prot
3156 */
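	/*
	 * Resulting CB tail layout for a QMAN0 kernel job (sketch):
	 *
	 *   +------------------+------------------+------------------+
	 *   | patched user     | packet_msg_prot  | packet_msg_prot  |
	 *   | packets          | (fence, sent)    | (unused here)    |
	 *   +------------------+------------------+------------------+
	 *                                         ^ adjusted job_cb_size
	 */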
3157 job->job_cb_size -= sizeof(struct packet_msg_prot);
3158
3159 cb = job->patched_cb;
3160
3161 fence_pkt = (struct packet_msg_prot *) (uintptr_t) (cb->kernel_address +
3162 job->job_cb_size - sizeof(struct packet_msg_prot));
3163
3164 fence_pkt->ctl = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
3165 (1 << GOYA_PKT_CTL_EB_SHIFT) |
3166 (1 << GOYA_PKT_CTL_MB_SHIFT);
3167 fence_pkt->value = GOYA_QMAN0_FENCE_VAL;
3168 fence_pkt->addr = fence_dma_addr +
3169 hdev->asic_prop.host_phys_base_address;
3170
3171 rc = hl_hw_queue_send_cb_no_cmpl(hdev, GOYA_QUEUE_ID_DMA_0,
3172 job->job_cb_size, cb->bus_address);
3173 if (rc) {
3174 dev_err(hdev->dev, "Failed to send CB on QMAN0, %d\n", rc);
3175 goto free_fence_ptr;
3176 }
3177
3178 rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) fence_ptr,
3179 HL_DEVICE_TIMEOUT_USEC, &tmp);
3180
3181 hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_DMA_0);
3182
3183 if ((rc) || (tmp != GOYA_QMAN0_FENCE_VAL)) {
3184 dev_err(hdev->dev, "QMAN0 Job hasn't finished in time\n");
3185 rc = -ETIMEDOUT;
3186 }
3187
3188free_fence_ptr:
3189 hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_ptr,
3190 fence_dma_addr);
3191
3192 if (goya->hw_cap_initialized & HW_CAP_MMU) {
3193 WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_PARTLY_TRUSTED);
3194 RREG32(mmDMA_QM_0_GLBL_PROT);
3195 }
3196
3197 return rc;
3198}
3199
3200int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
3201 u32 timeout, long *result)
3202{
3203 struct goya_device *goya = hdev->asic_specific;
3204 struct armcp_packet *pkt;
3205 dma_addr_t pkt_dma_addr;
3206 u32 tmp;
3207 int rc = 0;
3208
3209 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) {
3210 if (result)
3211 *result = 0;
3212 return 0;
3213 }
3214
3215 if (len > CPU_CB_SIZE) {
3216 dev_err(hdev->dev, "Invalid CPU message size of %d bytes\n",
3217 len);
3218 return -ENOMEM;
3219 }
3220
3221 pkt = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, len,
3222 &pkt_dma_addr);
3223 if (!pkt) {
3224 dev_err(hdev->dev,
3225 "Failed to allocate DMA memory for packet to CPU\n");
3226 return -ENOMEM;
3227 }
3228
3229 memcpy(pkt, msg, len);
3230
3231 mutex_lock(&hdev->send_cpu_message_lock);
3232
3233 if (hdev->disabled)
3234 goto out;
3235
3236 if (hdev->device_cpu_disabled) {
3237 rc = -EIO;
3238 goto out;
3239 }
3240
3241 rc = hl_hw_queue_send_cb_no_cmpl(hdev, GOYA_QUEUE_ID_CPU_PQ, len,
3242 pkt_dma_addr);
3243 if (rc) {
3244 dev_err(hdev->dev, "Failed to send CB on CPU PQ (%d)\n", rc);
3245 goto out;
3246 }
3247
3248 rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) &pkt->fence,
3249 timeout, &tmp);
3250
3251 hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_CPU_PQ);
3252
3253 if (rc == -ETIMEDOUT) {
3254 dev_err(hdev->dev, "Timeout while waiting for device CPU\n");
3255 hdev->device_cpu_disabled = true;
3256 goto out;
3257 }
3258
3259 if (tmp == ARMCP_PACKET_FENCE_VAL) {
3260 rc = (pkt->ctl & ARMCP_PKT_CTL_RC_MASK) >>
3261 ARMCP_PKT_CTL_RC_SHIFT;
3262 if (rc) {
3263 dev_err(hdev->dev,
3264 "F/W ERROR %d for CPU packet %d\n",
3265 rc, (pkt->ctl & ARMCP_PKT_CTL_OPCODE_MASK)
3266 >> ARMCP_PKT_CTL_OPCODE_SHIFT);
3267 rc = -EINVAL;
3268 } else if (result) {
3269 *result = pkt->result;
3270 }
3271 } else {
3272 dev_err(hdev->dev, "CPU packet wrong fence value\n");
3273 rc = -EINVAL;
3274 }
3275
3276out:
3277 mutex_unlock(&hdev->send_cpu_message_lock);
3278
3279 hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, len, pkt);
3280
3281 return rc;
3282}
3283
3284int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
3285{
3286 struct packet_msg_prot *fence_pkt;
3287 dma_addr_t pkt_dma_addr;
3288 u32 fence_val, tmp;
3289 dma_addr_t fence_dma_addr;
3290 u32 *fence_ptr;
3291 int rc;
3292
3293 fence_val = GOYA_QMAN0_FENCE_VAL;
3294
3295 fence_ptr = hdev->asic_funcs->dma_pool_zalloc(hdev, 4, GFP_KERNEL,
3296 &fence_dma_addr);
3297 if (!fence_ptr) {
3298 dev_err(hdev->dev,
3299 "Failed to allocate memory for queue testing\n");
3300 return -ENOMEM;
3301 }
3302
3303 *fence_ptr = 0;
3304
3305 fence_pkt = hdev->asic_funcs->dma_pool_zalloc(hdev,
3306 sizeof(struct packet_msg_prot),
3307 GFP_KERNEL, &pkt_dma_addr);
3308 if (!fence_pkt) {
3309 dev_err(hdev->dev,
3310 "Failed to allocate packet for queue testing\n");
3311 rc = -ENOMEM;
3312 goto free_fence_ptr;
3313 }
3314
3315 fence_pkt->ctl = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
3316 (1 << GOYA_PKT_CTL_EB_SHIFT) |
3317 (1 << GOYA_PKT_CTL_MB_SHIFT);
3318 fence_pkt->value = fence_val;
3319 fence_pkt->addr = fence_dma_addr +
3320 hdev->asic_prop.host_phys_base_address;
3321
3322 rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id,
3323 sizeof(struct packet_msg_prot),
3324 pkt_dma_addr);
3325 if (rc) {
3326 dev_err(hdev->dev,
3327 "Failed to send fence packet\n");
3328 goto free_pkt;
3329 }
3330
3331 rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) fence_ptr,
3332 GOYA_TEST_QUEUE_WAIT_USEC, &tmp);
3333
3334 hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
3335
3336 if ((!rc) && (tmp == fence_val)) {
3337 dev_info(hdev->dev,
3338 "queue test on H/W queue %d succeeded\n",
3339 hw_queue_id);
3340 } else {
3341 dev_err(hdev->dev,
3342 "H/W queue %d test failed (scratch(0x%08llX) == 0x%08X)\n",
3343 hw_queue_id, (unsigned long long) fence_dma_addr, tmp);
3344 rc = -EINVAL;
3345 }
3346
3347free_pkt:
3348 hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_pkt,
3349 pkt_dma_addr);
3350free_fence_ptr:
3351 hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_ptr,
3352 fence_dma_addr);
3353 return rc;
3354}
3355
3356int goya_test_cpu_queue(struct hl_device *hdev)
3357{
3358 struct armcp_packet test_pkt;
3359 long result;
3360 int rc;
3361
3362 /* cpu_queues_enable flag is always checked in send cpu message */
3363
3364 memset(&test_pkt, 0, sizeof(test_pkt));
3365
3366 test_pkt.ctl = ARMCP_PACKET_TEST << ARMCP_PKT_CTL_OPCODE_SHIFT;
3367 test_pkt.value = ARMCP_PACKET_FENCE_VAL;
3368
3369 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt,
3370 sizeof(test_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
3371
3372 if (!rc) {
3373 if (result == ARMCP_PACKET_FENCE_VAL)
3374 dev_info(hdev->dev,
3375 "queue test on CPU queue succeeded\n");
3376 else
3377 dev_err(hdev->dev,
3378 "CPU queue test failed (0x%08lX)\n", result);
3379 } else {
3380 dev_err(hdev->dev, "CPU queue test failed, error %d\n", rc);
3381 }
3382
3383 return rc;
3384}
3385
3386static int goya_test_queues(struct hl_device *hdev)
3387{
3388 struct goya_device *goya = hdev->asic_specific;
3389 int i, rc, ret_val = 0;
3390
3391 for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
3392 rc = goya_test_queue(hdev, i);
3393 if (rc)
3394 ret_val = -EINVAL;
3395 }
3396
3397 if (hdev->cpu_queues_enable) {
3398 rc = goya->test_cpu_queue(hdev);
3399 if (rc)
3400 ret_val = -EINVAL;
3401 }
3402
3403 return ret_val;
3404}
3405
3406static void *goya_dma_pool_zalloc(struct hl_device *hdev, size_t size,
3407 gfp_t mem_flags, dma_addr_t *dma_handle)
3408{
3409 if (size > GOYA_DMA_POOL_BLK_SIZE)
3410 return NULL;
3411
3412 return dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle);
3413}
3414
3415static void goya_dma_pool_free(struct hl_device *hdev, void *vaddr,
3416 dma_addr_t dma_addr)
3417{
3418 dma_pool_free(hdev->dma_pool, vaddr, dma_addr);
3419}
3420
3421static void *goya_cpu_accessible_dma_pool_alloc(struct hl_device *hdev,
3422 size_t size, dma_addr_t *dma_handle)
3423{
3424 u64 kernel_addr;
3425
3426 /* roundup to CPU_PKT_SIZE */
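	/*
	 * (size + (CPU_PKT_SIZE - 1)) & CPU_PKT_MASK is the usual
	 * power-of-2 round-up idiom; this assumes CPU_PKT_MASK is
	 * ~(CPU_PKT_SIZE - 1) and that CPU_PKT_SIZE is a power of 2.
	 */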
3427 size = (size + (CPU_PKT_SIZE - 1)) & CPU_PKT_MASK;
3428
3429 kernel_addr = gen_pool_alloc(hdev->cpu_accessible_dma_pool, size);
3430
3431 *dma_handle = hdev->cpu_accessible_dma_address +
3432 (kernel_addr - (u64) (uintptr_t) hdev->cpu_accessible_dma_mem);
3433
3434 return (void *) (uintptr_t) kernel_addr;
3435}
3436
3437static void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev,
3438 size_t size, void *vaddr)
3439{
3440 /* roundup to CPU_PKT_SIZE */
3441 size = (size + (CPU_PKT_SIZE - 1)) & CPU_PKT_MASK;
3442
3443 gen_pool_free(hdev->cpu_accessible_dma_pool, (u64) (uintptr_t) vaddr,
3444 size);
3445}
3446
3447static int goya_dma_map_sg(struct hl_device *hdev, struct scatterlist *sg,
3448 int nents, enum dma_data_direction dir)
3449{
3450 if (!dma_map_sg(&hdev->pdev->dev, sg, nents, dir))
3451 return -ENOMEM;
3452
3453 return 0;
3454}
3455
3456static void goya_dma_unmap_sg(struct hl_device *hdev, struct scatterlist *sg,
3457 int nents, enum dma_data_direction dir)
3458{
3459 dma_unmap_sg(&hdev->pdev->dev, sg, nents, dir);
3460}
3461
3462u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt)
3463{
3464 struct scatterlist *sg, *sg_next_iter;
3465 u32 count, dma_desc_cnt;
3466 u64 len, len_next;
3467 dma_addr_t addr, addr_next;
3468
3469 dma_desc_cnt = 0;
3470
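	/*
	 * Walk the scatter-gather table and merge entries that are
	 * physically contiguous, as long as the merged length stays within
	 * DMA_MAX_TRANSFER_SIZE; each resulting run costs one LIN_DMA
	 * descriptor in the patched CB.
	 */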
3471 for_each_sg(sgt->sgl, sg, sgt->nents, count) {
3472
3473 len = sg_dma_len(sg);
3474 addr = sg_dma_address(sg);
3475
3476 if (len == 0)
3477 break;
3478
3479 while ((count + 1) < sgt->nents) {
3480 sg_next_iter = sg_next(sg);
3481 len_next = sg_dma_len(sg_next_iter);
3482 addr_next = sg_dma_address(sg_next_iter);
3483
3484 if (len_next == 0)
3485 break;
3486
3487 if ((addr + len == addr_next) &&
3488 (len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
3489 len += len_next;
3490 count++;
3491 sg = sg_next_iter;
3492 } else {
3493 break;
3494 }
3495 }
3496
3497 dma_desc_cnt++;
3498 }
3499
3500 return dma_desc_cnt * sizeof(struct packet_lin_dma);
3501}
3502
3503static int goya_pin_memory_before_cs(struct hl_device *hdev,
3504 struct hl_cs_parser *parser,
3505 struct packet_lin_dma *user_dma_pkt,
3506 u64 addr, enum dma_data_direction dir)
3507{
3508 struct hl_userptr *userptr;
3509 int rc;
3510
3511 if (hl_userptr_is_pinned(hdev, addr, user_dma_pkt->tsize,
3512 parser->job_userptr_list, &userptr))
3513 goto already_pinned;
3514
3515 userptr = kzalloc(sizeof(*userptr), GFP_ATOMIC);
3516 if (!userptr)
3517 return -ENOMEM;
3518
3519 rc = hl_pin_host_memory(hdev, addr, user_dma_pkt->tsize, userptr);
3520 if (rc)
3521 goto free_userptr;
3522
3523 list_add_tail(&userptr->job_node, parser->job_userptr_list);
3524
3525 rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
3526 userptr->sgt->nents, dir);
3527 if (rc) {
3528 dev_err(hdev->dev, "failed to map sgt with DMA region\n");
3529 goto unpin_memory;
3530 }
3531
3532 userptr->dma_mapped = true;
3533 userptr->dir = dir;
3534
3535already_pinned:
3536 parser->patched_cb_size +=
3537 goya_get_dma_desc_list_size(hdev, userptr->sgt);
3538
3539 return 0;
3540
3541unpin_memory:
3542 hl_unpin_host_memory(hdev, userptr);
3543free_userptr:
3544 kfree(userptr);
3545 return rc;
3546}
3547
3548static int goya_validate_dma_pkt_host(struct hl_device *hdev,
3549 struct hl_cs_parser *parser,
3550 struct packet_lin_dma *user_dma_pkt)
3551{
3552 u64 device_memory_addr, addr;
3553 enum dma_data_direction dir;
3554 enum goya_dma_direction user_dir;
3555 bool sram_addr = true;
3556 bool skip_host_mem_pin = false;
3557 bool user_memset;
3558 int rc = 0;
3559
3560 user_dir = (user_dma_pkt->ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3561 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3562
3563 user_memset = (user_dma_pkt->ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
3564 GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
3565
3566 switch (user_dir) {
3567 case DMA_HOST_TO_DRAM:
3568 dev_dbg(hdev->dev, "DMA direction is HOST --> DRAM\n");
3569 dir = DMA_TO_DEVICE;
3570 sram_addr = false;
3571 addr = user_dma_pkt->src_addr;
3572 device_memory_addr = user_dma_pkt->dst_addr;
3573 if (user_memset)
3574 skip_host_mem_pin = true;
3575 break;
3576
3577 case DMA_DRAM_TO_HOST:
3578 dev_dbg(hdev->dev, "DMA direction is DRAM --> HOST\n");
3579 dir = DMA_FROM_DEVICE;
3580 sram_addr = false;
3581 addr = user_dma_pkt->dst_addr;
3582 device_memory_addr = user_dma_pkt->src_addr;
3583 break;
3584
3585 case DMA_HOST_TO_SRAM:
3586 dev_dbg(hdev->dev, "DMA direction is HOST --> SRAM\n");
3587 dir = DMA_TO_DEVICE;
3588 addr = user_dma_pkt->src_addr;
3589 device_memory_addr = user_dma_pkt->dst_addr;
3590 if (user_memset)
3591 skip_host_mem_pin = true;
3592 break;
3593
3594 case DMA_SRAM_TO_HOST:
3595 dev_dbg(hdev->dev, "DMA direction is SRAM --> HOST\n");
3596 dir = DMA_FROM_DEVICE;
3597 addr = user_dma_pkt->dst_addr;
3598 device_memory_addr = user_dma_pkt->src_addr;
3599 break;
3600 default:
3601 dev_err(hdev->dev, "DMA direction is undefined\n");
3602 return -EFAULT;
3603 }
3604
3605 if (parser->ctx_id != HL_KERNEL_ASID_ID) {
3606 if (sram_addr) {
3607 if (!hl_mem_area_inside_range(device_memory_addr,
3608 user_dma_pkt->tsize,
3609 hdev->asic_prop.sram_user_base_address,
3610 hdev->asic_prop.sram_end_address)) {
3611
3612 dev_err(hdev->dev,
3613 "SRAM address 0x%llx + 0x%x is invalid\n",
3614 device_memory_addr,
3615 user_dma_pkt->tsize);
3616 return -EFAULT;
3617 }
3618 } else {
3619 if (!hl_mem_area_inside_range(device_memory_addr,
3620 user_dma_pkt->tsize,
3621 hdev->asic_prop.dram_user_base_address,
3622 hdev->asic_prop.dram_end_address)) {
3623
3624 dev_err(hdev->dev,
3625 "DRAM address 0x%llx + 0x%x is invalid\n",
3626 device_memory_addr,
3627 user_dma_pkt->tsize);
3628 return -EFAULT;
3629 }
3630 }
3631 }
3632
3633 if (skip_host_mem_pin)
3634 parser->patched_cb_size += sizeof(*user_dma_pkt);
3635 else {
3636 if ((dir == DMA_TO_DEVICE) &&
3637 (parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1)) {
3638 dev_err(hdev->dev,
3639				"Can't DMA from host on queue other than 1\n");
3640 return -EFAULT;
3641 }
3642
3643 rc = goya_pin_memory_before_cs(hdev, parser, user_dma_pkt,
3644 addr, dir);
3645 }
3646
3647 return rc;
3648}
3649
3650static int goya_validate_dma_pkt_no_host(struct hl_device *hdev,
3651 struct hl_cs_parser *parser,
3652 struct packet_lin_dma *user_dma_pkt)
3653{
3654 u64 sram_memory_addr, dram_memory_addr;
3655 enum goya_dma_direction user_dir;
3656
3657 user_dir = (user_dma_pkt->ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3658 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3659
3660 if (user_dir == DMA_DRAM_TO_SRAM) {
3661 dev_dbg(hdev->dev, "DMA direction is DRAM --> SRAM\n");
3662 dram_memory_addr = user_dma_pkt->src_addr;
3663 sram_memory_addr = user_dma_pkt->dst_addr;
3664 } else {
3665 dev_dbg(hdev->dev, "DMA direction is SRAM --> DRAM\n");
3666 sram_memory_addr = user_dma_pkt->src_addr;
3667 dram_memory_addr = user_dma_pkt->dst_addr;
3668 }
3669
3670 if (!hl_mem_area_inside_range(sram_memory_addr, user_dma_pkt->tsize,
3671 hdev->asic_prop.sram_user_base_address,
3672 hdev->asic_prop.sram_end_address)) {
3673 dev_err(hdev->dev, "SRAM address 0x%llx + 0x%x is invalid\n",
3674 sram_memory_addr, user_dma_pkt->tsize);
3675 return -EFAULT;
3676 }
3677
3678 if (!hl_mem_area_inside_range(dram_memory_addr, user_dma_pkt->tsize,
3679 hdev->asic_prop.dram_user_base_address,
3680 hdev->asic_prop.dram_end_address)) {
3681 dev_err(hdev->dev, "DRAM address 0x%llx + 0x%x is invalid\n",
3682 dram_memory_addr, user_dma_pkt->tsize);
3683 return -EFAULT;
3684 }
3685
3686 parser->patched_cb_size += sizeof(*user_dma_pkt);
3687
3688 return 0;
3689}
3690
3691static int goya_validate_dma_pkt_no_mmu(struct hl_device *hdev,
3692 struct hl_cs_parser *parser,
3693 struct packet_lin_dma *user_dma_pkt)
3694{
3695 enum goya_dma_direction user_dir;
3696 int rc;
3697
3698 dev_dbg(hdev->dev, "DMA packet details:\n");
3699 dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr);
3700 dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr);
3701 dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize);
3702
3703 user_dir = (user_dma_pkt->ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3704 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3705
3706 /*
3707 * Special handling for DMA with size 0. The H/W has a bug where
3708 * this can cause the QMAN DMA to get stuck, so block it here.
3709 */
3710 if (user_dma_pkt->tsize == 0) {
3711 dev_err(hdev->dev,
3712 "Got DMA with size 0, might reset the device\n");
3713 return -EINVAL;
3714 }
3715
3716 if ((user_dir == DMA_DRAM_TO_SRAM) || (user_dir == DMA_SRAM_TO_DRAM))
3717 rc = goya_validate_dma_pkt_no_host(hdev, parser, user_dma_pkt);
3718 else
3719 rc = goya_validate_dma_pkt_host(hdev, parser, user_dma_pkt);
3720
3721 return rc;
3722}
3723
3724static int goya_validate_dma_pkt_mmu(struct hl_device *hdev,
3725 struct hl_cs_parser *parser,
3726 struct packet_lin_dma *user_dma_pkt)
3727{
3728 dev_dbg(hdev->dev, "DMA packet details:\n");
3729 dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr);
3730 dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr);
3731 dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize);
3732
3733 /*
3734 * WA for HW-23.
3735 * We can't allow user to read from Host using QMANs other than 1.
3736 */
3737 if (parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1 &&
3738 hl_mem_area_inside_range(user_dma_pkt->src_addr,
3739 user_dma_pkt->tsize,
3740 hdev->asic_prop.va_space_host_start_address,
3741 hdev->asic_prop.va_space_host_end_address)) {
3742 dev_err(hdev->dev,
3743			"Can't DMA from host on queue other than 1\n");
3744 return -EFAULT;
3745 }
3746
3747 if (user_dma_pkt->tsize == 0) {
3748 dev_err(hdev->dev,
3749 "Got DMA with size 0, might reset the device\n");
3750 return -EINVAL;
3751 }
3752
3753 parser->patched_cb_size += sizeof(*user_dma_pkt);
3754
3755 return 0;
3756}
3757
3758static int goya_validate_wreg32(struct hl_device *hdev,
3759 struct hl_cs_parser *parser,
3760 struct packet_wreg32 *wreg_pkt)
3761{
3762 struct goya_device *goya = hdev->asic_specific;
3763 u32 sob_start_addr, sob_end_addr;
3764 u16 reg_offset;
3765
3766 reg_offset = wreg_pkt->ctl & GOYA_PKT_WREG32_CTL_REG_OFFSET_MASK;
3767
3768 dev_dbg(hdev->dev, "WREG32 packet details:\n");
3769 dev_dbg(hdev->dev, "reg_offset == 0x%x\n", reg_offset);
3770 dev_dbg(hdev->dev, "value == 0x%x\n", wreg_pkt->value);
3771
Oded Gabbay6765fda2019-02-28 10:46:14 +02003772 if (reg_offset != (mmDMA_CH_0_WR_COMP_ADDR_LO & 0x1FFF)) {
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003773 dev_err(hdev->dev, "WREG32 packet with illegal address 0x%x\n",
3774 reg_offset);
3775 return -EPERM;
3776 }
3777
3778 /*
3779	 * With MMU, DMA channels are not secured, so it doesn't matter
3780	 * where the WR COMP is written, because the transaction goes out
3781	 * with a non-secured property
3782 */
3783 if (goya->hw_cap_initialized & HW_CAP_MMU)
3784 return 0;
3785
3786 sob_start_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
3787 sob_end_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1023);
3788
3789 if ((wreg_pkt->value < sob_start_addr) ||
3790 (wreg_pkt->value > sob_end_addr)) {
3791
3792 dev_err(hdev->dev, "WREG32 packet with illegal value 0x%x\n",
3793 wreg_pkt->value);
3794 return -EPERM;
3795 }
3796
3797 return 0;
3798}
3799
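/*
 * goya_validate_cb - walk a user CB and compute the patched CB size
 *
 * @hdev: pointer to hl_device structure
 * @parser: pointer to the CS parser
 * @is_mmu: true if the device MMU is enabled for this context
 *
 * Rejects packets a user is not allowed to submit (WREG_BULK, MSG_PROT,
 * CP_DMA, STOP), validates WREG_32 and LIN_DMA packets, and accumulates
 * parser->patched_cb_size, including room for the two MSG_PROT packets
 * that are appended to the end of every patched CB.
 */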
3800static int goya_validate_cb(struct hl_device *hdev,
3801 struct hl_cs_parser *parser, bool is_mmu)
3802{
3803 u32 cb_parsed_length = 0;
3804 int rc = 0;
3805
3806 parser->patched_cb_size = 0;
3807
3808	/* user_cb_size is more than 0 so the loop always executes */
3809 while (cb_parsed_length < parser->user_cb_size) {
3810 enum packet_id pkt_id;
3811 u16 pkt_size;
3812 void *user_pkt;
3813
3814 user_pkt = (void *) (uintptr_t)
3815 (parser->user_cb->kernel_address + cb_parsed_length);
3816
3817 pkt_id = (enum packet_id) (((*(u64 *) user_pkt) &
3818 PACKET_HEADER_PACKET_ID_MASK) >>
3819 PACKET_HEADER_PACKET_ID_SHIFT);
3820
3821 pkt_size = goya_packet_sizes[pkt_id];
3822 cb_parsed_length += pkt_size;
3823 if (cb_parsed_length > parser->user_cb_size) {
3824 dev_err(hdev->dev,
3825 "packet 0x%x is out of CB boundary\n", pkt_id);
3826 rc = -EINVAL;
3827 break;
3828 }
3829
3830 switch (pkt_id) {
3831 case PACKET_WREG_32:
3832 /*
3833			 * Although it is validated again after the copy in
3834			 * patch_cb(), it must be validated here as well because
3835			 * patch_cb() is not called in the MMU path, while this
3836			 * function is
3836 */
3837 rc = goya_validate_wreg32(hdev, parser, user_pkt);
3838 break;
3839
3840 case PACKET_WREG_BULK:
3841 dev_err(hdev->dev,
3842 "User not allowed to use WREG_BULK\n");
3843 rc = -EPERM;
3844 break;
3845
3846 case PACKET_MSG_PROT:
3847 dev_err(hdev->dev,
3848 "User not allowed to use MSG_PROT\n");
3849 rc = -EPERM;
3850 break;
3851
3852 case PACKET_CP_DMA:
3853 dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
3854 rc = -EPERM;
3855 break;
3856
3857 case PACKET_STOP:
3858 dev_err(hdev->dev, "User not allowed to use STOP\n");
3859 rc = -EPERM;
3860 break;
3861
3862 case PACKET_LIN_DMA:
3863 if (is_mmu)
3864 rc = goya_validate_dma_pkt_mmu(hdev, parser,
3865 user_pkt);
3866 else
3867 rc = goya_validate_dma_pkt_no_mmu(hdev, parser,
3868 user_pkt);
3869 break;
3870
3871 case PACKET_MSG_LONG:
3872 case PACKET_MSG_SHORT:
3873 case PACKET_FENCE:
3874 case PACKET_NOP:
3875 parser->patched_cb_size += pkt_size;
3876 break;
3877
3878 default:
3879 dev_err(hdev->dev, "Invalid packet header 0x%x\n",
3880 pkt_id);
3881 rc = -EINVAL;
3882 break;
3883 }
3884
3885 if (rc)
3886 break;
3887 }
3888
3889 /*
3890 * The new CB should have space at the end for two MSG_PROT packets:
3891 * 1. A packet that will act as a completion packet
3892 * 2. A packet that will generate MSI-X interrupt
3893 */
3894 parser->patched_cb_size += sizeof(struct packet_msg_prot) * 2;
3895
3896 return rc;
3897}
3898
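/*
 * goya_patch_dma_packet - expand one user LIN_DMA packet into 1..N packets
 *
 * @hdev: pointer to hl_device structure
 * @parser: pointer to the CS parser
 * @user_dma_pkt: the original user packet
 * @new_dma_pkt: where the expanded packet(s) are written
 * @new_dma_pkt_size: returned total size of the expanded packet(s)
 *
 * Device-to-device and host memset transfers are copied as-is. Other host
 * transfers are split along the pinned buffer's sg table, one packet per
 * coalesced descriptor. The RDCOMP/WRCOMP bits are restored only on the
 * last packet so that completion fires once for the whole transfer.
 */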
3899static int goya_patch_dma_packet(struct hl_device *hdev,
3900 struct hl_cs_parser *parser,
3901 struct packet_lin_dma *user_dma_pkt,
3902 struct packet_lin_dma *new_dma_pkt,
3903 u32 *new_dma_pkt_size)
3904{
3905 struct hl_userptr *userptr;
3906 struct scatterlist *sg, *sg_next_iter;
Oded Gabbaye99f16832019-02-24 11:55:26 +02003907 u32 count, dma_desc_cnt;
3908 u64 len, len_next;
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02003909 dma_addr_t dma_addr, dma_addr_next;
3910 enum goya_dma_direction user_dir;
3911 u64 device_memory_addr, addr;
3912 enum dma_data_direction dir;
3913 struct sg_table *sgt;
3914 bool skip_host_mem_pin = false;
3915 bool user_memset;
3916 u32 user_rdcomp_mask, user_wrcomp_mask;
3917
3918 user_dir = (user_dma_pkt->ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3919 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3920
3921 user_memset = (user_dma_pkt->ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
3922 GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
3923
3924 if ((user_dir == DMA_DRAM_TO_SRAM) || (user_dir == DMA_SRAM_TO_DRAM) ||
3925 (user_dma_pkt->tsize == 0)) {
3926 memcpy(new_dma_pkt, user_dma_pkt, sizeof(*new_dma_pkt));
3927 *new_dma_pkt_size = sizeof(*new_dma_pkt);
3928 return 0;
3929 }
3930
3931 if ((user_dir == DMA_HOST_TO_DRAM) || (user_dir == DMA_HOST_TO_SRAM)) {
3932 addr = user_dma_pkt->src_addr;
3933 device_memory_addr = user_dma_pkt->dst_addr;
3934 dir = DMA_TO_DEVICE;
3935 if (user_memset)
3936 skip_host_mem_pin = true;
3937 } else {
3938 addr = user_dma_pkt->dst_addr;
3939 device_memory_addr = user_dma_pkt->src_addr;
3940 dir = DMA_FROM_DEVICE;
3941 }
3942
3943 if ((!skip_host_mem_pin) &&
3944 (hl_userptr_is_pinned(hdev, addr, user_dma_pkt->tsize,
3945 parser->job_userptr_list, &userptr) == false)) {
3946 dev_err(hdev->dev, "Userptr 0x%llx + 0x%x NOT mapped\n",
3947 addr, user_dma_pkt->tsize);
3948 return -EFAULT;
3949 }
3950
3951 if ((user_memset) && (dir == DMA_TO_DEVICE)) {
3952 memcpy(new_dma_pkt, user_dma_pkt, sizeof(*user_dma_pkt));
3953 *new_dma_pkt_size = sizeof(*user_dma_pkt);
3954 return 0;
3955 }
3956
3957 user_rdcomp_mask =
3958 (user_dma_pkt->ctl & GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK);
3959
3960 user_wrcomp_mask =
3961 (user_dma_pkt->ctl & GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK);
3962
3963 sgt = userptr->sgt;
3964 dma_desc_cnt = 0;
3965
3966 for_each_sg(sgt->sgl, sg, sgt->nents, count) {
3967 len = sg_dma_len(sg);
3968 dma_addr = sg_dma_address(sg);
3969
3970 if (len == 0)
3971 break;
3972
3973 while ((count + 1) < sgt->nents) {
3974 sg_next_iter = sg_next(sg);
3975 len_next = sg_dma_len(sg_next_iter);
3976 dma_addr_next = sg_dma_address(sg_next_iter);
3977
3978 if (len_next == 0)
3979 break;
3980
3981 if ((dma_addr + len == dma_addr_next) &&
3982 (len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
3983 len += len_next;
3984 count++;
3985 sg = sg_next_iter;
3986 } else {
3987 break;
3988 }
3989 }
3990
3991 new_dma_pkt->ctl = user_dma_pkt->ctl;
3992 if (likely(dma_desc_cnt))
3993 new_dma_pkt->ctl &= ~GOYA_PKT_CTL_EB_MASK;
3994 new_dma_pkt->ctl &= ~(GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK |
3995 GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK);
3996 new_dma_pkt->tsize = len;
3997
3998 dma_addr += hdev->asic_prop.host_phys_base_address;
3999
4000 if (dir == DMA_TO_DEVICE) {
4001 new_dma_pkt->src_addr = dma_addr;
4002 new_dma_pkt->dst_addr = device_memory_addr;
4003 } else {
4004 new_dma_pkt->src_addr = device_memory_addr;
4005 new_dma_pkt->dst_addr = dma_addr;
4006 }
4007
4008 if (!user_memset)
4009 device_memory_addr += len;
4010 dma_desc_cnt++;
4011 new_dma_pkt++;
4012 }
4013
4014 if (!dma_desc_cnt) {
4015 dev_err(hdev->dev,
4016			"Got 0 SG entries when patching DMA packet\n");
4017 return -EFAULT;
4018 }
4019
4020 /* Fix the last dma packet - rdcomp/wrcomp must be as user set them */
4021 new_dma_pkt--;
4022 new_dma_pkt->ctl |= (user_rdcomp_mask | user_wrcomp_mask);
4023
4024 *new_dma_pkt_size = dma_desc_cnt * sizeof(struct packet_lin_dma);
4025
4026 return 0;
4027}
4028
4029static int goya_patch_cb(struct hl_device *hdev,
4030 struct hl_cs_parser *parser)
4031{
4032 u32 cb_parsed_length = 0;
4033 u32 cb_patched_cur_length = 0;
4034 int rc = 0;
4035
4036	/* user_cb_size is more than 0 so the loop always executes */
4037 while (cb_parsed_length < parser->user_cb_size) {
4038 enum packet_id pkt_id;
4039 u16 pkt_size;
4040 u32 new_pkt_size = 0;
4041 void *user_pkt, *kernel_pkt;
4042
4043 user_pkt = (void *) (uintptr_t)
4044 (parser->user_cb->kernel_address + cb_parsed_length);
4045 kernel_pkt = (void *) (uintptr_t)
4046 (parser->patched_cb->kernel_address +
4047 cb_patched_cur_length);
4048
4049 pkt_id = (enum packet_id) (((*(u64 *) user_pkt) &
4050 PACKET_HEADER_PACKET_ID_MASK) >>
4051 PACKET_HEADER_PACKET_ID_SHIFT);
4052
4053 pkt_size = goya_packet_sizes[pkt_id];
4054 cb_parsed_length += pkt_size;
4055 if (cb_parsed_length > parser->user_cb_size) {
4056 dev_err(hdev->dev,
4057 "packet 0x%x is out of CB boundary\n", pkt_id);
4058 rc = -EINVAL;
4059 break;
4060 }
4061
4062 switch (pkt_id) {
4063 case PACKET_LIN_DMA:
4064 rc = goya_patch_dma_packet(hdev, parser, user_pkt,
4065 kernel_pkt, &new_pkt_size);
4066 cb_patched_cur_length += new_pkt_size;
4067 break;
4068
4069 case PACKET_WREG_32:
4070 memcpy(kernel_pkt, user_pkt, pkt_size);
4071 cb_patched_cur_length += pkt_size;
4072 rc = goya_validate_wreg32(hdev, parser, kernel_pkt);
4073 break;
4074
4075 case PACKET_WREG_BULK:
4076 dev_err(hdev->dev,
4077 "User not allowed to use WREG_BULK\n");
4078 rc = -EPERM;
4079 break;
4080
4081 case PACKET_MSG_PROT:
4082 dev_err(hdev->dev,
4083 "User not allowed to use MSG_PROT\n");
4084 rc = -EPERM;
4085 break;
4086
4087 case PACKET_CP_DMA:
4088 dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
4089 rc = -EPERM;
4090 break;
4091
4092 case PACKET_STOP:
4093 dev_err(hdev->dev, "User not allowed to use STOP\n");
4094 rc = -EPERM;
4095 break;
4096
4097 case PACKET_MSG_LONG:
4098 case PACKET_MSG_SHORT:
4099 case PACKET_FENCE:
4100 case PACKET_NOP:
4101 memcpy(kernel_pkt, user_pkt, pkt_size);
4102 cb_patched_cur_length += pkt_size;
4103 break;
4104
4105 default:
4106 dev_err(hdev->dev, "Invalid packet header 0x%x\n",
4107 pkt_id);
4108 rc = -EINVAL;
4109 break;
4110 }
4111
4112 if (rc)
4113 break;
4114 }
4115
4116 return rc;
4117}
4118
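/*
 * goya_parse_cb_mmu - parse a user CB when the device MMU is enabled
 *
 * @hdev: pointer to hl_device structure
 * @parser: pointer to the CS parser
 *
 * In the MMU path no address patching is needed, so the user CB is copied
 * verbatim into a kernel-owned CB that has extra room for the two trailing
 * MSG_PROT packets, and the copy is then validated in place.
 */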
4119static int goya_parse_cb_mmu(struct hl_device *hdev,
4120 struct hl_cs_parser *parser)
4121{
4122 u64 patched_cb_handle;
4123 u32 patched_cb_size;
4124 struct hl_cb *user_cb;
4125 int rc;
4126
4127 /*
4128	 * The new CB should have space at the end for two MSG_PROT packets:
4129 * 1. A packet that will act as a completion packet
4130 * 2. A packet that will generate MSI-X interrupt
4131 */
4132 parser->patched_cb_size = parser->user_cb_size +
4133 sizeof(struct packet_msg_prot) * 2;
4134
4135 rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
4136 parser->patched_cb_size,
4137 &patched_cb_handle, HL_KERNEL_ASID_ID);
4138
4139 if (rc) {
4140 dev_err(hdev->dev,
4141 "Failed to allocate patched CB for DMA CS %d\n",
4142 rc);
4143 return rc;
4144 }
4145
4146 patched_cb_handle >>= PAGE_SHIFT;
4147 parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
4148 (u32) patched_cb_handle);
4149 /* hl_cb_get should never fail here so use kernel WARN */
4150 WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
4151 (u32) patched_cb_handle);
4152 if (!parser->patched_cb) {
4153 rc = -EFAULT;
4154 goto out;
4155 }
4156
4157 /*
4158 * The check that parser->user_cb_size <= parser->user_cb->size was done
4159 * in validate_queue_index().
4160 */
4161 memcpy((void *) (uintptr_t) parser->patched_cb->kernel_address,
4162 (void *) (uintptr_t) parser->user_cb->kernel_address,
4163 parser->user_cb_size);
4164
4165 patched_cb_size = parser->patched_cb_size;
4166
4167 /* validate patched CB instead of user CB */
4168 user_cb = parser->user_cb;
4169 parser->user_cb = parser->patched_cb;
4170 rc = goya_validate_cb(hdev, parser, true);
4171 parser->user_cb = user_cb;
4172
4173 if (rc) {
4174 hl_cb_put(parser->patched_cb);
4175 goto out;
4176 }
4177
4178 if (patched_cb_size != parser->patched_cb_size) {
4179 dev_err(hdev->dev, "user CB size mismatch\n");
4180 hl_cb_put(parser->patched_cb);
4181 rc = -EINVAL;
4182 goto out;
4183 }
4184
4185out:
4186 /*
4187	 * Always call cb destroy here because we still hold one reference
4188	 * to it from the earlier cb_get. After the job completes, cb_put
4189	 * will release it, but here we only want to remove it from the
4190	 * idr
4191 */
4192 hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
4193 patched_cb_handle << PAGE_SHIFT);
4194
4195 return rc;
4196}
4197
Oded Gabbay5e6e0232019-02-27 12:15:16 +02004198static int goya_parse_cb_no_mmu(struct hl_device *hdev,
4199 struct hl_cs_parser *parser)
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02004200{
4201 u64 patched_cb_handle;
4202 int rc;
4203
4204 rc = goya_validate_cb(hdev, parser, false);
4205
4206 if (rc)
4207 goto free_userptr;
4208
4209 rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
4210 parser->patched_cb_size,
4211 &patched_cb_handle, HL_KERNEL_ASID_ID);
4212 if (rc) {
4213 dev_err(hdev->dev,
4214 "Failed to allocate patched CB for DMA CS %d\n", rc);
4215 goto free_userptr;
4216 }
4217
4218 patched_cb_handle >>= PAGE_SHIFT;
4219 parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
4220 (u32) patched_cb_handle);
4221 /* hl_cb_get should never fail here so use kernel WARN */
4222 WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
4223 (u32) patched_cb_handle);
4224 if (!parser->patched_cb) {
4225 rc = -EFAULT;
4226 goto out;
4227 }
4228
4229 rc = goya_patch_cb(hdev, parser);
4230
4231 if (rc)
4232 hl_cb_put(parser->patched_cb);
4233
4234out:
4235 /*
4236	 * Always call cb destroy here because we still hold one reference
4237	 * to it from the earlier cb_get. After the job completes, cb_put
4238	 * will release it, but here we only want to remove it from the
4239	 * idr
4240 */
4241 hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
4242 patched_cb_handle << PAGE_SHIFT);
4243
4244free_userptr:
4245 if (rc)
4246 hl_userptr_delete_list(hdev, parser->job_userptr_list);
4247 return rc;
4248}
4249
Oded Gabbay5e6e0232019-02-27 12:15:16 +02004250static int goya_parse_cb_no_ext_queue(struct hl_device *hdev,
4251 struct hl_cs_parser *parser)
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02004252{
4253 struct asic_fixed_properties *asic_prop = &hdev->asic_prop;
4254 struct goya_device *goya = hdev->asic_specific;
4255
4256 if (!(goya->hw_cap_initialized & HW_CAP_MMU)) {
4257 /* For internal queue jobs, just check if cb address is valid */
4258 if (hl_mem_area_inside_range(
4259 (u64) (uintptr_t) parser->user_cb,
4260 parser->user_cb_size,
4261 asic_prop->sram_user_base_address,
4262 asic_prop->sram_end_address))
4263 return 0;
4264
4265 if (hl_mem_area_inside_range(
4266 (u64) (uintptr_t) parser->user_cb,
4267 parser->user_cb_size,
4268 asic_prop->dram_user_base_address,
4269 asic_prop->dram_end_address))
4270 return 0;
4271
4272 dev_err(hdev->dev,
4273 "Internal CB address 0x%llx + 0x%x is not in SRAM nor in DRAM\n",
4274 (u64) (uintptr_t) parser->user_cb,
4275 parser->user_cb_size);
4276
4277 return -EFAULT;
4278 }
4279
4280 return 0;
4281}
4282
4283int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
4284{
4285 struct goya_device *goya = hdev->asic_specific;
4286
4287 if (!parser->ext_queue)
4288		return goya_parse_cb_no_ext_queue(hdev, parser);
4289
4290 if ((goya->hw_cap_initialized & HW_CAP_MMU) && parser->use_virt_addr)
4291 return goya_parse_cb_mmu(hdev, parser);
4292 else
4293 return goya_parse_cb_no_mmu(hdev, parser);
4294}
4295
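/*
 * goya_add_end_of_cb_packets - write the two trailing MSG_PROT packets
 *
 * @kernel_address: kernel address of the patched CB
 * @len: total size of the patched CB, including the two packets
 * @cq_addr: address of the completion queue entry to update
 * @cq_val: value to write to the completion queue
 * @msix_vec: MSI-X vector to trigger
 *
 * The first packet writes the completion value, the second rings the
 * MSI-X doorbell. Both are MSG_PROT, which only a secured CP may issue,
 * which is why user CBs are forbidden from containing that packet.
 */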
4296void goya_add_end_of_cb_packets(u64 kernel_address, u32 len, u64 cq_addr,
4297 u32 cq_val, u32 msix_vec)
4298{
4299 struct packet_msg_prot *cq_pkt;
4300
4301 cq_pkt = (struct packet_msg_prot *) (uintptr_t)
4302 (kernel_address + len - (sizeof(struct packet_msg_prot) * 2));
4303
4304 cq_pkt->ctl = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
4305 (1 << GOYA_PKT_CTL_EB_SHIFT) |
4306 (1 << GOYA_PKT_CTL_MB_SHIFT);
4307 cq_pkt->value = cq_val;
4308 cq_pkt->addr = cq_addr;
4309
4310 cq_pkt++;
4311
4312 cq_pkt->ctl = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
4313 (1 << GOYA_PKT_CTL_MB_SHIFT);
4314 cq_pkt->value = msix_vec & 0x7FF;
4315 cq_pkt->addr = CFG_BASE + mmPCIE_DBI_MSIX_DOORBELL_OFF;
4316}
4317
Oded Gabbay1251f232019-02-16 00:39:18 +02004318static void goya_update_eq_ci(struct hl_device *hdev, u32 val)
4319{
4320 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, val);
4321}
4322
Oded Gabbay5e6e0232019-02-27 12:15:16 +02004323static void goya_restore_phase_topology(struct hl_device *hdev)
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02004324{
4325 int i, num_of_sob_in_longs, num_of_mon_in_longs;
4326
4327 num_of_sob_in_longs =
4328 ((mmSYNC_MNGR_SOB_OBJ_1023 - mmSYNC_MNGR_SOB_OBJ_0) + 4);
4329
4330 num_of_mon_in_longs =
4331 ((mmSYNC_MNGR_MON_STATUS_255 - mmSYNC_MNGR_MON_STATUS_0) + 4);
4332
4333 for (i = 0 ; i < num_of_sob_in_longs ; i += 4)
4334 WREG32(mmSYNC_MNGR_SOB_OBJ_0 + i, 0);
4335
4336 for (i = 0 ; i < num_of_mon_in_longs ; i += 4)
4337 WREG32(mmSYNC_MNGR_MON_STATUS_0 + i, 0);
4338
4339 /* Flush all WREG to prevent race */
4340 i = RREG32(mmSYNC_MNGR_SOB_OBJ_0);
4341}
4342
Oded Gabbayc2164772019-02-16 00:39:24 +02004343/*
4344 * goya_debugfs_read32 - read a 32bit value from a given device address
4345 *
4346 * @hdev: pointer to hl_device structure
4347 * @addr: address in device
4348 * @val: returned value
4349 *
4350 * In case of a DDR address that is not mapped into the default aperture
4351 * exposed by the DDR bar, the function configures the iATU so that the
4352 * DDR bar is positioned at a base address that allows reading from the
4353 * required address. Configuring the iATU during normal operation can
4354 * lead to undefined behavior and should therefore be done with extreme care
4355 *
4356 */
Oded Gabbay5e6e0232019-02-27 12:15:16 +02004357static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
Oded Gabbayc2164772019-02-16 00:39:24 +02004358{
4359 struct asic_fixed_properties *prop = &hdev->asic_prop;
4360 int rc = 0;
4361
4362 if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
4363 *val = RREG32(addr - CFG_BASE);
4364
4365 } else if ((addr >= SRAM_BASE_ADDR) &&
4366 (addr < SRAM_BASE_ADDR + SRAM_SIZE)) {
4367
4368 *val = readl(hdev->pcie_bar[SRAM_CFG_BAR_ID] +
4369 (addr - SRAM_BASE_ADDR));
4370
4371 } else if ((addr >= DRAM_PHYS_BASE) &&
4372 (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size)) {
4373
4374 u64 bar_base_addr = DRAM_PHYS_BASE +
4375 (addr & ~(prop->dram_pci_bar_size - 0x1ull));
4376
4377 rc = goya_set_ddr_bar_base(hdev, bar_base_addr);
4378 if (!rc) {
4379 *val = readl(hdev->pcie_bar[DDR_BAR_ID] +
4380 (addr - bar_base_addr));
4381
4382 rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE +
4383 (MMU_PAGE_TABLES_ADDR &
4384 ~(prop->dram_pci_bar_size - 0x1ull)));
4385 }
4386 } else {
4387 rc = -EFAULT;
4388 }
4389
4390 return rc;
4391}
4392
4393/*
4394 * goya_debugfs_write32 - write a 32bit value to a given device address
4395 *
4396 * @hdev: pointer to hl_device structure
4397 * @addr: address in device
4398 * @val: value to write
4399 *
4400 * In case of a DDR address that is not mapped into the default aperture
4401 * exposed by the DDR bar, the function configures the iATU so that the
4402 * DDR bar is positioned at a base address that allows writing to the
4403 * required address. Configuring the iATU during normal operation can
4404 * lead to undefined behavior and should therefore be done with extreme care
4405 *
4406 */
Oded Gabbay5e6e0232019-02-27 12:15:16 +02004407static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
Oded Gabbayc2164772019-02-16 00:39:24 +02004408{
4409 struct asic_fixed_properties *prop = &hdev->asic_prop;
4410 int rc = 0;
4411
4412 if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
4413 WREG32(addr - CFG_BASE, val);
4414
4415 } else if ((addr >= SRAM_BASE_ADDR) &&
4416 (addr < SRAM_BASE_ADDR + SRAM_SIZE)) {
4417
4418 writel(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] +
4419 (addr - SRAM_BASE_ADDR));
4420
4421 } else if ((addr >= DRAM_PHYS_BASE) &&
4422 (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size)) {
4423
4424 u64 bar_base_addr = DRAM_PHYS_BASE +
4425 (addr & ~(prop->dram_pci_bar_size - 0x1ull));
4426
4427 rc = goya_set_ddr_bar_base(hdev, bar_base_addr);
4428 if (!rc) {
4429 writel(val, hdev->pcie_bar[DDR_BAR_ID] +
4430 (addr - bar_base_addr));
4431
4432 rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE +
4433 (MMU_PAGE_TABLES_ADDR &
4434 ~(prop->dram_pci_bar_size - 0x1ull)));
4435 }
4436 } else {
4437 rc = -EFAULT;
4438 }
4439
4440 return rc;
4441}
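
/*
 * Illustrative sketch (not part of the driver): both debugfs accessors
 * above reposition the DDR bar by rounding the target address down to a
 * multiple of the bar size, guaranteeing the target falls inside the new
 * window. This mirrors the expression used above; the access then lands
 * at offset (addr - bar_base_addr) inside the repositioned bar.
 */
#if 0	/* example only, never compiled */
static u64 example_ddr_bar_base(u64 addr, u64 bar_size)
{
	return DRAM_PHYS_BASE + (addr & ~(bar_size - 0x1ull));
}
#endif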
4442
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004443static u64 goya_read_pte(struct hl_device *hdev, u64 addr)
4444{
4445 struct goya_device *goya = hdev->asic_specific;
4446
4447 return readq(hdev->pcie_bar[DDR_BAR_ID] +
4448 (addr - goya->ddr_bar_cur_addr));
4449}
4450
4451static void goya_write_pte(struct hl_device *hdev, u64 addr, u64 val)
4452{
4453 struct goya_device *goya = hdev->asic_specific;
4454
4455 writeq(val, hdev->pcie_bar[DDR_BAR_ID] +
4456 (addr - goya->ddr_bar_cur_addr));
4457}
4458
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004459static const char *_goya_get_event_desc(u16 event_type)
Oded Gabbay1251f232019-02-16 00:39:18 +02004460{
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004461 switch (event_type) {
4462 case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
4463 return "PCIe_dec";
4464 case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
4465 case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
4466 case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
4467 case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
4468 case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
4469 case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
4470 case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
4471 case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
4472 return "TPC%d_dec";
4473 case GOYA_ASYNC_EVENT_ID_MME_WACS:
4474 return "MME_wacs";
4475 case GOYA_ASYNC_EVENT_ID_MME_WACSD:
4476 return "MME_wacsd";
4477 case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER:
4478 return "CPU_axi_splitter";
4479 case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC:
4480 return "PSOC_axi_dec";
4481 case GOYA_ASYNC_EVENT_ID_PSOC:
4482 return "PSOC";
4483 case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
4484 case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
4485 case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
4486 case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
4487 case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
4488 case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
4489 case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
4490 case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
4491 return "TPC%d_krn_err";
4492 case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ:
4493 return "TPC%d_cq";
4494 case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
4495 return "TPC%d_qm";
4496 case GOYA_ASYNC_EVENT_ID_MME_QM:
4497 return "MME_qm";
4498 case GOYA_ASYNC_EVENT_ID_MME_CMDQ:
4499 return "MME_cq";
4500 case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
4501 return "DMA%d_qm";
4502 case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
4503 return "DMA%d_ch";
4504 default:
4505 return "N/A";
4506 }
Oded Gabbay1251f232019-02-16 00:39:18 +02004507}
4508
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004509static void goya_get_event_desc(u16 event_type, char *desc, size_t size)
Oded Gabbay1251f232019-02-16 00:39:18 +02004510{
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004511 u8 index;
Oded Gabbay1251f232019-02-16 00:39:18 +02004512
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004513 switch (event_type) {
4514 case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
4515 case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
4516 case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
4517 case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
4518 case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
4519 case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
4520 case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
4521 case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
4522 index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_DEC) / 3;
4523 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4524 break;
4525 case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
4526 case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
4527 case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
4528 case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
4529 case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
4530 case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
4531 case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
4532 case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
4533 index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR) / 10;
4534 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4535 break;
4536 case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ:
4537 index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_CMDQ;
4538 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4539 break;
4540 case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
4541 index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_QM;
4542 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4543 break;
4544 case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
4545 index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_QM;
4546 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4547 break;
4548 case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
4549 index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_CH;
4550 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4551 break;
4552 default:
4553 snprintf(desc, size, _goya_get_event_desc(event_type));
4554 break;
4555 }
4556}
Oded Gabbay1251f232019-02-16 00:39:18 +02004557
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004558static void goya_print_razwi_info(struct hl_device *hdev)
4559{
4560 if (RREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD)) {
4561 dev_err(hdev->dev, "Illegal write to LBW\n");
4562 WREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD, 0);
4563 }
Oded Gabbay1251f232019-02-16 00:39:18 +02004564
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004565 if (RREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD)) {
4566 dev_err(hdev->dev, "Illegal read from LBW\n");
4567 WREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD, 0);
4568 }
4569
4570 if (RREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD)) {
4571 dev_err(hdev->dev, "Illegal write to HBW\n");
4572 WREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD, 0);
4573 }
4574
4575 if (RREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD)) {
4576 dev_err(hdev->dev, "Illegal read from HBW\n");
4577 WREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD, 0);
4578 }
4579}
4580
4581static void goya_print_mmu_error_info(struct hl_device *hdev)
4582{
4583 struct goya_device *goya = hdev->asic_specific;
4584 u64 addr;
4585 u32 val;
4586
4587 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4588 return;
4589
4590 val = RREG32(mmMMU_PAGE_ERROR_CAPTURE);
4591 if (val & MMU_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK) {
4592 addr = val & MMU_PAGE_ERROR_CAPTURE_VA_49_32_MASK;
4593 addr <<= 32;
4594 addr |= RREG32(mmMMU_PAGE_ERROR_CAPTURE_VA);
4595
4596 dev_err(hdev->dev, "MMU page fault on va 0x%llx\n", addr);
4597
4598 WREG32(mmMMU_PAGE_ERROR_CAPTURE, 0);
Oded Gabbay1251f232019-02-16 00:39:18 +02004599 }
4600}
4601
4602static void goya_print_irq_info(struct hl_device *hdev, u16 event_type)
4603{
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004604 char desc[20] = "";
Oded Gabbay1251f232019-02-16 00:39:18 +02004605
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004606 goya_get_event_desc(event_type, desc, sizeof(desc));
4607 dev_err(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
4608 event_type, desc);
Oded Gabbay1251f232019-02-16 00:39:18 +02004609
Tomer Tayar60b7dcc2019-02-28 10:46:10 +02004610 goya_print_razwi_info(hdev);
4611 goya_print_mmu_error_info(hdev);
Oded Gabbay1251f232019-02-16 00:39:18 +02004612}
4613
Oded Gabbayf8c8c7d2019-02-16 00:39:20 +02004614static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
4615 size_t irq_arr_size)
4616{
4617 struct armcp_unmask_irq_arr_packet *pkt;
4618 size_t total_pkt_size;
4619 long result;
4620 int rc;
4621
4622 total_pkt_size = sizeof(struct armcp_unmask_irq_arr_packet) +
4623 irq_arr_size;
4624
4625	/* data should be aligned to 8 bytes in order for ArmCP to copy it */
4626 total_pkt_size = (total_pkt_size + 0x7) & ~0x7;
4627
4628	/* total_pkt_size is cast to u16 later on */
4629 if (total_pkt_size > USHRT_MAX) {
4630 dev_err(hdev->dev, "too many elements in IRQ array\n");
4631 return -EINVAL;
4632 }
4633
4634 pkt = kzalloc(total_pkt_size, GFP_KERNEL);
4635 if (!pkt)
4636 return -ENOMEM;
4637
4638 pkt->length = irq_arr_size / sizeof(irq_arr[0]);
4639 memcpy(&pkt->irqs, irq_arr, irq_arr_size);
4640
4641 pkt->armcp_pkt.ctl = ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
4642 ARMCP_PKT_CTL_OPCODE_SHIFT;
4643
4644 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
4645 total_pkt_size, HL_DEVICE_TIMEOUT_USEC, &result);
4646
4647 if (rc)
4648 dev_err(hdev->dev, "failed to unmask IRQ array\n");
4649
4650 kfree(pkt);
4651
4652 return rc;
4653}
4654
4655static int goya_soft_reset_late_init(struct hl_device *hdev)
4656{
4657 /*
4658 * Unmask all IRQs since some could have been received
4659 * during the soft reset
4660 */
4661 return goya_unmask_irq_arr(hdev, goya_non_fatal_events,
4662 sizeof(goya_non_fatal_events));
4663}
4664
Oded Gabbay1251f232019-02-16 00:39:18 +02004665static int goya_unmask_irq(struct hl_device *hdev, u16 event_type)
4666{
4667 struct armcp_packet pkt;
4668 long result;
4669 int rc;
4670
4671 memset(&pkt, 0, sizeof(pkt));
4672
4673 pkt.ctl = ARMCP_PACKET_UNMASK_RAZWI_IRQ << ARMCP_PKT_CTL_OPCODE_SHIFT;
4674 pkt.value = event_type;
4675
4676 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
4677 HL_DEVICE_TIMEOUT_USEC, &result);
4678
4679 if (rc)
4680 dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);
4681
4682 return rc;
4683}
4684
4685void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
4686{
4687 u16 event_type = ((eq_entry->hdr.ctl & EQ_CTL_EVENT_TYPE_MASK)
4688 >> EQ_CTL_EVENT_TYPE_SHIFT);
4689 struct goya_device *goya = hdev->asic_specific;
4690
4691 goya->events_stat[event_type]++;
4692
4693 switch (event_type) {
4694 case GOYA_ASYNC_EVENT_ID_PCIE_IF:
4695 case GOYA_ASYNC_EVENT_ID_TPC0_ECC:
4696 case GOYA_ASYNC_EVENT_ID_TPC1_ECC:
4697 case GOYA_ASYNC_EVENT_ID_TPC2_ECC:
4698 case GOYA_ASYNC_EVENT_ID_TPC3_ECC:
4699 case GOYA_ASYNC_EVENT_ID_TPC4_ECC:
4700 case GOYA_ASYNC_EVENT_ID_TPC5_ECC:
4701 case GOYA_ASYNC_EVENT_ID_TPC6_ECC:
4702 case GOYA_ASYNC_EVENT_ID_TPC7_ECC:
4703 case GOYA_ASYNC_EVENT_ID_MME_ECC:
4704 case GOYA_ASYNC_EVENT_ID_MME_ECC_EXT:
4705 case GOYA_ASYNC_EVENT_ID_MMU_ECC:
4706 case GOYA_ASYNC_EVENT_ID_DMA_MACRO:
4707 case GOYA_ASYNC_EVENT_ID_DMA_ECC:
4708 case GOYA_ASYNC_EVENT_ID_CPU_IF_ECC:
4709 case GOYA_ASYNC_EVENT_ID_PSOC_MEM:
4710 case GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT:
4711 case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
4712 case GOYA_ASYNC_EVENT_ID_GIC500:
4713 case GOYA_ASYNC_EVENT_ID_PLL0:
4714 case GOYA_ASYNC_EVENT_ID_PLL1:
4715 case GOYA_ASYNC_EVENT_ID_PLL3:
4716 case GOYA_ASYNC_EVENT_ID_PLL4:
4717 case GOYA_ASYNC_EVENT_ID_PLL5:
4718 case GOYA_ASYNC_EVENT_ID_PLL6:
4719 case GOYA_ASYNC_EVENT_ID_AXI_ECC:
4720 case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC:
4721 case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
4722 case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT:
4723 dev_err(hdev->dev,
4724			"Received H/W interrupt %d, resetting the chip\n",
4725 event_type);
Oded Gabbayf8c8c7d2019-02-16 00:39:20 +02004726 hl_device_reset(hdev, true, false);
Oded Gabbay1251f232019-02-16 00:39:18 +02004727 break;
4728
4729 case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
4730 case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
4731 case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
4732 case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
4733 case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
4734 case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
4735 case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
4736 case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
4737 case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
4738 case GOYA_ASYNC_EVENT_ID_MME_WACS:
4739 case GOYA_ASYNC_EVENT_ID_MME_WACSD:
4740 case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER:
4741 case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC:
4742 case GOYA_ASYNC_EVENT_ID_PSOC:
4743 case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
4744 case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
4745 case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
4746 case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
4747 case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
4748 case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
4749 case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
4750 case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
4751 case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
4752 case GOYA_ASYNC_EVENT_ID_MME_QM:
4753 case GOYA_ASYNC_EVENT_ID_MME_CMDQ:
4754 case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
4755 case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
4756 goya_print_irq_info(hdev, event_type);
4757 goya_unmask_irq(hdev, event_type);
4758 break;
4759
4760 case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
4761 case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
4762 case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
4763 case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU:
4764 case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU:
4765 case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
4766 case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
4767 case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
4768 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0:
4769 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH1:
4770 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH2:
4771 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH3:
4772 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
4773 dev_info(hdev->dev, "Received H/W interrupt %d\n", event_type);
4774 break;
4775
4776 default:
4777 dev_err(hdev->dev, "Received invalid H/W interrupt %d\n",
4778 event_type);
4779 break;
4780 }
4781}
4782
4783void *goya_get_events_stat(struct hl_device *hdev, u32 *size)
4784{
4785 struct goya_device *goya = hdev->asic_specific;
4786
4787 *size = (u32) sizeof(goya->events_stat);
4788
4789 return goya->events_stat;
4790}
4791
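/*
 * goya_memset_device_memory - fill a device memory range using QMAN 0
 *
 * @hdev: pointer to hl_device structure
 * @addr: device address to start filling from
 * @size: number of bytes to fill
 * @val: 64-bit pattern to write
 * @is_dram: true to target DRAM, false to target SRAM
 *
 * Builds a kernel CB holding a single memset LIN_DMA packet, runs it
 * through the regular CS parser and submits the patched CB on QMAN 0.
 * Used below for scrubbing SRAM on context switch and for clearing the
 * MMU page tables and DRAM default page.
 */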
Omer Shpigelman27ca384c2019-02-28 10:46:11 +02004792static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u32 size,
4793 u64 val, bool is_dram)
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004794{
Omer Shpigelman27ca384c2019-02-28 10:46:11 +02004795 struct packet_lin_dma *lin_dma_pkt;
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004796 struct hl_cs_parser parser;
4797 struct hl_cs_job *job;
4798 u32 cb_size;
4799 struct hl_cb *cb;
4800 int rc;
4801
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004802 cb = hl_cb_kernel_create(hdev, PAGE_SIZE);
4803 if (!cb)
4804 return -EFAULT;
4805
Omer Shpigelman27ca384c2019-02-28 10:46:11 +02004806 lin_dma_pkt = (struct packet_lin_dma *) (uintptr_t) cb->kernel_address;
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004807
Omer Shpigelman27ca384c2019-02-28 10:46:11 +02004808 memset(lin_dma_pkt, 0, sizeof(*lin_dma_pkt));
4809 cb_size = sizeof(*lin_dma_pkt);
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004810
Omer Shpigelman27ca384c2019-02-28 10:46:11 +02004811 lin_dma_pkt->ctl = ((PACKET_LIN_DMA << GOYA_PKT_CTL_OPCODE_SHIFT) |
4812 (1 << GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT) |
4813 (1 << GOYA_PKT_LIN_DMA_CTL_WO_SHIFT) |
4814 (1 << GOYA_PKT_CTL_RB_SHIFT) |
4815 (1 << GOYA_PKT_CTL_MB_SHIFT));
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004816
Omer Shpigelman27ca384c2019-02-28 10:46:11 +02004817 lin_dma_pkt->ctl |= (is_dram ? DMA_HOST_TO_DRAM : DMA_HOST_TO_SRAM) <<
4818 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
4819
4820 lin_dma_pkt->src_addr = val;
4821 lin_dma_pkt->dst_addr = addr;
4822 lin_dma_pkt->tsize = size;
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004823
4824 job = hl_cs_allocate_job(hdev, true);
4825 if (!job) {
4826 dev_err(hdev->dev, "Failed to allocate a new job\n");
4827 rc = -ENOMEM;
4828 goto release_cb;
4829 }
4830
4831 job->id = 0;
4832 job->user_cb = cb;
4833 job->user_cb->cs_cnt++;
4834 job->user_cb_size = cb_size;
4835 job->hw_queue_id = GOYA_QUEUE_ID_DMA_0;
4836
Oded Gabbayc2164772019-02-16 00:39:24 +02004837 hl_debugfs_add_job(hdev, job);
4838
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004839 parser.ctx_id = HL_KERNEL_ASID_ID;
4840 parser.cs_sequence = 0;
4841 parser.job_id = job->id;
4842 parser.hw_queue_id = job->hw_queue_id;
4843 parser.job_userptr_list = &job->userptr_list;
4844 parser.user_cb = job->user_cb;
4845 parser.user_cb_size = job->user_cb_size;
4846 parser.ext_queue = job->ext_queue;
4847 parser.use_virt_addr = hdev->mmu_enable;
4848
4849 rc = hdev->asic_funcs->cs_parser(hdev, &parser);
4850 if (rc) {
Omer Shpigelman27ca384c2019-02-28 10:46:11 +02004851 dev_err(hdev->dev, "Failed to parse kernel CB\n");
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004852 goto free_job;
4853 }
4854
4855 job->patched_cb = parser.patched_cb;
4856 job->job_cb_size = parser.patched_cb_size;
4857 job->patched_cb->cs_cnt++;
4858
4859 rc = goya_send_job_on_qman0(hdev, job);
4860
4861 job->patched_cb->cs_cnt--;
4862 hl_cb_put(job->patched_cb);
4863
4864free_job:
4865 hl_userptr_delete_list(hdev, &job->userptr_list);
Oded Gabbayc2164772019-02-16 00:39:24 +02004866 hl_debugfs_remove_job(hdev, job);
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004867 kfree(job);
4868 cb->cs_cnt--;
4869
4870release_cb:
4871 hl_cb_put(cb);
4872 hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
4873
4874 return rc;
4875}
4876
Omer Shpigelman27ca384c2019-02-28 10:46:11 +02004877static int goya_context_switch(struct hl_device *hdev, u32 asid)
4878{
4879 struct asic_fixed_properties *prop = &hdev->asic_prop;
4880 u64 addr = prop->sram_base_address;
4881 u32 size = hdev->pldm ? 0x10000 : prop->sram_size;
4882 u64 val = 0x7777777777777777ull;
4883 int rc;
4884
4885 rc = goya_memset_device_memory(hdev, addr, size, val, false);
4886 if (rc) {
4887 dev_err(hdev->dev, "Failed to clear SRAM in context switch\n");
4888 return rc;
4889 }
4890
4891 goya_mmu_prepare(hdev, asid);
4892
4893 return 0;
4894}
4895
4896static int goya_mmu_clear_pgt_range(struct hl_device *hdev)
4897{
4898 struct asic_fixed_properties *prop = &hdev->asic_prop;
4899 struct goya_device *goya = hdev->asic_specific;
4900 u64 addr = prop->mmu_pgt_addr;
4901 u32 size = prop->mmu_pgt_size + MMU_DRAM_DEFAULT_PAGE_SIZE +
4902 MMU_CACHE_MNG_SIZE;
4903
4904 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4905 return 0;
4906
4907 return goya_memset_device_memory(hdev, addr, size, 0, true);
4908}
4909
4910static int goya_mmu_set_dram_default_page(struct hl_device *hdev)
4911{
4912 struct goya_device *goya = hdev->asic_specific;
4913 u64 addr = hdev->asic_prop.mmu_dram_default_page_addr;
4914 u32 size = MMU_DRAM_DEFAULT_PAGE_SIZE;
4915 u64 val = 0x9999999999999999ull;
4916
4917 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4918 return 0;
4919
4920 return goya_memset_device_memory(hdev, addr, size, val, true);
4921}
4922
Omer Shpigelman0feaf862019-02-16 00:39:22 +02004923static void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
4924{
4925 struct goya_device *goya = hdev->asic_specific;
4926 int i;
4927
4928 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4929 return;
4930
4931 if (asid & ~MME_QM_GLBL_SECURE_PROPS_ASID_MASK) {
4932 WARN(1, "asid %u is too big\n", asid);
4933 return;
4934 }
4935
4936 /* zero the MMBP and ASID bits and then set the ASID */
4937 for (i = 0 ; i < GOYA_MMU_REGS_NUM ; i++) {
4938 WREG32_AND(goya_mmu_regs[i], ~0x7FF);
4939 WREG32_OR(goya_mmu_regs[i], asid);
4940 }
4941}
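
/*
 * Illustrative sketch (not part of the driver): the read-modify-write
 * performed above on each secure-props register. Clearing ~0x7FF wipes
 * the low 11 bits which, per the comment above, hold the ASID field and
 * the MMU-bypass (MMBP) bit; OR-ing the ASID back in leaves MMBP at 0 so
 * the engine's transactions go through the MMU. The exact bit split is
 * an assumption made for this example.
 */
#if 0	/* example only, never compiled */
static u32 example_prepare_secure_props(u32 reg_val, u32 asid)
{
	reg_val &= ~0x7FF;	/* clear MMBP + ASID field */
	reg_val |= asid;	/* set the new ASID, keep MMBP = 0 */
	return reg_val;
}
#endif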
4942
4943static void goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard)
4944{
4945 struct goya_device *goya = hdev->asic_specific;
4946 u32 status, timeout_usec;
4947 int rc;
4948
4949 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4950 return;
4951
4952	/* no need for L1-only invalidation in Goya */
4953 if (!is_hard)
4954 return;
4955
4956 if (hdev->pldm)
4957 timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
4958 else
4959 timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
4960
4961 mutex_lock(&hdev->mmu_cache_lock);
4962
4963 /* L0 & L1 invalidation */
4964 WREG32(mmSTLB_INV_ALL_START, 1);
4965
4966 rc = hl_poll_timeout(
4967 hdev,
4968 mmSTLB_INV_ALL_START,
4969 status,
4970 !status,
4971 1000,
4972 timeout_usec);
4973
4974 mutex_unlock(&hdev->mmu_cache_lock);
4975
4976 if (rc)
4977 dev_notice_ratelimited(hdev->dev,
4978 "Timeout when waiting for MMU cache invalidation\n");
4979}
4980
4981static void goya_mmu_invalidate_cache_range(struct hl_device *hdev,
4982 bool is_hard, u32 asid, u64 va, u64 size)
4983{
4984 struct goya_device *goya = hdev->asic_specific;
4985 u32 status, timeout_usec, inv_data, pi;
4986 int rc;
4987
4988 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4989 return;
4990
4991	/* no need for L1-only invalidation in Goya */
4992 if (!is_hard)
4993 return;
4994
4995 if (hdev->pldm)
4996 timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
4997 else
4998 timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
4999
5000 mutex_lock(&hdev->mmu_cache_lock);
5001
5002 /*
5003	 * TODO: currently the entire L0 & L1 are invalidated, as in a regular
5004	 * hard invalidation. Invalidation of specific cache lines, masked by
5005	 * ASID & VA & size, still needs to be applied.
5006	 * Note that L1 will be flushed entirely in any case.
5007 */
5008
5009 /* L0 & L1 invalidation */
5010 inv_data = RREG32(mmSTLB_CACHE_INV);
5011 /* PI is 8 bit */
5012 pi = ((inv_data & STLB_CACHE_INV_PRODUCER_INDEX_MASK) + 1) & 0xFF;
5013 WREG32(mmSTLB_CACHE_INV,
5014 (inv_data & STLB_CACHE_INV_INDEX_MASK_MASK) | pi);
5015
5016 rc = hl_poll_timeout(
5017 hdev,
5018 mmSTLB_INV_CONSUMER_INDEX,
5019 status,
5020 status == pi,
5021 1000,
5022 timeout_usec);
5023
5024 mutex_unlock(&hdev->mmu_cache_lock);
5025
5026 if (rc)
5027 dev_notice_ratelimited(hdev->dev,
5028 "Timeout when waiting for MMU cache invalidation\n");
5029}
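
/*
 * Illustrative sketch (not part of the driver): the ranged invalidation
 * above drives an 8-bit producer index. The driver advances the producer
 * with wraparound and then polls until the consumer index catches up,
 * which amounts to a minimal producer/consumer handshake with the STLB.
 */
#if 0	/* example only, never compiled */
static u32 example_next_pi(u32 inv_data)
{
	/* advance the 8-bit producer index, wrapping 0xFF -> 0x00 */
	return ((inv_data & STLB_CACHE_INV_PRODUCER_INDEX_MASK) + 1) & 0xFF;
}
#endif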
5030
5031static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
5032 u64 phys_addr)
5033{
5034 u32 status, timeout_usec;
5035 int rc;
5036
5037 if (hdev->pldm)
5038 timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
5039 else
5040 timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
5041
5042 WREG32(MMU_HOP0_PA43_12, phys_addr >> MMU_HOP0_PA43_12_SHIFT);
5043 WREG32(MMU_HOP0_PA49_44, phys_addr >> MMU_HOP0_PA49_44_SHIFT);
5044 WREG32(MMU_ASID_BUSY, 0x80000000 | asid);
5045
5046 rc = hl_poll_timeout(
5047 hdev,
5048 MMU_ASID_BUSY,
5049 status,
5050 !(status & 0x80000000),
5051 1000,
5052 timeout_usec);
5053
5054 if (rc) {
5055 dev_err(hdev->dev,
5056 "Timeout during MMU hop0 config of asid %d\n", asid);
5057 return rc;
5058 }
5059
5060 return 0;
5061}
5062
Oded Gabbayf8c8c7d2019-02-16 00:39:20 +02005063int goya_send_heartbeat(struct hl_device *hdev)
5064{
5065 struct goya_device *goya = hdev->asic_specific;
5066 struct armcp_packet hb_pkt;
5067 long result;
5068 int rc;
5069
5070 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
5071 return 0;
5072
5073 memset(&hb_pkt, 0, sizeof(hb_pkt));
5074
5075 hb_pkt.ctl = ARMCP_PACKET_TEST << ARMCP_PKT_CTL_OPCODE_SHIFT;
5076 hb_pkt.value = ARMCP_PACKET_FENCE_VAL;
5077
5078 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt,
5079 sizeof(hb_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
5080
5081 if ((rc) || (result != ARMCP_PACKET_FENCE_VAL))
5082 rc = -EIO;
5083
5084 return rc;
5085}
5086
Oded Gabbayd91389b2019-02-16 00:39:19 +02005087static int goya_armcp_info_get(struct hl_device *hdev)
5088{
5089 struct goya_device *goya = hdev->asic_specific;
5090 struct asic_fixed_properties *prop = &hdev->asic_prop;
5091 struct armcp_packet pkt;
5092 void *armcp_info_cpu_addr;
5093 dma_addr_t armcp_info_dma_addr;
5094 u64 dram_size;
5095 long result;
5096 int rc;
5097
5098 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
5099 return 0;
5100
5101 armcp_info_cpu_addr =
5102 hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
5103 sizeof(struct armcp_info), &armcp_info_dma_addr);
5104 if (!armcp_info_cpu_addr) {
5105 dev_err(hdev->dev,
5106 "Failed to allocate DMA memory for ArmCP info packet\n");
5107 return -ENOMEM;
5108 }
5109
5110 memset(armcp_info_cpu_addr, 0, sizeof(struct armcp_info));
5111
5112 memset(&pkt, 0, sizeof(pkt));
5113
5114 pkt.ctl = ARMCP_PACKET_INFO_GET << ARMCP_PKT_CTL_OPCODE_SHIFT;
5115 pkt.addr = armcp_info_dma_addr + prop->host_phys_base_address;
5116 pkt.data_max_size = sizeof(struct armcp_info);
5117
5118 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
5119 GOYA_ARMCP_INFO_TIMEOUT, &result);
5120
5121 if (rc) {
5122 dev_err(hdev->dev,
5123 "Failed to send armcp info pkt, error %d\n", rc);
5124 goto out;
5125 }
5126
5127 memcpy(&prop->armcp_info, armcp_info_cpu_addr,
5128 sizeof(prop->armcp_info));
5129
5130 dram_size = prop->armcp_info.dram_size;
5131 if (dram_size) {
5132 if ((!is_power_of_2(dram_size)) ||
5133 (dram_size < DRAM_PHYS_DEFAULT_SIZE)) {
5134 dev_err(hdev->dev,
5135 "F/W reported invalid DRAM size %llu. Trying to use default size\n",
5136 dram_size);
5137 dram_size = DRAM_PHYS_DEFAULT_SIZE;
5138 }
5139
5140 prop->dram_size = dram_size;
5141 prop->dram_end_address = prop->dram_base_address + dram_size;
5142 }
5143
5144 rc = hl_build_hwmon_channel_info(hdev, prop->armcp_info.sensors);
5145 if (rc) {
5146 dev_err(hdev->dev,
5147 "Failed to build hwmon channel info, error %d\n", rc);
5148 rc = -EFAULT;
5149 goto out;
5150 }
5151
5152out:
5153 hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
5154 sizeof(struct armcp_info), armcp_info_cpu_addr);
5155
5156 return rc;
5157}
5158
5159static void goya_init_clock_gating(struct hl_device *hdev)
5160{
5161
5162}
5163
5164static void goya_disable_clock_gating(struct hl_device *hdev)
5165{
5166
5167}
Oded Gabbay9494a8d2019-02-16 00:39:17 +02005168
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02005169static bool goya_is_device_idle(struct hl_device *hdev)
5170{
5171 u64 offset, dma_qm_reg, tpc_qm_reg, tpc_cmdq_reg, tpc_cfg_reg;
5172 int i;
5173
5174 offset = mmDMA_QM_1_GLBL_STS0 - mmDMA_QM_0_GLBL_STS0;
5175
5176 for (i = 0 ; i < DMA_MAX_NUM ; i++) {
5177 dma_qm_reg = mmDMA_QM_0_GLBL_STS0 + i * offset;
5178
5179 if ((RREG32(dma_qm_reg) & DMA_QM_IDLE_MASK) !=
5180 DMA_QM_IDLE_MASK)
5181 return false;
5182 }
5183
5184 offset = mmTPC1_QM_GLBL_STS0 - mmTPC0_QM_GLBL_STS0;
5185
5186 for (i = 0 ; i < TPC_MAX_NUM ; i++) {
5187 tpc_qm_reg = mmTPC0_QM_GLBL_STS0 + i * offset;
5188 tpc_cmdq_reg = mmTPC0_CMDQ_GLBL_STS0 + i * offset;
5189 tpc_cfg_reg = mmTPC0_CFG_STATUS + i * offset;
5190
5191 if ((RREG32(tpc_qm_reg) & TPC_QM_IDLE_MASK) !=
5192 TPC_QM_IDLE_MASK)
5193 return false;
5194
5195 if ((RREG32(tpc_cmdq_reg) & TPC_CMDQ_IDLE_MASK) !=
5196 TPC_CMDQ_IDLE_MASK)
5197 return false;
5198
5199 if ((RREG32(tpc_cfg_reg) & TPC_CFG_IDLE_MASK) !=
5200 TPC_CFG_IDLE_MASK)
5201 return false;
5202 }
5203
5204 if ((RREG32(mmMME_QM_GLBL_STS0) & MME_QM_IDLE_MASK) !=
5205 MME_QM_IDLE_MASK)
5206 return false;
5207
5208 if ((RREG32(mmMME_CMDQ_GLBL_STS0) & MME_CMDQ_IDLE_MASK) !=
5209 MME_CMDQ_IDLE_MASK)
5210 return false;
5211
5212 if ((RREG32(mmMME_ARCH_STATUS) & MME_ARCH_IDLE_MASK) !=
5213 MME_ARCH_IDLE_MASK)
5214 return false;
5215
5216 if (RREG32(mmMME_SHADOW_0_STATUS) & MME_SHADOW_IDLE_MASK)
5217 return false;
5218
5219 return true;
5220}
5221
Oded Gabbay9494a8d2019-02-16 00:39:17 +02005222static void goya_hw_queues_lock(struct hl_device *hdev)
5223{
5224 struct goya_device *goya = hdev->asic_specific;
5225
5226 spin_lock(&goya->hw_queues_lock);
5227}
5228
5229static void goya_hw_queues_unlock(struct hl_device *hdev)
5230{
5231 struct goya_device *goya = hdev->asic_specific;
5232
5233 spin_unlock(&goya->hw_queues_lock);
5234}
5235
Oded Gabbayd8dd7b02019-02-16 00:39:23 +02005236static u32 goya_get_pci_id(struct hl_device *hdev)
5237{
5238 return hdev->pdev->device;
5239}
5240
Oded Gabbay5e6e0232019-02-27 12:15:16 +02005241static int goya_get_eeprom_data(struct hl_device *hdev, void *data,
5242 size_t max_size)
Oded Gabbayd91389b2019-02-16 00:39:19 +02005243{
5244 struct goya_device *goya = hdev->asic_specific;
5245 struct asic_fixed_properties *prop = &hdev->asic_prop;
5246 struct armcp_packet pkt;
5247 void *eeprom_info_cpu_addr;
5248 dma_addr_t eeprom_info_dma_addr;
5249 long result;
5250 int rc;
5251
5252 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
5253 return 0;
5254
5255 eeprom_info_cpu_addr =
5256 hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
5257 max_size, &eeprom_info_dma_addr);
5258 if (!eeprom_info_cpu_addr) {
5259 dev_err(hdev->dev,
5260 "Failed to allocate DMA memory for EEPROM info packet\n");
5261 return -ENOMEM;
5262 }
5263
5264 memset(eeprom_info_cpu_addr, 0, max_size);
5265
5266 memset(&pkt, 0, sizeof(pkt));
5267
5268 pkt.ctl = ARMCP_PACKET_EEPROM_DATA_GET << ARMCP_PKT_CTL_OPCODE_SHIFT;
5269 pkt.addr = eeprom_info_dma_addr + prop->host_phys_base_address;
5270 pkt.data_max_size = max_size;
5271
5272 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
5273 GOYA_ARMCP_EEPROM_TIMEOUT, &result);
5274
5275 if (rc) {
5276 dev_err(hdev->dev,
5277 "Failed to send armcp EEPROM pkt, error %d\n", rc);
5278 goto out;
5279 }
5280
5281 /* result contains the actual size */
5282 memcpy(data, eeprom_info_cpu_addr, min((size_t)result, max_size));
5283
5284out:
5285 hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, max_size,
5286 eeprom_info_cpu_addr);
5287
5288 return rc;
5289}
5290
Oded Gabbayf8c8c7d2019-02-16 00:39:20 +02005291static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev)
5292{
5293 return RREG32(mmPSOC_GLOBAL_CONF_APP_STATUS);
5294}
5295
Oded Gabbay99b9d7b2019-02-16 00:39:13 +02005296static const struct hl_asic_funcs goya_funcs = {
5297 .early_init = goya_early_init,
5298 .early_fini = goya_early_fini,
Oded Gabbayd91389b2019-02-16 00:39:19 +02005299 .late_init = goya_late_init,
5300 .late_fini = goya_late_fini,
Oded Gabbay99b9d7b2019-02-16 00:39:13 +02005301 .sw_init = goya_sw_init,
5302 .sw_fini = goya_sw_fini,
Oded Gabbay839c4802019-02-16 00:39:16 +02005303 .hw_init = goya_hw_init,
5304 .hw_fini = goya_hw_fini,
Oded Gabbay1251f232019-02-16 00:39:18 +02005305 .halt_engines = goya_halt_engines,
Oded Gabbay99b9d7b2019-02-16 00:39:13 +02005306 .suspend = goya_suspend,
5307 .resume = goya_resume,
Oded Gabbaybe5d9262019-02-16 00:39:15 +02005308 .cb_mmap = goya_cb_mmap,
Oded Gabbay9494a8d2019-02-16 00:39:17 +02005309 .ring_doorbell = goya_ring_doorbell,
5310 .flush_pq_write = goya_flush_pq_write,
Oded Gabbay99b9d7b2019-02-16 00:39:13 +02005311 .dma_alloc_coherent = goya_dma_alloc_coherent,
5312 .dma_free_coherent = goya_dma_free_coherent,
Oded Gabbay9494a8d2019-02-16 00:39:17 +02005313 .get_int_queue_base = goya_get_int_queue_base,
5314 .test_queues = goya_test_queues,
5315 .dma_pool_zalloc = goya_dma_pool_zalloc,
5316 .dma_pool_free = goya_dma_pool_free,
5317 .cpu_accessible_dma_pool_alloc = goya_cpu_accessible_dma_pool_alloc,
5318 .cpu_accessible_dma_pool_free = goya_cpu_accessible_dma_pool_free,
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02005319 .hl_dma_unmap_sg = goya_dma_unmap_sg,
5320 .cs_parser = goya_cs_parser,
5321 .asic_dma_map_sg = goya_dma_map_sg,
5322 .get_dma_desc_list_size = goya_get_dma_desc_list_size,
5323 .add_end_of_cb_packets = goya_add_end_of_cb_packets,
Oded Gabbay1251f232019-02-16 00:39:18 +02005324 .update_eq_ci = goya_update_eq_ci,
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02005325 .context_switch = goya_context_switch,
5326 .restore_phase_topology = goya_restore_phase_topology,
Oded Gabbayc2164772019-02-16 00:39:24 +02005327 .debugfs_read32 = goya_debugfs_read32,
5328 .debugfs_write32 = goya_debugfs_write32,
Oded Gabbayd91389b2019-02-16 00:39:19 +02005329 .add_device_attr = goya_add_device_attr,
Oded Gabbay1251f232019-02-16 00:39:18 +02005330 .handle_eqe = goya_handle_eqe,
Oded Gabbayd91389b2019-02-16 00:39:19 +02005331 .set_pll_profile = goya_set_pll_profile,
Oded Gabbay1251f232019-02-16 00:39:18 +02005332 .get_events_stat = goya_get_events_stat,
Omer Shpigelman0feaf862019-02-16 00:39:22 +02005333 .read_pte = goya_read_pte,
5334 .write_pte = goya_write_pte,
5335 .mmu_invalidate_cache = goya_mmu_invalidate_cache,
5336 .mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range,
Oded Gabbayf8c8c7d2019-02-16 00:39:20 +02005337 .send_heartbeat = goya_send_heartbeat,
Oded Gabbayd91389b2019-02-16 00:39:19 +02005338 .enable_clock_gating = goya_init_clock_gating,
5339 .disable_clock_gating = goya_disable_clock_gating,
Oded Gabbayeff6f4a2019-02-16 00:39:21 +02005340 .is_device_idle = goya_is_device_idle,
Oded Gabbayf8c8c7d2019-02-16 00:39:20 +02005341 .soft_reset_late_init = goya_soft_reset_late_init,
Oded Gabbay9494a8d2019-02-16 00:39:17 +02005342 .hw_queues_lock = goya_hw_queues_lock,
5343 .hw_queues_unlock = goya_hw_queues_unlock,
Oded Gabbayd8dd7b02019-02-16 00:39:23 +02005344 .get_pci_id = goya_get_pci_id,
Oded Gabbayd91389b2019-02-16 00:39:19 +02005345 .get_eeprom_data = goya_get_eeprom_data,
Oded Gabbayf8c8c7d2019-02-16 00:39:20 +02005346 .send_cpu_message = goya_send_cpu_message,
5347 .get_hw_state = goya_get_hw_state
Oded Gabbay99b9d7b2019-02-16 00:39:13 +02005348};
5349
5350/*
5351 * goya_set_asic_funcs - set Goya function pointers
5352 *
5353 * @*hdev: pointer to hl_device structure
5354 *
5355 */
5356void goya_set_asic_funcs(struct hl_device *hdev)
5357{
5358 hdev->asic_funcs = &goya_funcs;
5359}