// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/topology.h>
#include <linux/uacce.h>
#include "zip.h"

#define PCI_DEVICE_ID_ZIP_PF		0xa250
#define PCI_DEVICE_ID_ZIP_VF		0xa251

#define HZIP_VF_NUM			63
#define HZIP_QUEUE_NUM_V1		4096
#define HZIP_QUEUE_NUM_V2		1024

#define HZIP_CLOCK_GATE_CTRL		0x301004
#define COMP0_ENABLE			BIT(0)
#define COMP1_ENABLE			BIT(1)
#define DECOMP0_ENABLE			BIT(2)
#define DECOMP1_ENABLE			BIT(3)
#define DECOMP2_ENABLE			BIT(4)
#define DECOMP3_ENABLE			BIT(5)
#define DECOMP4_ENABLE			BIT(6)
#define DECOMP5_ENABLE			BIT(7)
#define ALL_COMP_DECOMP_EN		(COMP0_ENABLE | COMP1_ENABLE | \
					 DECOMP0_ENABLE | DECOMP1_ENABLE | \
					 DECOMP2_ENABLE | DECOMP3_ENABLE | \
					 DECOMP4_ENABLE | DECOMP5_ENABLE)
#define DECOMP_CHECK_ENABLE		BIT(16)
#define HZIP_FSM_MAX_CNT		0x301008

#define HZIP_PORT_ARCA_CHE_0		0x301040
#define HZIP_PORT_ARCA_CHE_1		0x301044
#define HZIP_PORT_AWCA_CHE_0		0x301060
#define HZIP_PORT_AWCA_CHE_1		0x301064
#define CACHE_ALL_EN			0xffffffff

#define HZIP_BD_RUSER_32_63		0x301110
#define HZIP_SGL_RUSER_32_63		0x30111c
#define HZIP_DATA_RUSER_32_63		0x301128
#define HZIP_DATA_WUSER_32_63		0x301134
#define HZIP_BD_WUSER_32_63		0x301140

#define HZIP_QM_IDEL_STATUS		0x3040e4

#define HZIP_CORE_DEBUG_COMP_0		0x302000
#define HZIP_CORE_DEBUG_COMP_1		0x303000
#define HZIP_CORE_DEBUG_DECOMP_0	0x304000
#define HZIP_CORE_DEBUG_DECOMP_1	0x305000
#define HZIP_CORE_DEBUG_DECOMP_2	0x306000
#define HZIP_CORE_DEBUG_DECOMP_3	0x307000
#define HZIP_CORE_DEBUG_DECOMP_4	0x308000
#define HZIP_CORE_DEBUG_DECOMP_5	0x309000

#define HZIP_CORE_INT_SOURCE		0x3010A0
#define HZIP_CORE_INT_MASK_REG		0x3010A4
#define HZIP_CORE_INT_SET		0x3010A8
#define HZIP_CORE_INT_STATUS		0x3010AC
#define HZIP_CORE_INT_STATUS_M_ECC	BIT(1)
#define HZIP_CORE_SRAM_ECC_ERR_INFO	0x301148
#define HZIP_CORE_INT_RAS_CE_ENB	0x301160
#define HZIP_CORE_INT_RAS_NFE_ENB	0x301164
#define HZIP_CORE_INT_RAS_FE_ENB	0x301168
#define HZIP_CORE_INT_RAS_NFE_ENABLE	0x7FE
#define HZIP_SRAM_ECC_ERR_NUM_SHIFT	16
#define HZIP_SRAM_ECC_ERR_ADDR_SHIFT	24
#define HZIP_CORE_INT_MASK_ALL		GENMASK(10, 0)
#define HZIP_COMP_CORE_NUM		2
#define HZIP_DECOMP_CORE_NUM		6
#define HZIP_CORE_NUM			(HZIP_COMP_CORE_NUM + \
					 HZIP_DECOMP_CORE_NUM)
#define HZIP_SQE_SIZE			128
#define HZIP_SQ_SIZE			(HZIP_SQE_SIZE * QM_Q_DEPTH)
#define HZIP_PF_DEF_Q_NUM		64
#define HZIP_PF_DEF_Q_BASE		0

#define HZIP_SOFT_CTRL_CNT_CLR_CE	0x301000
#define SOFT_CTRL_CNT_CLR_CE_BIT	BIT(0)
#define HZIP_SOFT_CTRL_ZIP_CONTROL	0x30100C
#define HZIP_AXI_SHUTDOWN_ENABLE	BIT(14)
#define HZIP_WR_PORT			BIT(11)

#define HZIP_BUF_SIZE			22
#define HZIP_SQE_MASK_OFFSET		64
#define HZIP_SQE_MASK_LEN		48

static const char hisi_zip_name[] = "hisi_zip";
static struct dentry *hzip_debugfs_root;
static struct hisi_qm_list zip_devices;

struct hisi_zip_hw_error {
	u32 int_msk;
	const char *msg;
};

struct zip_dfx_item {
	const char *name;
	u32 offset;
};

static struct zip_dfx_item zip_dfx_files[] = {
	{"send_cnt", offsetof(struct hisi_zip_dfx, send_cnt)},
	{"recv_cnt", offsetof(struct hisi_zip_dfx, recv_cnt)},
	{"send_busy_cnt", offsetof(struct hisi_zip_dfx, send_busy_cnt)},
	{"err_bd_cnt", offsetof(struct hisi_zip_dfx, err_bd_cnt)},
};

static const struct hisi_zip_hw_error zip_hw_error[] = {
	{ .int_msk = BIT(0), .msg = "zip_ecc_1bit_err" },
	{ .int_msk = BIT(1), .msg = "zip_ecc_2bit_err" },
	{ .int_msk = BIT(2), .msg = "zip_axi_rresp_err" },
	{ .int_msk = BIT(3), .msg = "zip_axi_bresp_err" },
	{ .int_msk = BIT(4), .msg = "zip_src_addr_parse_err" },
	{ .int_msk = BIT(5), .msg = "zip_dst_addr_parse_err" },
	{ .int_msk = BIT(6), .msg = "zip_pre_in_addr_err" },
	{ .int_msk = BIT(7), .msg = "zip_pre_in_data_err" },
	{ .int_msk = BIT(8), .msg = "zip_com_inf_err" },
	{ .int_msk = BIT(9), .msg = "zip_enc_inf_err" },
	{ .int_msk = BIT(10), .msg = "zip_pre_out_err" },
	{ /* sentinel */ }
};

enum ctrl_debug_file_index {
	HZIP_CURRENT_QM,
	HZIP_CLEAR_ENABLE,
	HZIP_DEBUG_FILE_NUM,
};

static const char * const ctrl_debug_file_name[] = {
	[HZIP_CURRENT_QM] = "current_qm",
	[HZIP_CLEAR_ENABLE] = "clear_enable",
};

struct ctrl_debug_file {
	enum ctrl_debug_file_index index;
	spinlock_t lock;
	struct hisi_zip_ctrl *ctrl;
};

/*
 * One ZIP controller has one PF and multiple VFs. The global configuration
 * owned by the PF is kept in this structure.
 *
 * Only relevant for the PF.
 */
struct hisi_zip_ctrl {
	struct hisi_zip *hisi_zip;
	struct dentry *debug_root;
	struct ctrl_debug_file files[HZIP_DEBUG_FILE_NUM];
};

enum {
	HZIP_COMP_CORE0,
	HZIP_COMP_CORE1,
	HZIP_DECOMP_CORE0,
	HZIP_DECOMP_CORE1,
	HZIP_DECOMP_CORE2,
	HZIP_DECOMP_CORE3,
	HZIP_DECOMP_CORE4,
	HZIP_DECOMP_CORE5,
};

static const u64 core_offsets[] = {
	[HZIP_COMP_CORE0] = 0x302000,
	[HZIP_COMP_CORE1] = 0x303000,
	[HZIP_DECOMP_CORE0] = 0x304000,
	[HZIP_DECOMP_CORE1] = 0x305000,
	[HZIP_DECOMP_CORE2] = 0x306000,
	[HZIP_DECOMP_CORE3] = 0x307000,
	[HZIP_DECOMP_CORE4] = 0x308000,
	[HZIP_DECOMP_CORE5] = 0x309000,
};

static const struct debugfs_reg32 hzip_dfx_regs[] = {
	{"HZIP_GET_BD_NUM ", 0x00ull},
	{"HZIP_GET_RIGHT_BD ", 0x04ull},
	{"HZIP_GET_ERROR_BD ", 0x08ull},
	{"HZIP_DONE_BD_NUM ", 0x0cull},
	{"HZIP_WORK_CYCLE ", 0x10ull},
	{"HZIP_IDLE_CYCLE ", 0x18ull},
	{"HZIP_MAX_DELAY ", 0x20ull},
	{"HZIP_MIN_DELAY ", 0x24ull},
	{"HZIP_AVG_DELAY ", 0x28ull},
	{"HZIP_MEM_VISIBLE_DATA ", 0x30ull},
	{"HZIP_MEM_VISIBLE_ADDR ", 0x34ull},
	{"HZIP_COMSUMED_BYTE ", 0x38ull},
	{"HZIP_PRODUCED_BYTE ", 0x40ull},
	{"HZIP_COMP_INF ", 0x70ull},
	{"HZIP_PRE_OUT ", 0x78ull},
	{"HZIP_BD_RD ", 0x7cull},
	{"HZIP_BD_WR ", 0x80ull},
	{"HZIP_GET_BD_AXI_ERR_NUM ", 0x84ull},
	{"HZIP_GET_BD_PARSE_ERR_NUM ", 0x88ull},
	{"HZIP_ADD_BD_AXI_ERR_NUM ", 0x8cull},
	{"HZIP_DECOMP_STF_RELOAD_CURR_ST ", 0x94ull},
	{"HZIP_DECOMP_LZ77_CURR_ST ", 0x9cull},
};

static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	return q_num_set(val, kp, PCI_DEVICE_ID_ZIP_PF);
}

static const struct kernel_param_ops pf_q_num_ops = {
	.set = pf_q_num_set,
	.get = param_get_int,
};

static u32 pf_q_num = HZIP_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF (v1 1-4096, v2 1-1024)");

static const struct kernel_param_ops vfs_num_ops = {
	.set = vfs_num_set,
	.get = param_get_int,
};

static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable (1-63), 0 (default)");

static const struct pci_device_id hisi_zip_dev_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_PF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_VF) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, hisi_zip_dev_ids);

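/*
 * Allocate @qp_num queue pairs from the registered ZIP devices, preferring
 * a device close to @node; when the caller passes NUMA_NO_NODE, fall back
 * to the NUMA node of the current CPU.
 */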
int zip_create_qps(struct hisi_qp **qps, int qp_num, int node)
{
	if (node == NUMA_NO_NODE)
		node = cpu_to_node(smp_processor_id());

	return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps);
}

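/*
 * One-off PF hardware setup: configure the QM and ZIP user domains, enable
 * read/write caching, open all compression/decompression cores and turn on
 * SQC/CQC cache write-back.
 */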
static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
{
	void __iomem *base = qm->io_base;

	/* qm user domain */
	writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1);
	writel(ARUSER_M_CFG_ENABLE, base + QM_ARUSER_M_CFG_ENABLE);
	writel(AXUSER_BASE, base + QM_AWUSER_M_CFG_1);
	writel(AWUSER_M_CFG_ENABLE, base + QM_AWUSER_M_CFG_ENABLE);
	writel(WUSER_M_CFG_ENABLE, base + QM_WUSER_M_CFG_ENABLE);

	/* qm cache */
	writel(AXI_M_CFG, base + QM_AXI_M_CFG);
	writel(AXI_M_CFG_ENABLE, base + QM_AXI_M_CFG_ENABLE);
	/* disable FLR triggered by BME (bus master enable) */
	writel(PEH_AXUSER_CFG, base + QM_PEH_AXUSER_CFG);
	writel(PEH_AXUSER_CFG_ENABLE, base + QM_PEH_AXUSER_CFG_ENABLE);

	/* cache */
	writel(CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_0);
	writel(CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_1);
	writel(CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_0);
	writel(CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_1);

	/* user domain configurations */
	writel(AXUSER_BASE, base + HZIP_BD_RUSER_32_63);
	writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
	writel(AXUSER_BASE, base + HZIP_BD_WUSER_32_63);

	if (qm->use_sva) {
		writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_RUSER_32_63);
		writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_WUSER_32_63);
	} else {
		writel(AXUSER_BASE, base + HZIP_DATA_RUSER_32_63);
		writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63);
	}

	/* enable all compression/decompression cores */
	writel(DECOMP_CHECK_ENABLE | ALL_COMP_DECOMP_EN,
	       base + HZIP_CLOCK_GATE_CTRL);

	/* enable sqc, cqc writeback */
	writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
	       CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
	       FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL);

	return 0;
}

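/*
 * Enable reporting of ZIP hardware errors. V1 hardware cannot handle them,
 * so all error interrupts stay masked there; on later versions pending
 * errors are cleared, the RAS CE/NFE/FE types are configured and the AXI
 * shutdown (block master OOO) path is armed for multi-bit ECC errors.
 */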
static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
{
	u32 val;

	if (qm->ver == QM_HW_V1) {
		writel(HZIP_CORE_INT_MASK_ALL,
		       qm->io_base + HZIP_CORE_INT_MASK_REG);
		dev_info(&qm->pdev->dev, "Does not support hw error handling\n");
		return;
	}

	/* clear ZIP hw error source if any */
	writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_SOURCE);

	/* configure error type */
	writel(0x1, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
	writel(0x0, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
	writel(HZIP_CORE_INT_RAS_NFE_ENABLE,
	       qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);

	/* enable ZIP hw error interrupts */
	writel(0, qm->io_base + HZIP_CORE_INT_MASK_REG);

	/* enable ZIP block master OOO when m-bit error occurs */
	val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
	val = val | HZIP_AXI_SHUTDOWN_ENABLE;
	writel(val, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
}

static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
{
	u32 val;

	/* disable ZIP hw error interrupts */
	writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_MASK_REG);

	/* disable ZIP block master OOO when m-bit error occurs */
	val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
	val = val & ~HZIP_AXI_SHUTDOWN_ENABLE;
	writel(val, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
}

static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
{
	struct hisi_zip *hisi_zip = file->ctrl->hisi_zip;

	return &hisi_zip->qm;
}

static u32 current_qm_read(struct ctrl_debug_file *file)
{
	struct hisi_qm *qm = file_to_qm(file);

	return readl(qm->io_base + QM_DFX_MB_CNT_VF);
}

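/*
 * Select which function the QM DFX counters refer to: 0 selects the PF
 * itself, 1..vfs_num selects a VF. curr_qm_qp_num is recalculated to the
 * number of queues owned by the selected function.
 */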
static int current_qm_write(struct ctrl_debug_file *file, u32 val)
{
	struct hisi_qm *qm = file_to_qm(file);
	u32 vfq_num;
	u32 tmp;

	if (val > qm->vfs_num)
		return -EINVAL;

	/* Calculate curr_qm_qp_num and store */
	if (val == 0) {
		qm->debug.curr_qm_qp_num = qm->qp_num;
	} else {
		vfq_num = (qm->ctrl_qp_num - qm->qp_num) / qm->vfs_num;
		if (val == qm->vfs_num)
			qm->debug.curr_qm_qp_num = qm->ctrl_qp_num -
				qm->qp_num - (qm->vfs_num - 1) * vfq_num;
		else
			qm->debug.curr_qm_qp_num = vfq_num;
	}

	writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
	writel(val, qm->io_base + QM_DFX_DB_CNT_VF);

	tmp = val |
	      (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
	writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);

	tmp = val |
	      (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
	writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);

	return 0;
}

static u32 clear_enable_read(struct ctrl_debug_file *file)
{
	struct hisi_qm *qm = file_to_qm(file);

	return readl(qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE) &
	       SOFT_CTRL_CNT_CLR_CE_BIT;
}

static int clear_enable_write(struct ctrl_debug_file *file, u32 val)
{
	struct hisi_qm *qm = file_to_qm(file);
	u32 tmp;

	if (val != 1 && val != 0)
		return -EINVAL;

	tmp = (readl(qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE) &
	       ~SOFT_CTRL_CNT_CLR_CE_BIT) | val;
	writel(tmp, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);

	return 0;
}

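/* debugfs read/write handlers shared by the current_qm and clear_enable files */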
static ssize_t ctrl_debug_read(struct file *filp, char __user *buf,
			       size_t count, loff_t *pos)
{
	struct ctrl_debug_file *file = filp->private_data;
	char tbuf[HZIP_BUF_SIZE];
	u32 val;
	int ret;

	spin_lock_irq(&file->lock);
	switch (file->index) {
	case HZIP_CURRENT_QM:
		val = current_qm_read(file);
		break;
	case HZIP_CLEAR_ENABLE:
		val = clear_enable_read(file);
		break;
	default:
		spin_unlock_irq(&file->lock);
		return -EINVAL;
	}
	spin_unlock_irq(&file->lock);
	ret = sprintf(tbuf, "%u\n", val);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf,
				size_t count, loff_t *pos)
{
	struct ctrl_debug_file *file = filp->private_data;
	char tbuf[HZIP_BUF_SIZE];
	unsigned long val;
	int len, ret;

	if (*pos != 0)
		return 0;

	if (count >= HZIP_BUF_SIZE)
		return -ENOSPC;

	len = simple_write_to_buffer(tbuf, HZIP_BUF_SIZE - 1, pos, buf, count);
	if (len < 0)
		return len;

	tbuf[len] = '\0';
	if (kstrtoul(tbuf, 0, &val))
		return -EFAULT;

	spin_lock_irq(&file->lock);
	switch (file->index) {
	case HZIP_CURRENT_QM:
		ret = current_qm_write(file, val);
		if (ret)
			goto err_input;
		break;
	case HZIP_CLEAR_ENABLE:
		ret = clear_enable_write(file, val);
		if (ret)
			goto err_input;
		break;
	default:
		ret = -EINVAL;
		goto err_input;
	}
	spin_unlock_irq(&file->lock);

	return count;

err_input:
	spin_unlock_irq(&file->lock);
	return ret;
}

static const struct file_operations ctrl_debug_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = ctrl_debug_read,
	.write = ctrl_debug_write,
};

static int zip_debugfs_atomic64_set(void *data, u64 val)
{
	if (val)
		return -EINVAL;

	atomic64_set((atomic64_t *)data, 0);

	return 0;
}

static int zip_debugfs_atomic64_get(void *data, u64 *val)
{
	*val = atomic64_read((atomic64_t *)data);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(zip_atomic64_ops, zip_debugfs_atomic64_get,
			 zip_debugfs_atomic64_set, "%llu\n");

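/*
 * Create one debugfs directory per compression/decompression core, each
 * exposing the hzip_dfx_regs register set at that core's MMIO offset.
 */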
static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
{
	struct hisi_zip *hisi_zip = ctrl->hisi_zip;
	struct hisi_qm *qm = &hisi_zip->qm;
	struct device *dev = &qm->pdev->dev;
	struct debugfs_regset32 *regset;
	struct dentry *tmp_d;
	char buf[HZIP_BUF_SIZE];
	int i;

	for (i = 0; i < HZIP_CORE_NUM; i++) {
		if (i < HZIP_COMP_CORE_NUM)
			sprintf(buf, "comp_core%d", i);
		else
			sprintf(buf, "decomp_core%d", i - HZIP_COMP_CORE_NUM);

		regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
		if (!regset)
			return -ENOMEM;

		regset->regs = hzip_dfx_regs;
		regset->nregs = ARRAY_SIZE(hzip_dfx_regs);
		regset->base = qm->io_base + core_offsets[i];

		tmp_d = debugfs_create_dir(buf, ctrl->debug_root);
		debugfs_create_regset32("regs", 0444, tmp_d, regset);
	}

	return 0;
}

static void hisi_zip_dfx_debug_init(struct hisi_qm *qm)
{
	struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm);
	struct hisi_zip_dfx *dfx = &zip->dfx;
	struct dentry *tmp_dir;
	void *data;
	int i;

	tmp_dir = debugfs_create_dir("zip_dfx", qm->debug.debug_root);
	for (i = 0; i < ARRAY_SIZE(zip_dfx_files); i++) {
		data = (atomic64_t *)((uintptr_t)dfx + zip_dfx_files[i].offset);
		debugfs_create_file(zip_dfx_files[i].name, 0644, tmp_dir,
				    data, &zip_atomic64_ops);
	}
}

static int hisi_zip_ctrl_debug_init(struct hisi_zip_ctrl *ctrl)
{
	int i;

	for (i = HZIP_CURRENT_QM; i < HZIP_DEBUG_FILE_NUM; i++) {
		spin_lock_init(&ctrl->files[i].lock);
		ctrl->files[i].ctrl = ctrl;
		ctrl->files[i].index = i;

		debugfs_create_file(ctrl_debug_file_name[i], 0600,
				    ctrl->debug_root, ctrl->files + i,
				    &ctrl_debug_fops);
	}

	return hisi_zip_core_debug_init(ctrl);
}

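/*
 * Build the debugfs hierarchy for one device: common QM entries, the
 * PF-only control files with per-core register dumps, and the zip_dfx
 * counters.
 */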
static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip)
{
	struct hisi_qm *qm = &hisi_zip->qm;
	struct device *dev = &qm->pdev->dev;
	struct dentry *dev_d;
	int ret;

	dev_d = debugfs_create_dir(dev_name(dev), hzip_debugfs_root);

	qm->debug.sqe_mask_offset = HZIP_SQE_MASK_OFFSET;
	qm->debug.sqe_mask_len = HZIP_SQE_MASK_LEN;
	qm->debug.debug_root = dev_d;
	ret = hisi_qm_debug_init(qm);
	if (ret)
		goto failed_to_create;

	if (qm->fun_type == QM_HW_PF) {
		hisi_zip->ctrl->debug_root = dev_d;
		ret = hisi_zip_ctrl_debug_init(hisi_zip->ctrl);
		if (ret)
			goto failed_to_create;
	}

	hisi_zip_dfx_debug_init(qm);

	return 0;

failed_to_create:
	debugfs_remove_recursive(hzip_debugfs_root);
	return ret;
}

static void hisi_zip_debug_regs_clear(struct hisi_zip *hisi_zip)
{
	struct hisi_qm *qm = &hisi_zip->qm;

	writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
	writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
	writel(0x0, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);

	hisi_qm_debug_regs_clear(qm);
}

static void hisi_zip_debugfs_exit(struct hisi_zip *hisi_zip)
{
	struct hisi_qm *qm = &hisi_zip->qm;

	debugfs_remove_recursive(qm->debug.debug_root);

	if (qm->fun_type == QM_HW_PF)
		hisi_zip_debug_regs_clear(hisi_zip);
}

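/*
 * Decode the hardware error status bits into log messages; on a multi-bit
 * ECC error also report the affected SRAM number and address.
 */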
static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
	const struct hisi_zip_hw_error *err = zip_hw_error;
	struct device *dev = &qm->pdev->dev;
	u32 err_val;

	while (err->msg) {
		if (err->int_msk & err_sts) {
			dev_err(dev, "%s [error status=0x%x] found\n",
				err->msg, err->int_msk);

			if (err->int_msk & HZIP_CORE_INT_STATUS_M_ECC) {
				err_val = readl(qm->io_base +
						HZIP_CORE_SRAM_ECC_ERR_INFO);
				dev_err(dev, "hisi-zip multi ecc sram num=0x%x\n",
					((err_val >>
					  HZIP_SRAM_ECC_ERR_NUM_SHIFT) & 0xFF));
				dev_err(dev, "hisi-zip multi ecc sram addr=0x%x\n",
					(err_val >>
					 HZIP_SRAM_ECC_ERR_ADDR_SHIFT));
			}
		}
		err++;
	}
}

static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + HZIP_CORE_INT_STATUS);
}

static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
	writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE);
}

static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm)
{
	u32 val;

	val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);

	writel(val & ~HZIP_AXI_SHUTDOWN_ENABLE,
	       qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);

	writel(val | HZIP_AXI_SHUTDOWN_ENABLE,
	       qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
}

static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
{
	u32 nfe_enb;

	/* Disable ECC Mbit error report. */
	nfe_enb = readl(qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
	writel(nfe_enb & ~HZIP_CORE_INT_STATUS_M_ECC,
	       qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);

	/* Inject zip ECC Mbit error to block master ooo. */
	writel(HZIP_CORE_INT_STATUS_M_ECC,
	       qm->io_base + HZIP_CORE_INT_SET);
}

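/*
 * Error-handling callbacks plugged into the QM framework: hardware error
 * enable/disable, status query/clear/logging, and AXI master OOO control
 * used during device reset.
 */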
static const struct hisi_qm_err_ini hisi_zip_err_ini = {
	.hw_init = hisi_zip_set_user_domain_and_cache,
	.hw_err_enable = hisi_zip_hw_error_enable,
	.hw_err_disable = hisi_zip_hw_error_disable,
	.get_dev_hw_err_status = hisi_zip_get_hw_err_status,
	.clear_dev_hw_err_status = hisi_zip_clear_hw_err_status,
	.log_dev_hw_err = hisi_zip_log_hw_error,
	.open_axi_master_ooo = hisi_zip_open_axi_master_ooo,
	.close_axi_master_ooo = hisi_zip_close_axi_master_ooo,
	.err_info = {
		.ce = QM_BASE_CE,
		.nfe = QM_BASE_NFE |
		       QM_ACC_WB_NOT_READY_TIMEOUT,
		.fe = 0,
		.ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC,
		.msi_wr_port = HZIP_WR_PORT,
		.acpi_rst = "ZRST",
	}
};

static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
{
	struct hisi_qm *qm = &hisi_zip->qm;
	struct hisi_zip_ctrl *ctrl;

	ctrl = devm_kzalloc(&qm->pdev->dev, sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;

	hisi_zip->ctrl = ctrl;
	ctrl->hisi_zip = hisi_zip;

	if (qm->ver == QM_HW_V1)
		qm->ctrl_qp_num = HZIP_QUEUE_NUM_V1;
	else
		qm->ctrl_qp_num = HZIP_QUEUE_NUM_V2;

	qm->err_ini = &hisi_zip_err_ini;

	hisi_zip_set_user_domain_and_cache(qm);
	hisi_qm_dev_err_init(qm);
	hisi_zip_debug_regs_clear(hisi_zip);

	return 0;
}

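/*
 * Fill in the QM description before hisi_qm_init(): SQE size, supported
 * algorithms and function type. The PF takes its queue count from the
 * pf_q_num module parameter; a VF on V1 hardware assumes the fixed split
 * described in the comment below.
 */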
static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
	qm->pdev = pdev;
	qm->ver = pdev->revision;
	qm->algs = "zlib\ngzip";
	qm->sqe_size = HZIP_SQE_SIZE;
	qm->dev_name = hisi_zip_name;

	qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ?
			QM_HW_PF : QM_HW_VF;
	if (qm->fun_type == QM_HW_PF) {
		qm->qp_base = HZIP_PF_DEF_Q_BASE;
		qm->qp_num = pf_q_num;
		qm->qm_list = &zip_devices;
	} else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
		/*
		 * A VF cannot read the QM configuration on V1 hardware, so
		 * the PF is forced to use HZIP_PF_DEF_Q_NUM queues and only
		 * one VF is supported there, which lets the VF assume the
		 * remaining queues.
		 *
		 * V2 hardware has no such problem.
		 */
		qm->qp_base = HZIP_PF_DEF_Q_NUM;
		qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM;
	}

	return hisi_qm_init(qm);
}

static int hisi_zip_probe_init(struct hisi_zip *hisi_zip)
{
	struct hisi_qm *qm = &hisi_zip->qm;
	int ret;

	if (qm->fun_type == QM_HW_PF) {
		ret = hisi_zip_pf_probe_init(hisi_zip);
		if (ret)
			return ret;
	}

	return 0;
}

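/*
 * PCI probe: initialize the QM, do PF-specific setup, start the QM, create
 * debugfs entries, register the uacce interface when available and enable
 * SR-IOV if the vfs_num module parameter is set.
 */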
static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct hisi_zip *hisi_zip;
	struct hisi_qm *qm;
	int ret;

	hisi_zip = devm_kzalloc(&pdev->dev, sizeof(*hisi_zip), GFP_KERNEL);
	if (!hisi_zip)
		return -ENOMEM;

	qm = &hisi_zip->qm;

	ret = hisi_zip_qm_init(qm, pdev);
	if (ret) {
		pci_err(pdev, "Failed to init ZIP QM (%d)!\n", ret);
		return ret;
	}

	ret = hisi_zip_probe_init(hisi_zip);
	if (ret) {
		pci_err(pdev, "Failed to probe (%d)!\n", ret);
		goto err_qm_uninit;
	}

	ret = hisi_qm_start(qm);
	if (ret)
		goto err_qm_uninit;

	ret = hisi_zip_debugfs_init(hisi_zip);
	if (ret)
		dev_err(&pdev->dev, "Failed to init debugfs (%d)!\n", ret);

	hisi_qm_add_to_list(qm, &zip_devices);

	if (qm->uacce) {
		ret = uacce_register(qm->uacce);
		if (ret)
			goto err_qm_uninit;
	}

	if (qm->fun_type == QM_HW_PF && vfs_num > 0) {
		ret = hisi_qm_sriov_enable(pdev, vfs_num);
		if (ret < 0)
			goto err_remove_from_list;
	}

	return 0;

err_remove_from_list:
	hisi_qm_del_from_list(qm, &zip_devices);
	hisi_zip_debugfs_exit(hisi_zip);
	hisi_qm_stop(qm);
err_qm_uninit:
	hisi_qm_uninit(qm);

	return ret;
}

static void hisi_zip_remove(struct pci_dev *pdev)
{
	struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
	struct hisi_qm *qm = &hisi_zip->qm;

	if (qm->fun_type == QM_HW_PF && qm->vfs_num)
		hisi_qm_sriov_disable(pdev);

	hisi_zip_debugfs_exit(hisi_zip);
	hisi_qm_stop(qm);

	hisi_qm_dev_err_uninit(qm);
	hisi_qm_uninit(qm);
	hisi_qm_del_from_list(qm, &zip_devices);
}

static const struct pci_error_handlers hisi_zip_err_handler = {
	.error_detected = hisi_qm_dev_err_detected,
	.slot_reset = hisi_qm_dev_slot_reset,
	.reset_prepare = hisi_qm_reset_prepare,
	.reset_done = hisi_qm_reset_done,
};

static struct pci_driver hisi_zip_pci_driver = {
	.name = "hisi_zip",
	.id_table = hisi_zip_dev_ids,
	.probe = hisi_zip_probe,
	.remove = hisi_zip_remove,
	.sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ?
				hisi_qm_sriov_configure : NULL,
	.err_handler = &hisi_zip_err_handler,
};

static void hisi_zip_register_debugfs(void)
{
	if (!debugfs_initialized())
		return;

	hzip_debugfs_root = debugfs_create_dir("hisi_zip", NULL);
}

static void hisi_zip_unregister_debugfs(void)
{
	debugfs_remove_recursive(hzip_debugfs_root);
}

static int __init hisi_zip_init(void)
{
	int ret;

	hisi_qm_init_list(&zip_devices);
	hisi_zip_register_debugfs();

	ret = pci_register_driver(&hisi_zip_pci_driver);
	if (ret < 0) {
		pr_err("Failed to register pci driver.\n");
		goto err_pci;
	}

	ret = hisi_zip_register_to_crypto();
	if (ret < 0) {
		pr_err("Failed to register driver to crypto.\n");
		goto err_crypto;
	}

	return 0;

err_crypto:
	pci_unregister_driver(&hisi_zip_pci_driver);
err_pci:
	hisi_zip_unregister_debugfs();

	return ret;
}

static void __exit hisi_zip_exit(void)
{
	hisi_zip_unregister_from_crypto();
	pci_unregister_driver(&hisi_zip_pci_driver);
	hisi_zip_unregister_debugfs();
}

module_init(hisi_zip_init);
module_exit(hisi_zip_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
MODULE_DESCRIPTION("Driver for HiSilicon ZIP accelerator");