blob: 7b524b614f2253ee9f7d5cffc95a381f5f566ddf [file] [log] [blame]
// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2018 The Linux Foundation. All rights reserved.
 */
5
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/platform_device.h>
#include <linux/qcom_scm.h>
#include <linux/string.h>
#include <net/sock.h>

#include "debug.h"
#include "snoc.h"

#define ATH10K_QMI_CLIENT_ID		0x4b4e454c
#define ATH10K_QMI_TIMEOUT		30
25
26static int ath10k_qmi_map_msa_permission(struct ath10k_qmi *qmi,
27 struct ath10k_msa_mem_info *mem_info)
28{
29 struct qcom_scm_vmperm dst_perms[3];
30 struct ath10k *ar = qmi->ar;
31 unsigned int src_perms;
32 u32 perm_count;
33 int ret;
34
35 src_perms = BIT(QCOM_SCM_VMID_HLOS);
36
37 dst_perms[0].vmid = QCOM_SCM_VMID_MSS_MSA;
38 dst_perms[0].perm = QCOM_SCM_PERM_RW;
39 dst_perms[1].vmid = QCOM_SCM_VMID_WLAN;
40 dst_perms[1].perm = QCOM_SCM_PERM_RW;
41
42 if (mem_info->secure) {
43 perm_count = 2;
44 } else {
45 dst_perms[2].vmid = QCOM_SCM_VMID_WLAN_CE;
46 dst_perms[2].perm = QCOM_SCM_PERM_RW;
47 perm_count = 3;
48 }
49
50 ret = qcom_scm_assign_mem(mem_info->addr, mem_info->size,
51 &src_perms, dst_perms, perm_count);
52 if (ret < 0)
53 ath10k_err(ar, "failed to assign msa map permissions: %d\n", ret);
54
55 return ret;
56}
57
58static int ath10k_qmi_unmap_msa_permission(struct ath10k_qmi *qmi,
59 struct ath10k_msa_mem_info *mem_info)
60{
61 struct qcom_scm_vmperm dst_perms;
62 struct ath10k *ar = qmi->ar;
63 unsigned int src_perms;
64 int ret;
65
66 src_perms = BIT(QCOM_SCM_VMID_MSS_MSA) | BIT(QCOM_SCM_VMID_WLAN);
67
68 if (!mem_info->secure)
69 src_perms |= BIT(QCOM_SCM_VMID_WLAN_CE);
70
71 dst_perms.vmid = QCOM_SCM_VMID_HLOS;
72 dst_perms.perm = QCOM_SCM_PERM_RW;
73
74 ret = qcom_scm_assign_mem(mem_info->addr, mem_info->size,
75 &src_perms, &dst_perms, 1);
76 if (ret < 0)
77 ath10k_err(ar, "failed to unmap msa permissions: %d\n", ret);
78
79 return ret;
80}
81
82static int ath10k_qmi_setup_msa_permissions(struct ath10k_qmi *qmi)
83{
84 int ret;
85 int i;
86
87 for (i = 0; i < qmi->nr_mem_region; i++) {
88 ret = ath10k_qmi_map_msa_permission(qmi, &qmi->mem_region[i]);
89 if (ret)
90 goto err_unmap;
91 }
92
93 return 0;
94
95err_unmap:
96 for (i--; i >= 0; i--)
97 ath10k_qmi_unmap_msa_permission(qmi, &qmi->mem_region[i]);
98 return ret;
99}
100
101static void ath10k_qmi_remove_msa_permission(struct ath10k_qmi *qmi)
102{
103 int i;
104
105 for (i = 0; i < qmi->nr_mem_region; i++)
106 ath10k_qmi_unmap_msa_permission(qmi, &qmi->mem_region[i]);
107}
108
/* Query firmware (QMI_WLFW_MSA_INFO_REQ_V01) for the MSA sub-regions it
 * wants mapped, validate that every region lies inside the host-provided
 * MSA window [msa_pa, msa_pa + msa_mem_size], and cache the results in
 * qmi->mem_region[] / qmi->nr_mem_region.
 *
 * Returns 0 on success, or a negative errno on transport failure, a
 * rejected response, or an out-of-range region.
 */
static int ath10k_qmi_msa_mem_info_send_sync_msg(struct ath10k_qmi *qmi)
{
	struct wlfw_msa_info_resp_msg_v01 resp = {};
	struct wlfw_msa_info_req_msg_v01 req = {};
	struct ath10k *ar = qmi->ar;
	phys_addr_t max_mapped_addr;
	struct qmi_txn txn;
	int ret;
	int i;

	req.msa_addr = qmi->msa_pa;
	req.size = qmi->msa_mem_size;

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
			   wlfw_msa_info_resp_msg_v01_ei, &resp);
	if (ret < 0)
		goto out;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_MSA_INFO_REQ_V01,
			       WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_msa_info_req_msg_v01_ei, &req);
	if (ret < 0) {
		/* Txn must be cancelled when the send fails, else it leaks. */
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send msa mem info req: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "msa info req rejected: %d\n", resp.resp.error);
		ret = -EINVAL;
		goto out;
	}

	/* Bound the count before indexing the fixed-size response array. */
	if (resp.mem_region_info_len > QMI_WLFW_MAX_MEM_REG_V01) {
		ath10k_err(ar, "invalid memory region length received: %d\n",
			   resp.mem_region_info_len);
		ret = -EINVAL;
		goto out;
	}

	max_mapped_addr = qmi->msa_pa + qmi->msa_mem_size;
	qmi->nr_mem_region = resp.mem_region_info_len;
	for (i = 0; i < resp.mem_region_info_len; i++) {
		/* Reject regions that overflow or escape the MSA window so a
		 * rogue firmware response cannot claim arbitrary host memory.
		 */
		if (resp.mem_region_info[i].size > qmi->msa_mem_size ||
		    resp.mem_region_info[i].region_addr > max_mapped_addr ||
		    resp.mem_region_info[i].region_addr < qmi->msa_pa ||
		    resp.mem_region_info[i].size +
		    resp.mem_region_info[i].region_addr > max_mapped_addr) {
			ath10k_err(ar, "received out of range memory region address 0x%llx with size 0x%x, aborting\n",
				   resp.mem_region_info[i].region_addr,
				   resp.mem_region_info[i].size);
			ret = -EINVAL;
			goto fail_unwind;
		}
		qmi->mem_region[i].addr = resp.mem_region_info[i].region_addr;
		qmi->mem_region[i].size = resp.mem_region_info[i].size;
		qmi->mem_region[i].secure = resp.mem_region_info[i].secure_flag;
		ath10k_dbg(ar, ATH10K_DBG_QMI,
			   "qmi msa mem region %d addr 0x%pa size 0x%x flag 0x%08x\n",
			   i, &qmi->mem_region[i].addr,
			   qmi->mem_region[i].size,
			   qmi->mem_region[i].secure);
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa mem info request completed\n");
	return 0;

fail_unwind:
	/* Scrub the entries cached before the invalid one (indices 0..i-1). */
	memset(&qmi->mem_region[0], 0, sizeof(qmi->mem_region[0]) * i);
out:
	return ret;
}
186
187static int ath10k_qmi_msa_ready_send_sync_msg(struct ath10k_qmi *qmi)
188{
189 struct wlfw_msa_ready_resp_msg_v01 resp = {};
190 struct wlfw_msa_ready_req_msg_v01 req = {};
191 struct ath10k *ar = qmi->ar;
192 struct qmi_txn txn;
193 int ret;
194
195 ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
196 wlfw_msa_ready_resp_msg_v01_ei, &resp);
197 if (ret < 0)
198 goto out;
199
200 ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
201 QMI_WLFW_MSA_READY_REQ_V01,
202 WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN,
203 wlfw_msa_ready_req_msg_v01_ei, &req);
204 if (ret < 0) {
205 qmi_txn_cancel(&txn);
206 ath10k_err(ar, "failed to send msa mem ready request: %d\n", ret);
207 goto out;
208 }
209
210 ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
211 if (ret < 0)
212 goto out;
213
214 if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
215 ath10k_err(ar, "msa ready request rejected: %d\n", resp.resp.error);
216 ret = -EINVAL;
217 }
218
219 ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa mem ready request completed\n");
220 return 0;
221
222out:
223 return ret;
224}
225
/* Push the board data file (BDF) to firmware in chunks of at most
 * QMI_WLFW_MAX_DATA_SIZE_V01 bytes via QMI_WLFW_BDF_DOWNLOAD_REQ_V01.
 * One request struct is heap-allocated and reused for every segment.
 *
 * Returns 0 on success or a negative errno.
 */
static int ath10k_qmi_bdf_dnld_send_sync(struct ath10k_qmi *qmi)
{
	struct wlfw_bdf_download_resp_msg_v01 resp = {};
	struct wlfw_bdf_download_req_msg_v01 *req;
	struct ath10k *ar = qmi->ar;
	unsigned int remaining;
	struct qmi_txn txn;
	const u8 *temp;
	int ret;

	/* Request struct is too large for the stack. */
	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	temp = ar->normal_mode_fw.board_data;
	remaining = ar->normal_mode_fw.board_len;

	while (remaining) {
		req->valid = 1;
		req->file_id_valid = 1;
		req->file_id = 0;
		req->total_size_valid = 1;
		req->total_size = ar->normal_mode_fw.board_len;
		req->seg_id_valid = 1;
		req->data_valid = 1;
		req->end_valid = 1;

		/* Mark the final segment with end = 1. */
		if (remaining > QMI_WLFW_MAX_DATA_SIZE_V01) {
			req->data_len = QMI_WLFW_MAX_DATA_SIZE_V01;
		} else {
			req->data_len = remaining;
			req->end = 1;
		}

		memcpy(req->data, temp, req->data_len);

		ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
				   wlfw_bdf_download_resp_msg_v01_ei,
				   &resp);
		if (ret < 0)
			goto out;

		ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
				       QMI_WLFW_BDF_DOWNLOAD_REQ_V01,
				       WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
				       wlfw_bdf_download_req_msg_v01_ei, req);
		if (ret < 0) {
			qmi_txn_cancel(&txn);
			goto out;
		}

		ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);

		if (ret < 0)
			goto out;

		/* end = 1 triggers a CRC check on the BDF. If this fails, we
		 * get a QMI_ERR_MALFORMED_MSG_V01 error, but the FW is still
		 * willing to use the BDF. For some platforms, all the valid
		 * released BDFs fail this CRC check, so attempt to detect this
		 * scenario and treat it as non-fatal.
		 */
		if (resp.resp.result != QMI_RESULT_SUCCESS_V01 &&
		    !(req->end == 1 &&
		      resp.resp.result == QMI_ERR_MALFORMED_MSG_V01)) {
			ath10k_err(ar, "failed to download board data file: %d\n",
				   resp.resp.error);
			ret = -EINVAL;
			goto out;
		}

		remaining -= req->data_len;
		temp += req->data_len;
		req->seg_id++;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi bdf download request completed\n");

	kfree(req);
	return 0;

out:
	kfree(req);
	return ret;
}
311
/* Report available calibration data IDs (and, when supported, the XO
 * calibration value) to firmware via QMI_WLFW_CAL_REPORT_REQ_V01.
 *
 * Returns 0 on success or a negative errno.
 */
static int ath10k_qmi_send_cal_report_req(struct ath10k_qmi *qmi)
{
	struct wlfw_cal_report_resp_msg_v01 resp = {};
	struct wlfw_cal_report_req_msg_v01 req = {};
	struct ath10k *ar = qmi->ar;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct qmi_txn txn;
	int i, j = 0;
	int ret;

	/* Optional TLV: only filled in when the platform provides XO cal. */
	if (ar_snoc->xo_cal_supported) {
		req.xo_cal_data_valid = 1;
		req.xo_cal_data = ar_snoc->xo_cal_data;
	}

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_cal_report_resp_msg_v01_ei,
			   &resp);
	if (ret < 0)
		goto out;

	/* Compact the IDs of populated cal entries into meta_data[]. */
	for (i = 0; i < QMI_WLFW_MAX_NUM_CAL_V01; i++) {
		if (qmi->cal_data[i].total_size &&
		    qmi->cal_data[i].data) {
			req.meta_data[j] = qmi->cal_data[i].cal_id;
			j++;
		}
	}
	req.meta_data_len = j;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_CAL_REPORT_REQ_V01,
			       WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_cal_report_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send calibration request: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "calibration request rejected: %d\n", resp.resp.error);
		ret = -EINVAL;
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi cal report request completed\n");
	return 0;

out:
	return ret;
}
367
368static int
369ath10k_qmi_mode_send_sync_msg(struct ath10k *ar, enum wlfw_driver_mode_enum_v01 mode)
370{
371 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
372 struct ath10k_qmi *qmi = ar_snoc->qmi;
373 struct wlfw_wlan_mode_resp_msg_v01 resp = {};
374 struct wlfw_wlan_mode_req_msg_v01 req = {};
375 struct qmi_txn txn;
376 int ret;
377
378 ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
379 wlfw_wlan_mode_resp_msg_v01_ei,
380 &resp);
381 if (ret < 0)
382 goto out;
383
384 req.mode = mode;
385 req.hw_debug_valid = 1;
386 req.hw_debug = 0;
387
388 ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
389 QMI_WLFW_WLAN_MODE_REQ_V01,
390 WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN,
391 wlfw_wlan_mode_req_msg_v01_ei, &req);
392 if (ret < 0) {
393 qmi_txn_cancel(&txn);
394 ath10k_err(ar, "failed to send wlan mode %d request: %d\n", mode, ret);
395 goto out;
396 }
397
398 ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
399 if (ret < 0)
400 goto out;
401
402 if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
403 ath10k_err(ar, "more request rejected: %d\n", resp.resp.error);
404 ret = -EINVAL;
405 goto out;
406 }
407
408 ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi wlan mode req completed: %d\n", mode);
409 return 0;
410
411out:
412 return ret;
413}
414
/* Send the WLAN hardware configuration (copy-engine target config, service
 * pipe config and shadow register list) to firmware via
 * QMI_WLFW_WLAN_CFG_REQ_V01.  Each table is clamped to the QMI wire-format
 * maximum before copying.
 *
 * @version is currently unused (host_version_valid = 0).
 * Returns 0 on success or a negative errno.
 */
static int
ath10k_qmi_cfg_send_sync_msg(struct ath10k *ar,
			     struct ath10k_qmi_wlan_enable_cfg *config,
			     const char *version)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_qmi *qmi = ar_snoc->qmi;
	struct wlfw_wlan_cfg_resp_msg_v01 resp = {};
	struct wlfw_wlan_cfg_req_msg_v01 *req;
	struct qmi_txn txn;
	int ret;
	u32 i;

	/* Request struct is too large for the stack. */
	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
			   wlfw_wlan_cfg_resp_msg_v01_ei,
			   &resp);
	if (ret < 0)
		goto out;

	req->host_version_valid = 0;

	/* Copy-engine target configuration, clamped to the QMI maximum. */
	req->tgt_cfg_valid = 1;
	if (config->num_ce_tgt_cfg > QMI_WLFW_MAX_NUM_CE_V01)
		req->tgt_cfg_len = QMI_WLFW_MAX_NUM_CE_V01;
	else
		req->tgt_cfg_len = config->num_ce_tgt_cfg;
	for (i = 0; i < req->tgt_cfg_len; i++) {
		req->tgt_cfg[i].pipe_num = config->ce_tgt_cfg[i].pipe_num;
		req->tgt_cfg[i].pipe_dir = config->ce_tgt_cfg[i].pipe_dir;
		req->tgt_cfg[i].nentries = config->ce_tgt_cfg[i].nentries;
		req->tgt_cfg[i].nbytes_max = config->ce_tgt_cfg[i].nbytes_max;
		req->tgt_cfg[i].flags = config->ce_tgt_cfg[i].flags;
	}

	/* Service-to-pipe mapping, clamped likewise. */
	req->svc_cfg_valid = 1;
	if (config->num_ce_svc_pipe_cfg > QMI_WLFW_MAX_NUM_SVC_V01)
		req->svc_cfg_len = QMI_WLFW_MAX_NUM_SVC_V01;
	else
		req->svc_cfg_len = config->num_ce_svc_pipe_cfg;
	for (i = 0; i < req->svc_cfg_len; i++) {
		req->svc_cfg[i].service_id = config->ce_svc_cfg[i].service_id;
		req->svc_cfg[i].pipe_dir = config->ce_svc_cfg[i].pipe_dir;
		req->svc_cfg[i].pipe_num = config->ce_svc_cfg[i].pipe_num;
	}

	/* Shadow register list, bulk-copied after clamping. */
	req->shadow_reg_valid = 1;
	if (config->num_shadow_reg_cfg >
	    QMI_WLFW_MAX_NUM_SHADOW_REG_V01)
		req->shadow_reg_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V01;
	else
		req->shadow_reg_len = config->num_shadow_reg_cfg;

	memcpy(req->shadow_reg, config->shadow_reg_cfg,
	       sizeof(struct wlfw_shadow_reg_cfg_s_v01) * req->shadow_reg_len);

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_WLAN_CFG_REQ_V01,
			       WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_wlan_cfg_req_msg_v01_ei, req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send config request: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "config request rejected: %d\n", resp.resp.error);
		ret = -EINVAL;
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi config request completed\n");
	kfree(req);
	return 0;

out:
	kfree(req);
	return ret;
}
502
503int ath10k_qmi_wlan_enable(struct ath10k *ar,
504 struct ath10k_qmi_wlan_enable_cfg *config,
505 enum wlfw_driver_mode_enum_v01 mode,
506 const char *version)
507{
508 int ret;
509
510 ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi mode %d config %p\n",
511 mode, config);
512
513 ret = ath10k_qmi_cfg_send_sync_msg(ar, config, version);
514 if (ret) {
515 ath10k_err(ar, "failed to send qmi config: %d\n", ret);
516 return ret;
517 }
518
519 ret = ath10k_qmi_mode_send_sync_msg(ar, mode);
520 if (ret) {
521 ath10k_err(ar, "failed to send qmi mode: %d\n", ret);
522 return ret;
523 }
524
525 return 0;
526}
527
/* Disable WLAN firmware by switching it into the OFF mode. */
int ath10k_qmi_wlan_disable(struct ath10k *ar)
{
	return ath10k_qmi_mode_send_sync_msg(ar, QMI_WLFW_OFF_V01);
}
532
/* Fetch firmware capabilities (QMI_WLFW_CAP_REQ_V01): chip/board/soc IDs
 * and firmware version strings.  All response fields are optional TLVs and
 * are only cached when marked valid.  The summary is printed once, before
 * the device is registered with mac80211.
 *
 * Returns 0 on success or a negative errno.
 */
static int ath10k_qmi_cap_send_sync_msg(struct ath10k_qmi *qmi)
{
	struct wlfw_cap_resp_msg_v01 *resp;
	struct wlfw_cap_req_msg_v01 req = {};
	struct ath10k *ar = qmi->ar;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct qmi_txn txn;
	int ret;

	/* Response struct is too large for the stack. */
	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_cap_resp_msg_v01_ei, resp);
	if (ret < 0)
		goto out;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_CAP_REQ_V01,
			       WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_cap_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send capability request: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "capability req rejected: %d\n", resp->resp.error);
		ret = -EINVAL;
		goto out;
	}

	if (resp->chip_info_valid) {
		qmi->chip_info.chip_id = resp->chip_info.chip_id;
		qmi->chip_info.chip_family = resp->chip_info.chip_family;
	}

	/* 0xFF marks "board id unknown" for board-file selection. */
	if (resp->board_info_valid)
		qmi->board_info.board_id = resp->board_info.board_id;
	else
		qmi->board_info.board_id = 0xFF;

	if (resp->soc_info_valid)
		qmi->soc_info.soc_id = resp->soc_info.soc_id;

	if (resp->fw_version_info_valid) {
		qmi->fw_version = resp->fw_version_info.fw_version;
		strlcpy(qmi->fw_build_timestamp, resp->fw_version_info.fw_build_timestamp,
			sizeof(qmi->fw_build_timestamp));
	}

	if (resp->fw_build_id_valid)
		strlcpy(qmi->fw_build_id, resp->fw_build_id,
			MAX_BUILD_ID_LEN + 1);

	/* Log the hardware/firmware identity only on first boot, before the
	 * device has registered; later re-arrivals stay quiet.
	 */
	if (!test_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags)) {
		ath10k_info(ar, "qmi chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x",
			    qmi->chip_info.chip_id, qmi->chip_info.chip_family,
			    qmi->board_info.board_id, qmi->soc_info.soc_id);
		ath10k_info(ar, "qmi fw_version 0x%x fw_build_timestamp %s fw_build_id %s",
			    qmi->fw_version, qmi->fw_build_timestamp, qmi->fw_build_id);
	}

	kfree(resp);
	return 0;

out:
	kfree(resp);
	return ret;
}
608
/* Advertise host capabilities to firmware (QMI_WLFW_HOST_CAP_REQ_V01).
 * Platforms with the 8-bit host-cap quirk encode the request with an
 * alternate element table.
 *
 * Returns 0 on success (including firmware that answers NOT_SUPPORTED,
 * which is non-fatal) or a negative errno.
 */
static int ath10k_qmi_host_cap_send_sync(struct ath10k_qmi *qmi)
{
	struct wlfw_host_cap_resp_msg_v01 resp = {};
	struct wlfw_host_cap_req_msg_v01 req = {};
	struct qmi_elem_info *req_ei;
	struct ath10k *ar = qmi->ar;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct qmi_txn txn;
	int ret;

	req.daemon_support_valid = 1;
	req.daemon_support = 0;

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_host_cap_resp_msg_v01_ei,
			   &resp);
	if (ret < 0)
		goto out;

	/* Some firmware expects the daemon_support TLV encoded as 8 bit. */
	if (test_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags))
		req_ei = wlfw_host_cap_8bit_req_msg_v01_ei;
	else
		req_ei = wlfw_host_cap_req_msg_v01_ei;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_HOST_CAP_REQ_V01,
			       WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN,
			       req_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send host capability request: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	/* older FW didn't support this request, which is not fatal */
	if (resp.resp.result != QMI_RESULT_SUCCESS_V01 &&
	    resp.resp.error != QMI_ERR_NOT_SUPPORTED_V01) {
		ath10k_err(ar, "host capability request rejected: %d\n", resp.resp.error);
		ret = -EINVAL;
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi host capability request completed\n");
	return 0;

out:
	return ret;
}
660
/* Set the firmware logging mode (QMI_WLFW_INI_REQ_V01).
 *
 * @fw_log_mode: mode value forwarded verbatim in the enablefwlog TLV.
 * Returns 0 on success or a negative errno.
 */
int ath10k_qmi_set_fw_log_mode(struct ath10k *ar, u8 fw_log_mode)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct wlfw_ini_resp_msg_v01 resp = {};
	struct ath10k_qmi *qmi = ar_snoc->qmi;
	struct wlfw_ini_req_msg_v01 req = {};
	struct qmi_txn txn;
	int ret;

	req.enablefwlog_valid = 1;
	req.enablefwlog = fw_log_mode;

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_ini_resp_msg_v01_ei,
			   &resp);
	if (ret < 0)
		goto out;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_INI_REQ_V01,
			       WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_ini_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send fw log request: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "fw log request rejected: %d\n",
			   resp.resp.error);
		ret = -EINVAL;
		goto out;
	}
	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi fw log request completed, mode: %d\n",
		   fw_log_mode);
	return 0;

out:
	return ret;
}
705
/* Register for firmware indications (fw-ready, msa-ready and, when
 * supported, xo-cal) via QMI_WLFW_IND_REGISTER_REQ_V01.  If the response
 * reports the firmware is already ready, qmi->fw_ready is latched so the
 * caller can skip the full bring-up sequence.
 *
 * Returns 0 on success or a negative errno.
 */
static int
ath10k_qmi_ind_register_send_sync_msg(struct ath10k_qmi *qmi)
{
	struct wlfw_ind_register_resp_msg_v01 resp = {};
	struct wlfw_ind_register_req_msg_v01 req = {};
	struct ath10k *ar = qmi->ar;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct qmi_txn txn;
	int ret;

	req.client_id_valid = 1;
	req.client_id = ATH10K_QMI_CLIENT_ID;
	req.fw_ready_enable_valid = 1;
	req.fw_ready_enable = 1;
	req.msa_ready_enable_valid = 1;
	req.msa_ready_enable = 1;

	/* Only subscribe to XO-cal indications on platforms that have it. */
	if (ar_snoc->xo_cal_supported) {
		req.xo_cal_enable_valid = 1;
		req.xo_cal_enable = 1;
	}

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
			   wlfw_ind_register_resp_msg_v01_ei, &resp);
	if (ret < 0)
		goto out;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_IND_REGISTER_REQ_V01,
			       WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_ind_register_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send indication registered request: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "indication request rejected: %d\n", resp.resp.error);
		ret = -EINVAL;
		goto out;
	}

	/* Firmware may already be up; remember so bring-up can short-cut. */
	if (resp.fw_status_valid) {
		if (resp.fw_status & QMI_WLFW_FW_READY_V01)
			qmi->fw_ready = true;
	}
	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi indication register request completed\n");
	return 0;

out:
	return ret;
}
763
/* QMI server-arrive handler: run the firmware bring-up sequence —
 * indication registration, host caps, MSA info/permissions/ready, then
 * capability query.  If firmware reports it is already ready, only the
 * fw-ready indication is forwarded.  Errors abort the sequence; the
 * callees have already logged them.
 */
static void ath10k_qmi_event_server_arrive(struct ath10k_qmi *qmi)
{
	struct ath10k *ar = qmi->ar;
	int ret;

	ret = ath10k_qmi_ind_register_send_sync_msg(qmi);
	if (ret)
		return;

	if (qmi->fw_ready) {
		ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_READY_IND);
		return;
	}

	ret = ath10k_qmi_host_cap_send_sync(qmi);
	if (ret)
		return;

	ret = ath10k_qmi_msa_mem_info_send_sync_msg(qmi);
	if (ret)
		return;

	/*
	 * HACK: sleep for a while inbetween receiving the msa info response
	 * and the XPU update to prevent SDM845 from crashing due to a security
	 * violation, when running MPSS.AT.4.0.c2-01184-SDM845_GEN_PACK-1.
	 */
	msleep(20);

	ret = ath10k_qmi_setup_msa_permissions(qmi);
	if (ret)
		return;

	ret = ath10k_qmi_msa_ready_send_sync_msg(qmi);
	if (ret)
		goto err_setup_msa;

	ret = ath10k_qmi_cap_send_sync_msg(qmi);
	if (ret)
		goto err_setup_msa;

	return;

err_setup_msa:
	/* Roll back the permission grants made above. */
	ath10k_qmi_remove_msa_permission(qmi);
}
810
/* Load the board file matching the QMI-reported board id.  Seeds the
 * core's id fields (bus, qmi ids, firmware dir) before delegating to
 * ath10k_core_fetch_board_file().  Returns 0 or a negative errno.
 */
static int ath10k_qmi_fetch_board_file(struct ath10k_qmi *qmi)
{
	struct ath10k *ar = qmi->ar;

	ar->hif.bus = ATH10K_BUS_SNOC;
	ar->id.qmi_ids_valid = true;
	ar->id.qmi_board_id = qmi->board_info.board_id;
	ar->hw_params.fw.dir = WCN3990_HW_1_0_FW_DIR;

	return ath10k_core_fetch_board_file(qmi->ar, ATH10K_BD_IE_BOARD);
}
822
823static int
824ath10k_qmi_driver_event_post(struct ath10k_qmi *qmi,
825 enum ath10k_qmi_driver_event_type type,
826 void *data)
827{
828 struct ath10k_qmi_driver_event *event;
829
830 event = kzalloc(sizeof(*event), GFP_ATOMIC);
831 if (!event)
832 return -ENOMEM;
833
834 event->type = type;
835 event->data = data;
836
837 spin_lock(&qmi->event_lock);
838 list_add_tail(&event->list, &qmi->event_list);
839 spin_unlock(&qmi->event_lock);
840
841 queue_work(qmi->event_wq, &qmi->event_work);
842
843 return 0;
844}
845
/* QMI server-exit handler: tear down MSA permissions and board files, dump
 * firmware state (unless we are unregistering, i.e. the exit is expected),
 * then signal FW_DOWN to the SNOC layer.
 */
static void ath10k_qmi_event_server_exit(struct ath10k_qmi *qmi)
{
	struct ath10k *ar = qmi->ar;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	ath10k_qmi_remove_msa_permission(qmi);
	ath10k_core_free_board_files(ar);
	/* Only treat the exit as a crash when it was not driver-initiated. */
	if (!test_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags))
		ath10k_snoc_fw_crashed_dump(ar);

	ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_DOWN_IND);
	ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw qmi service disconnected\n");
}
859
/* MSA-ready indication handler: fetch the board file, download it to
 * firmware, then send the calibration report.  Failures are logged by the
 * callees; there is no caller to propagate an error to, so each step
 * simply aborts the sequence.  (The pointless "goto out; out: return;"
 * pattern and the unused final ret assignment have been removed.)
 */
static void ath10k_qmi_event_msa_ready(struct ath10k_qmi *qmi)
{
	if (ath10k_qmi_fetch_board_file(qmi))
		return;

	if (ath10k_qmi_bdf_dnld_send_sync(qmi))
		return;

	/* Best effort, as before: the result was always discarded. */
	ath10k_qmi_send_cal_report_req(qmi);
}
877
878static int ath10k_qmi_event_fw_ready_ind(struct ath10k_qmi *qmi)
879{
880 struct ath10k *ar = qmi->ar;
881
882 ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw ready event received\n");
883 ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_READY_IND);
884
885 return 0;
886}
887
/* QMI callback for QMI_WLFW_FW_READY_IND_V01: defer handling to the event
 * worker (QMI callback context must not block).
 */
static void ath10k_qmi_fw_ready_ind(struct qmi_handle *qmi_hdl,
				    struct sockaddr_qrtr *sq,
				    struct qmi_txn *txn, const void *data)
{
	struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);

	ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_FW_READY_IND, NULL);
}
896
/* QMI callback for QMI_WLFW_MSA_READY_IND_V01: defer handling to the
 * event worker (QMI callback context must not block).
 */
static void ath10k_qmi_msa_ready_ind(struct qmi_handle *qmi_hdl,
				     struct sockaddr_qrtr *sq,
				     struct qmi_txn *txn, const void *data)
{
	struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);

	ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_MSA_READY_IND, NULL);
}
905
/* Dispatch table for unsolicited WLFW indications; terminated by an empty
 * entry as required by the QMI helper core.
 */
static struct qmi_msg_handler qmi_msg_handler[] = {
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_WLFW_FW_READY_IND_V01,
		.ei = wlfw_fw_ready_ind_msg_v01_ei,
		.decoded_size = sizeof(struct wlfw_fw_ready_ind_msg_v01),
		.fn = ath10k_qmi_fw_ready_ind,
	},
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_WLFW_MSA_READY_IND_V01,
		.ei = wlfw_msa_ready_ind_msg_v01_ei,
		.decoded_size = sizeof(struct wlfw_msa_ready_ind_msg_v01),
		.fn = ath10k_qmi_msa_ready_ind,
	},
	{}
};
923
/* QMI lookup callback: the WLFW service appeared on the QRTR bus.
 * Record its address, connect the handle's socket to it and post a
 * SERVER_ARRIVE event for the worker.  Returns 0 or the connect error.
 */
static int ath10k_qmi_new_server(struct qmi_handle *qmi_hdl,
				 struct qmi_service *service)
{
	struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);
	struct sockaddr_qrtr *sq = &qmi->sq;
	struct ath10k *ar = qmi->ar;
	int ret;

	sq->sq_family = AF_QIPCRTR;
	sq->sq_node = service->node;
	sq->sq_port = service->port;

	ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw qmi service found\n");

	ret = kernel_connect(qmi_hdl->sock, (struct sockaddr *)&qmi->sq,
			     sizeof(qmi->sq), 0);
	if (ret) {
		ath10k_err(ar, "failed to connect to a remote QMI service port\n");
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi wifi fw qmi service connected\n");
	ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_SERVER_ARRIVE, NULL);

	return ret;
}
950
/* QMI lookup callback: the WLFW service went away (firmware stopped or
 * crashed).  Clear the cached fw_ready state and post SERVER_EXIT.
 */
static void ath10k_qmi_del_server(struct qmi_handle *qmi_hdl,
				  struct qmi_service *service)
{
	struct ath10k_qmi *qmi =
		container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);

	qmi->fw_ready = false;
	ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_SERVER_EXIT, NULL);
}
960
/* Service arrival/departure callbacks for the QMI handle. */
static struct qmi_ops ath10k_qmi_ops = {
	.new_server = ath10k_qmi_new_server,
	.del_server = ath10k_qmi_del_server,
};
965
/* Event worker: drain qmi->event_list and dispatch each event.  The lock
 * is dropped while a handler runs (handlers sleep, e.g. on QMI txns) and
 * reacquired before re-checking the list, so events posted meanwhile are
 * picked up in the same work invocation.
 */
static void ath10k_qmi_driver_event_work(struct work_struct *work)
{
	struct ath10k_qmi *qmi = container_of(work, struct ath10k_qmi,
					      event_work);
	struct ath10k_qmi_driver_event *event;
	struct ath10k *ar = qmi->ar;

	spin_lock(&qmi->event_lock);
	while (!list_empty(&qmi->event_list)) {
		event = list_first_entry(&qmi->event_list,
					 struct ath10k_qmi_driver_event, list);
		list_del(&event->list);
		spin_unlock(&qmi->event_lock);

		switch (event->type) {
		case ATH10K_QMI_EVENT_SERVER_ARRIVE:
			ath10k_qmi_event_server_arrive(qmi);
			break;
		case ATH10K_QMI_EVENT_SERVER_EXIT:
			ath10k_qmi_event_server_exit(qmi);
			break;
		case ATH10K_QMI_EVENT_FW_READY_IND:
			ath10k_qmi_event_fw_ready_ind(qmi);
			break;
		case ATH10K_QMI_EVENT_MSA_READY_IND:
			ath10k_qmi_event_msa_ready(qmi);
			break;
		default:
			ath10k_warn(ar, "invalid event type: %d", event->type);
			break;
		}
		kfree(event);
		spin_lock(&qmi->event_lock);
	}
	spin_unlock(&qmi->event_lock);
}
1002
1003static int ath10k_qmi_setup_msa_resources(struct ath10k_qmi *qmi, u32 msa_size)
1004{
1005 struct ath10k *ar = qmi->ar;
1006 struct device *dev = ar->dev;
1007 struct device_node *node;
1008 struct resource r;
1009 int ret;
1010
1011 node = of_parse_phandle(dev->of_node, "memory-region", 0);
1012 if (node) {
1013 ret = of_address_to_resource(node, 0, &r);
1014 if (ret) {
1015 dev_err(dev, "failed to resolve msa fixed region\n");
1016 return ret;
1017 }
1018 of_node_put(node);
1019
1020 qmi->msa_pa = r.start;
1021 qmi->msa_mem_size = resource_size(&r);
1022 qmi->msa_va = devm_memremap(dev, qmi->msa_pa, qmi->msa_mem_size,
1023 MEMREMAP_WT);
Dan Carpenterc28dcbc2018-12-20 09:53:41 +02001024 if (IS_ERR(qmi->msa_va)) {
Govind Singhba94c752018-10-11 13:16:26 +03001025 dev_err(dev, "failed to map memory region: %pa\n", &r.start);
Dan Carpenterc28dcbc2018-12-20 09:53:41 +02001026 return PTR_ERR(qmi->msa_va);
Govind Singhba94c752018-10-11 13:16:26 +03001027 }
1028 } else {
1029 qmi->msa_va = dmam_alloc_coherent(dev, msa_size,
1030 &qmi->msa_pa, GFP_KERNEL);
1031 if (!qmi->msa_va) {
1032 ath10k_err(ar, "failed to allocate dma memory for msa region\n");
1033 return -ENOMEM;
1034 }
1035 qmi->msa_mem_size = msa_size;
1036 }
1037
1038 ath10k_dbg(ar, ATH10K_DBG_QMI, "msa pa: %pad , msa va: 0x%p\n",
1039 &qmi->msa_pa,
1040 qmi->msa_va);
1041
1042 return 0;
1043}
1044
/* Allocate and initialize the QMI client: MSA memory, QMI handle, the
 * ordered event workqueue, and the WLFW service lookup that ultimately
 * triggers ath10k_qmi_new_server().
 *
 * @msa_size: fallback MSA allocation size when no DT memory-region exists.
 * Returns 0 on success or a negative errno; on failure nothing is left
 * allocated and ar_snoc->qmi may point at freed memory only transiently
 * within this call's error path (it is overwritten by the caller's error
 * handling convention).
 */
int ath10k_qmi_init(struct ath10k *ar, u32 msa_size)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_qmi *qmi;
	int ret;

	qmi = kzalloc(sizeof(*qmi), GFP_KERNEL);
	if (!qmi)
		return -ENOMEM;

	qmi->ar = ar;
	ar_snoc->qmi = qmi;

	ret = ath10k_qmi_setup_msa_resources(qmi, msa_size);
	if (ret)
		goto err;

	ret = qmi_handle_init(&qmi->qmi_hdl,
			      WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
			      &ath10k_qmi_ops, qmi_msg_handler);
	if (ret)
		goto err;

	/* Single-threaded, unbound: events must be handled in order. */
	qmi->event_wq = alloc_workqueue("ath10k_qmi_driver_event",
					WQ_UNBOUND, 1);
	if (!qmi->event_wq) {
		ath10k_err(ar, "failed to allocate workqueue\n");
		ret = -EFAULT;
		goto err_release_qmi_handle;
	}

	INIT_LIST_HEAD(&qmi->event_list);
	spin_lock_init(&qmi->event_lock);
	INIT_WORK(&qmi->event_work, ath10k_qmi_driver_event_work);

	ret = qmi_add_lookup(&qmi->qmi_hdl, WLFW_SERVICE_ID_V01,
			     WLFW_SERVICE_VERS_V01, 0);
	if (ret)
		goto err_qmi_lookup;

	return 0;

err_qmi_lookup:
	destroy_workqueue(qmi->event_wq);

err_release_qmi_handle:
	qmi_handle_release(&qmi->qmi_hdl);

err:
	kfree(qmi);
	return ret;
}
1097
/* Tear down the QMI client created by ath10k_qmi_init(): release the QMI
 * handle first so no new events are posted, then flush and destroy the
 * event workqueue before freeing the context.  Always returns 0.
 */
int ath10k_qmi_deinit(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_qmi *qmi = ar_snoc->qmi;

	qmi_handle_release(&qmi->qmi_hdl);
	cancel_work_sync(&qmi->event_work);
	destroy_workqueue(qmi->event_wq);
	kfree(qmi);
	ar_snoc->qmi = NULL;

	return 0;
}