/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/skbuff.h>

#include "core.h"
#include "htc.h"
#include "debug.h"
#include "wmi.h"
#include "mac.h"

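/* Drain any WMI commands still queued towards HTC. The caller must hold
 * conf_mutex; the 5 second wait below appears to be an arbitrary
 * driver-chosen bound rather than a firmware requirement.
 */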
void ath10k_wmi_flush_tx(struct ath10k *ar)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	if (ar->state == ATH10K_STATE_WEDGED) {
		ath10k_warn("wmi flush skipped - device is wedged anyway\n");
		return;
	}

	ret = wait_event_timeout(ar->wmi.wq,
				 atomic_read(&ar->wmi.pending_tx_count) == 0,
				 5*HZ);
	if (atomic_read(&ar->wmi.pending_tx_count) == 0)
		return;

	if (ret == 0)
		ret = -ETIMEDOUT;

	if (ret < 0)
		ath10k_warn("wmi flush failed (%d)\n", ret);
}

int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
{
	int ret;
	ret = wait_for_completion_timeout(&ar->wmi.service_ready,
					  WMI_SERVICE_READY_TIMEOUT_HZ);
	return ret;
}

int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
{
	int ret;
	ret = wait_for_completion_timeout(&ar->wmi.unified_ready,
					  WMI_UNIFIED_READY_TIMEOUT_HZ);
	return ret;
}

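/* WMI command buffers need headroom for the HTC header, and the payload
 * length is rounded up to a 4-byte multiple; the target presumably
 * expects 4-byte aligned buffers, which is why a misaligned skb only
 * triggers a warning here instead of a hard failure.
 */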
static struct sk_buff *ath10k_wmi_alloc_skb(u32 len)
{
	struct sk_buff *skb;
	u32 round_len = roundup(len, 4);

	skb = ath10k_htc_alloc_skb(WMI_SKB_HEADROOM + round_len);
	if (!skb)
		return NULL;

	skb_reserve(skb, WMI_SKB_HEADROOM);
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn("Unaligned WMI skb\n");

	skb_put(skb, round_len);
	memset(skb->data, 0, round_len);

	return skb;
}

static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb(skb);

	if (atomic_sub_return(1, &ar->wmi.pending_tx_count) == 0)
		wake_up(&ar->wmi.wq);
}

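/* Push a WMI command header onto the skb and hand it to HTC. On failure
 * the header is pulled back off, so the caller may retry with the very
 * same skb - ath10k_wmi_cmd_send() below relies on this for -EAGAIN.
 */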
static int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
				      enum wmi_cmd_id cmd_id)
{
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
	struct wmi_cmd_hdr *cmd_hdr;
	int ret;
	u32 cmd = 0;

	if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		return -ENOMEM;

	cmd |= SM(cmd_id, WMI_CMD_HDR_CMD_ID);

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	cmd_hdr->cmd_id = __cpu_to_le32(cmd);

	memset(skb_cb, 0, sizeof(*skb_cb));
	ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
	trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len, ret);

	if (ret)
		goto err_pull;

	return 0;

err_pull:
	skb_pull(skb, sizeof(struct wmi_cmd_hdr));
	return ret;
}

static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar,
					enum ath10k_htc_ep_id eid)
{
	wake_up(&ar->wmi.tx_credits_wq);
}

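/* Keep retrying ath10k_wmi_cmd_send_nowait() until HTC TX credits become
 * available (signalled through ath10k_wmi_op_ep_tx_credits above) or the
 * 3 second timeout expires; the timeout looks like an arbitrary driver
 * choice.
 */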
static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
			       enum wmi_cmd_id cmd_id)
{
	int ret = -EINVAL;

	wait_event_timeout(ar->wmi.tx_credits_wq, ({
		ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);
		(ret != -EAGAIN);
	}), 3*HZ);

	if (ret)
		dev_kfree_skb_any(skb);

	return ret;
}

static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_scan_event *event = (struct wmi_scan_event *)skb->data;
	enum wmi_scan_event_type event_type;
	enum wmi_scan_completion_reason reason;
	u32 freq;
	u32 req_id;
	u32 scan_id;
	u32 vdev_id;

	event_type = __le32_to_cpu(event->event_type);
	reason = __le32_to_cpu(event->reason);
	freq = __le32_to_cpu(event->channel_freq);
	req_id = __le32_to_cpu(event->scan_req_id);
	scan_id = __le32_to_cpu(event->scan_id);
	vdev_id = __le32_to_cpu(event->vdev_id);

	ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENTID\n");
	ath10k_dbg(ATH10K_DBG_WMI,
		   "scan event type %d reason %d freq %d req_id %d "
		   "scan_id %d vdev_id %d\n",
		   event_type, reason, freq, req_id, scan_id, vdev_id);

	spin_lock_bh(&ar->data_lock);

	switch (event_type) {
	case WMI_SCAN_EVENT_STARTED:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_STARTED\n");
		if (ar->scan.in_progress && ar->scan.is_roc)
			ieee80211_ready_on_channel(ar->hw);

		complete(&ar->scan.started);
		break;
	case WMI_SCAN_EVENT_COMPLETED:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_COMPLETED\n");
		switch (reason) {
		case WMI_SCAN_REASON_COMPLETED:
			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_COMPLETED\n");
			break;
		case WMI_SCAN_REASON_CANCELLED:
			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_CANCELED\n");
			break;
		case WMI_SCAN_REASON_PREEMPTED:
			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_PREEMPTED\n");
			break;
		case WMI_SCAN_REASON_TIMEDOUT:
			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_TIMEDOUT\n");
			break;
		default:
			break;
		}

		ar->scan_channel = NULL;
		if (!ar->scan.in_progress) {
			ath10k_warn("no scan requested, ignoring\n");
			break;
		}

		if (ar->scan.is_roc) {
			ath10k_offchan_tx_purge(ar);

			if (!ar->scan.aborting)
				ieee80211_remain_on_channel_expired(ar->hw);
		} else {
			ieee80211_scan_completed(ar->hw, ar->scan.aborting);
		}

		del_timer(&ar->scan.timeout);
		complete_all(&ar->scan.completed);
		ar->scan.in_progress = false;
		break;
	case WMI_SCAN_EVENT_BSS_CHANNEL:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_BSS_CHANNEL\n");
		ar->scan_channel = NULL;
		break;
	case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_FOREIGN_CHANNEL\n");
		ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
		if (ar->scan.in_progress && ar->scan.is_roc &&
		    ar->scan.roc_freq == freq) {
			complete(&ar->scan.on_channel);
		}
		break;
	case WMI_SCAN_EVENT_DEQUEUED:
		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_DEQUEUED\n");
		break;
	case WMI_SCAN_EVENT_PREEMPTED:
		ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_PREEMPTED\n");
		break;
	case WMI_SCAN_EVENT_START_FAILED:
		ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_START_FAILED\n");
		break;
	default:
		break;
	}

	spin_unlock_bh(&ar->data_lock);
	return 0;
}

static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
{
	enum ieee80211_band band;

	switch (phy_mode) {
	case MODE_11A:
	case MODE_11NA_HT20:
	case MODE_11NA_HT40:
	case MODE_11AC_VHT20:
	case MODE_11AC_VHT40:
	case MODE_11AC_VHT80:
		band = IEEE80211_BAND_5GHZ;
		break;
	case MODE_11G:
	case MODE_11B:
	case MODE_11GONLY:
	case MODE_11NG_HT20:
	case MODE_11NG_HT40:
	case MODE_11AC_VHT20_2G:
	case MODE_11AC_VHT40_2G:
	case MODE_11AC_VHT80_2G:
	default:
		band = IEEE80211_BAND_2GHZ;
	}

	return band;
}

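/* Map a firmware-reported legacy rate (in kbps) to a mac80211 rate_idx.
 * The table assumes the usual CCK + OFDM ordering of the driver's rate
 * tables: indices 0-3 are CCK and 4-11 are OFDM, so the CCK entries are
 * skipped on 5 GHz.
 */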
static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band)
{
	u8 rate_idx = 0;

	/* rate in Kbps */
	switch (rate) {
	case 1000:
		rate_idx = 0;
		break;
	case 2000:
		rate_idx = 1;
		break;
	case 5500:
		rate_idx = 2;
		break;
	case 11000:
		rate_idx = 3;
		break;
	case 6000:
		rate_idx = 4;
		break;
	case 9000:
		rate_idx = 5;
		break;
	case 12000:
		rate_idx = 6;
		break;
	case 18000:
		rate_idx = 7;
		break;
	case 24000:
		rate_idx = 8;
		break;
	case 36000:
		rate_idx = 9;
		break;
	case 48000:
		rate_idx = 10;
		break;
	case 54000:
		rate_idx = 11;
		break;
	default:
		break;
	}

	if (band == IEEE80211_BAND_5GHZ) {
		if (rate_idx > 3)
			/* Omit CCK rates */
			rate_idx -= 4;
		else
			rate_idx = 0;
	}

	return rate_idx;
}

static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_mgmt_rx_event_v1 *ev_v1;
	struct wmi_mgmt_rx_event_v2 *ev_v2;
	struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr;
	u32 rx_status;
	u32 channel;
	u32 phy_mode;
	u32 snr;
	u32 rate;
	u32 buf_len;
	u16 fc;
	int pull_len;

	if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
		ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
		ev_hdr = &ev_v2->hdr.v1;
		pull_len = sizeof(*ev_v2);
	} else {
		ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
		ev_hdr = &ev_v1->hdr;
		pull_len = sizeof(*ev_v1);
	}

	channel = __le32_to_cpu(ev_hdr->channel);
	buf_len = __le32_to_cpu(ev_hdr->buf_len);
	rx_status = __le32_to_cpu(ev_hdr->status);
	snr = __le32_to_cpu(ev_hdr->snr);
	phy_mode = __le32_to_cpu(ev_hdr->phy_mode);
	rate = __le32_to_cpu(ev_hdr->rate);

	memset(status, 0, sizeof(*status));

	ath10k_dbg(ATH10K_DBG_MGMT,
		   "event mgmt rx status %08x\n", rx_status);

	if (rx_status & WMI_RX_STATUS_ERR_DECRYPT) {
		dev_kfree_skb(skb);
		return 0;
	}

	if (rx_status & WMI_RX_STATUS_ERR_KEY_CACHE_MISS) {
		dev_kfree_skb(skb);
		return 0;
	}

	if (rx_status & WMI_RX_STATUS_ERR_CRC)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;
	if (rx_status & WMI_RX_STATUS_ERR_MIC)
		status->flag |= RX_FLAG_MMIC_ERROR;

	status->band = phy_mode_to_band(phy_mode);
	status->freq = ieee80211_channel_to_frequency(channel, status->band);
	status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
	status->rate_idx = get_rate_idx(rate, status->band);

	skb_pull(skb, pull_len);

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = le16_to_cpu(hdr->frame_control);

	if (fc & IEEE80211_FCTL_PROTECTED) {
		status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED |
				RX_FLAG_MMIC_STRIPPED;
		hdr->frame_control = __cpu_to_le16(fc &
					~IEEE80211_FCTL_PROTECTED);
	}

	ath10k_dbg(ATH10K_DBG_MGMT,
		   "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
		   skb, skb->len,
		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);

	ath10k_dbg(ATH10K_DBG_MGMT,
		   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
		   status->freq, status->band, status->signal,
		   status->rate_idx);

	/*
	 * Packets from HTC come aligned to 4-byte boundaries because
	 * they may originally have arrived with a trailer appended.
	 */
	skb_trim(skb, buf_len);

	ieee80211_rx(ar->hw, skb);
	return 0;
}

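/* Translate a channel frequency to an index into ar->survey[]. The index
 * space is assumed to follow the order in which channels are registered
 * across the wiphy's bands; for an unknown frequency this returns the
 * total channel count, which the caller bounds-checks.
 */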
static int freq_to_idx(struct ath10k *ar, int freq)
{
	struct ieee80211_supported_band *sband;
	int band, ch, idx = 0;

	for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
		sband = ar->hw->wiphy->bands[band];
		if (!sband)
			continue;

		for (ch = 0; ch < sband->n_channels; ch++, idx++)
			if (sband->channels[ch].center_freq == freq)
				goto exit;
	}

exit:
	return idx;
}

static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_chan_info_event *ev;
	struct survey_info *survey;
	u32 err_code, freq, cmd_flags, noise_floor, rx_clear_count, cycle_count;
	int idx;

	ev = (struct wmi_chan_info_event *)skb->data;

	err_code = __le32_to_cpu(ev->err_code);
	freq = __le32_to_cpu(ev->freq);
	cmd_flags = __le32_to_cpu(ev->cmd_flags);
	noise_floor = __le32_to_cpu(ev->noise_floor);
	rx_clear_count = __le32_to_cpu(ev->rx_clear_count);
	cycle_count = __le32_to_cpu(ev->cycle_count);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
		   err_code, freq, cmd_flags, noise_floor, rx_clear_count,
		   cycle_count);

	spin_lock_bh(&ar->data_lock);

	if (!ar->scan.in_progress) {
		ath10k_warn("chan info event without a scan request?\n");
		goto exit;
	}

	idx = freq_to_idx(ar, freq);
	if (idx >= ARRAY_SIZE(ar->survey)) {
		ath10k_warn("chan info: invalid frequency %d (idx %d out of bounds)\n",
			    freq, idx);
		goto exit;
	}

	if (cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
		/* During scanning, chan info is reported twice for each
		 * visited channel. The reported cycle count is global and
		 * the per-channel cycle count must be calculated. */

		cycle_count -= ar->survey_last_cycle_count;
		rx_clear_count -= ar->survey_last_rx_clear_count;

		survey = &ar->survey[idx];
		survey->channel_time = WMI_CHAN_INFO_MSEC(cycle_count);
		survey->channel_time_rx = WMI_CHAN_INFO_MSEC(rx_clear_count);
		survey->noise = noise_floor;
		survey->filled = SURVEY_INFO_CHANNEL_TIME |
				 SURVEY_INFO_CHANNEL_TIME_RX |
				 SURVEY_INFO_NOISE_DBM;
	}

	ar->survey_last_rx_clear_count = rx_clear_count;
	ar->survey_last_cycle_count = cycle_count;

exit:
	spin_unlock_bh(&ar->data_lock);
}

static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
}

static void ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_DEBUG_MESG_EVENTID\n");
}

static void ath10k_wmi_event_update_stats(struct ath10k *ar,
					  struct sk_buff *skb)
{
	struct wmi_stats_event *ev = (struct wmi_stats_event *)skb->data;

	ath10k_dbg(ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");

	ath10k_debug_read_target_stats(ar, ev);
}

static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar,
					     struct sk_buff *skb)
{
	struct wmi_vdev_start_response_event *ev;

	ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");

	ev = (struct wmi_vdev_start_response_event *)skb->data;

	if (WARN_ON(__le32_to_cpu(ev->status)))
		return;

	complete(&ar->vdev_setup_done);
}

static void ath10k_wmi_event_vdev_stopped(struct ath10k *ar,
					  struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
	complete(&ar->vdev_setup_done);
}

static void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar,
					      struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_PEER_STA_KICKOUT_EVENTID\n");
}

/*
 * FIXME
 *
 * We don't report the sleep state of connected stations to
 * mac80211, so mac80211 can't fill in the TIM IE correctly.
 *
 * There is no known way of getting nullfunc frames that carry
 * the sleep transition of connected stations - these do not
 * seem to be sent from the target to the host, and there
 * doesn't seem to be a dedicated event for them either. The
 * only option left is to read tim_bitmap during SWBA.
 *
 * We could try using tim_bitmap from SWBA to tell mac80211
 * which stations are asleep and which are not, but calling
 * mac80211 functions that many times could take too long and
 * make us miss the deadline for submitting the beacon to the
 * target.
 *
 * So as a workaround we try to extend the TIM IE ourselves if
 * there is unicast traffic buffered for stations with aid > 7.
 */
static void ath10k_wmi_update_tim(struct ath10k *ar,
				  struct ath10k_vif *arvif,
				  struct sk_buff *bcn,
				  struct wmi_bcn_info *bcn_info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
	struct ieee80211_tim_ie *tim;
	u8 *ies, *ie;
	u8 ie_len, pvm_len;

	/* if next SWBA has no tim_changed the tim_bitmap is garbage.
	 * we must copy the bitmap upon change and reuse it later */
	if (__le32_to_cpu(bcn_info->tim_info.tim_changed)) {
		int i;

		BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) !=
			     sizeof(bcn_info->tim_info.tim_bitmap));

		for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) {
			__le32 t = bcn_info->tim_info.tim_bitmap[i / 4];
			u32 v = __le32_to_cpu(t);
			arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
		}

		/* FW reports either length 0 or 16
		 * so we calculate this on our own */
		arvif->u.ap.tim_len = 0;
		for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++)
			if (arvif->u.ap.tim_bitmap[i])
				arvif->u.ap.tim_len = i;

		arvif->u.ap.tim_len++;
	}

	ies = bcn->data;
	ies += ieee80211_hdrlen(hdr->frame_control);
	ies += 12; /* fixed parameters */

	ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
				    (u8 *)skb_tail_pointer(bcn) - ies);
	if (!ie) {
		if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
			ath10k_warn("no tim ie found\n");
		return;
	}

	tim = (void *)ie + 2;
	ie_len = ie[1];
	pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */

	if (pvm_len < arvif->u.ap.tim_len) {
		int expand_size = sizeof(arvif->u.ap.tim_bitmap) - pvm_len;
		int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
		void *next_ie = ie + 2 + ie_len;

		if (skb_put(bcn, expand_size)) {
			memmove(next_ie + expand_size, next_ie, move_size);

			ie[1] += expand_size;
			ie_len += expand_size;
			pvm_len += expand_size;
		} else {
			ath10k_warn("tim expansion failed\n");
		}
	}

	if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) {
		ath10k_warn("tim pvm length is too great (%d)\n", pvm_len);
		return;
	}

	tim->bitmap_ctrl = !!__le32_to_cpu(bcn_info->tim_info.tim_mcast);
	memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);

	ath10k_dbg(ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
		   tim->dtim_count, tim->dtim_period,
		   tim->bitmap_ctrl, pvm_len);
}

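/* Build a P2P vendor IE carrying a Notice of Absence attribute from the
 * firmware-provided NoA info. The layout written here (vendor IE header,
 * WFA OUI, NoA attribute header, then the descriptors) must match the
 * length computed by ath10k_p2p_calc_noa_ie_len() below.
 */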
static void ath10k_p2p_fill_noa_ie(u8 *data, u32 len,
				   struct wmi_p2p_noa_info *noa)
{
	struct ieee80211_p2p_noa_attr *noa_attr;
	u8 ctwindow_oppps = noa->ctwindow_oppps;
	u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
	bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
	__le16 *noa_attr_len;
	u16 attr_len;
	u8 noa_descriptors = noa->num_descriptors;
	int i;

	/* P2P IE */
	data[0] = WLAN_EID_VENDOR_SPECIFIC;
	data[1] = len - 2;
	data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
	data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
	data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
	data[5] = WLAN_OUI_TYPE_WFA_P2P;

	/* NOA ATTR */
	data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
	noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
	noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];

	noa_attr->index = noa->index;
	noa_attr->oppps_ctwindow = ctwindow;
	if (oppps)
		noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;

	for (i = 0; i < noa_descriptors; i++) {
		noa_attr->desc[i].count =
			__le32_to_cpu(noa->descriptors[i].type_count);
		noa_attr->desc[i].duration = noa->descriptors[i].duration;
		noa_attr->desc[i].interval = noa->descriptors[i].interval;
		noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
	}

	attr_len = 2; /* index + oppps_ctwindow */
	attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
	*noa_attr_len = __cpu_to_le16(attr_len);
}

static u32 ath10k_p2p_calc_noa_ie_len(struct wmi_p2p_noa_info *noa)
{
	u32 len = 0;
	u8 noa_descriptors = noa->num_descriptors;
	u8 opp_ps_info = noa->ctwindow_oppps;
	bool opps_enabled = !!(opp_ps_info & WMI_P2P_OPPPS_ENABLE_BIT);

	if (!noa_descriptors && !opps_enabled)
		return len;

	len += 1 + 1 + 4; /* EID + len + OUI */
	len += 1 + 2; /* noa attr + attr len */
	len += 1 + 1; /* index + oppps_ctwindow */
	len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);

	return len;
}

static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
				  struct sk_buff *bcn,
				  struct wmi_bcn_info *bcn_info)
{
	struct wmi_p2p_noa_info *noa = &bcn_info->p2p_noa_info;
	u8 *new_data, *old_data = arvif->u.ap.noa_data;
	u32 new_len;

	if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
		return;

	ath10k_dbg(ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
	if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) {
		new_len = ath10k_p2p_calc_noa_ie_len(noa);
		if (!new_len)
			goto cleanup;

		new_data = kmalloc(new_len, GFP_ATOMIC);
		if (!new_data)
			goto cleanup;

		ath10k_p2p_fill_noa_ie(new_data, new_len, noa);

		spin_lock_bh(&ar->data_lock);
		arvif->u.ap.noa_data = new_data;
		arvif->u.ap.noa_len = new_len;
		spin_unlock_bh(&ar->data_lock);
		kfree(old_data);
	}

	if (arvif->u.ap.noa_data)
		if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
			memcpy(skb_put(bcn, arvif->u.ap.noa_len),
			       arvif->u.ap.noa_data,
			       arvif->u.ap.noa_len);
	return;

cleanup:
	spin_lock_bh(&ar->data_lock);
	arvif->u.ap.noa_data = NULL;
	arvif->u.ap.noa_len = 0;
	spin_unlock_bh(&ar->data_lock);
	kfree(old_data);
}

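/* SWBA (software beacon alert) handling: for every vdev flagged in the
 * event's vdev map, fetch a fresh beacon from mac80211, patch in the TIM
 * and NoA data reported by the firmware and submit it via WMI. The loop
 * assumes the vdev map bits are ordered by vdev id.
 */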
static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_host_swba_event *ev;
	u32 map;
	int i = -1;
	struct wmi_bcn_info *bcn_info;
	struct ath10k_vif *arvif;
	struct wmi_bcn_tx_arg arg;
	struct sk_buff *bcn;
	int vdev_id = 0;
	int ret;

	ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");

	ev = (struct wmi_host_swba_event *)skb->data;
	map = __le32_to_cpu(ev->vdev_map);

	ath10k_dbg(ATH10K_DBG_MGMT, "host swba:\n"
		   "-vdev map 0x%x\n",
		   map);

	for (; map; map >>= 1, vdev_id++) {
		if (!(map & 0x1))
			continue;

		i++;

		if (i >= WMI_MAX_AP_VDEV) {
			ath10k_warn("swba has corrupted vdev map\n");
			break;
		}

		bcn_info = &ev->bcn_info[i];

		ath10k_dbg(ATH10K_DBG_MGMT,
			   "-bcn_info[%d]:\n"
			   "--tim_len %d\n"
			   "--tim_mcast %d\n"
			   "--tim_changed %d\n"
			   "--tim_num_ps_pending %d\n"
			   "--tim_bitmap 0x%08x%08x%08x%08x\n",
			   i,
			   __le32_to_cpu(bcn_info->tim_info.tim_len),
			   __le32_to_cpu(bcn_info->tim_info.tim_mcast),
			   __le32_to_cpu(bcn_info->tim_info.tim_changed),
			   __le32_to_cpu(bcn_info->tim_info.tim_num_ps_pending),
			   __le32_to_cpu(bcn_info->tim_info.tim_bitmap[3]),
			   __le32_to_cpu(bcn_info->tim_info.tim_bitmap[2]),
			   __le32_to_cpu(bcn_info->tim_info.tim_bitmap[1]),
			   __le32_to_cpu(bcn_info->tim_info.tim_bitmap[0]));

		arvif = ath10k_get_arvif(ar, vdev_id);
		if (arvif == NULL) {
			ath10k_warn("no vif for vdev_id %d found\n", vdev_id);
			continue;
		}

		bcn = ieee80211_beacon_get(ar->hw, arvif->vif);
		if (!bcn) {
			ath10k_warn("could not get mac80211 beacon\n");
			continue;
		}

		ath10k_tx_h_seq_no(bcn);
		ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info);
		ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info);

		arg.vdev_id = arvif->vdev_id;
		arg.tx_rate = 0;
		arg.tx_power = 0;
		arg.bcn = bcn->data;
		arg.bcn_len = bcn->len;

		ret = ath10k_wmi_beacon_send(ar, &arg);
		if (ret)
			ath10k_warn("could not send beacon (%d)\n", ret);

		dev_kfree_skb_any(bcn);
	}
}

static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar,
					       struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
}

static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_PHYERR_EVENTID\n");
}

static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n");
}

static void ath10k_wmi_event_profile_match(struct ath10k *ar,
					   struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
}

static void ath10k_wmi_event_debug_print(struct ath10k *ar,
					 struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_DEBUG_PRINT_EVENTID\n");
}

static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
}

static void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar,
					       struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
}

static void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
						    struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
}

static void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
						    struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
}

static void ath10k_wmi_event_rtt_error_report(struct ath10k *ar,
					      struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
}

static void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar,
					     struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n");
}

static void ath10k_wmi_event_dcs_interference(struct ath10k *ar,
					      struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
}

static void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar,
					     struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n");
}

static void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar,
					   struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
}

static void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar,
						struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
}

static void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar,
					    struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
}

static void ath10k_wmi_event_delba_complete(struct ath10k *ar,
					    struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
}

static void ath10k_wmi_event_addba_complete(struct ath10k *ar,
					    struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
}

static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
						       struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
}

static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
					      struct sk_buff *skb)
{
	struct wmi_service_ready_event *ev = (void *)skb->data;

	if (skb->len < sizeof(*ev)) {
		ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
			    skb->len, sizeof(*ev));
		return;
	}

	ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power);
	ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power);
	ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info);
	ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info);
	ar->fw_version_major =
		(__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24;
	ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff);
	ar->fw_version_release =
		(__le32_to_cpu(ev->sw_version_1) & 0xffff0000) >> 16;
	ar->fw_version_build = (__le32_to_cpu(ev->sw_version_1) & 0x0000ffff);
	ar->phy_capability = __le32_to_cpu(ev->phy_capability);
	ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);

	if (ar->fw_version_build > 636)
		set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);

	if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
		ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n",
			    ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
		ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
	}

	ar->ath_common.regulatory.current_rd =
		__le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);

	ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap,
				      sizeof(ev->wmi_service_bitmap));

	if (strlen(ar->hw->wiphy->fw_version) == 0) {
		snprintf(ar->hw->wiphy->fw_version,
			 sizeof(ar->hw->wiphy->fw_version),
			 "%u.%u.%u.%u",
			 ar->fw_version_major,
			 ar->fw_version_minor,
			 ar->fw_version_release,
			 ar->fw_version_build);
	}

	/* FIXME: it would probably be better to support this */
	if (__le32_to_cpu(ev->num_mem_reqs) > 0) {
		ath10k_warn("target requested %d memory chunks; ignoring\n",
			    __le32_to_cpu(ev->num_mem_reqs));
	}

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n",
		   __le32_to_cpu(ev->sw_version),
		   __le32_to_cpu(ev->sw_version_1),
		   __le32_to_cpu(ev->abi_version),
		   __le32_to_cpu(ev->phy_capability),
		   __le32_to_cpu(ev->ht_cap_info),
		   __le32_to_cpu(ev->vht_cap_info),
		   __le32_to_cpu(ev->vht_supp_mcs),
		   __le32_to_cpu(ev->sys_cap_info),
		   __le32_to_cpu(ev->num_mem_reqs),
		   __le32_to_cpu(ev->num_rf_chains));

	complete(&ar->wmi.service_ready);
}

static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data;

	if (WARN_ON(skb->len < sizeof(*ev)))
		return -EINVAL;

	memcpy(ar->mac_addr, ev->mac_addr.addr, ETH_ALEN);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d\n",
		   __le32_to_cpu(ev->sw_version),
		   __le32_to_cpu(ev->abi_version),
		   ev->mac_addr.addr,
		   __le32_to_cpu(ev->status));

	complete(&ar->wmi.unified_ready);
	return 0;
}

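/* Decode the WMI event id from the command header and dispatch to the
 * matching handler. All handlers except mgmt_rx leave skb ownership
 * here, hence the dev_kfree_skb() at the end.
 */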
static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum wmi_event_id id;
	u16 len;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);

	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		return;

	len = skb->len;

	trace_ath10k_wmi_event(id, skb->data, skb->len);

	switch (id) {
	case WMI_MGMT_RX_EVENTID:
		ath10k_wmi_event_mgmt_rx(ar, skb);
		/* mgmt_rx() owns the skb now! */
		return;
	case WMI_SCAN_EVENTID:
		ath10k_wmi_event_scan(ar, skb);
		break;
	case WMI_CHAN_INFO_EVENTID:
		ath10k_wmi_event_chan_info(ar, skb);
		break;
	case WMI_ECHO_EVENTID:
		ath10k_wmi_event_echo(ar, skb);
		break;
	case WMI_DEBUG_MESG_EVENTID:
		ath10k_wmi_event_debug_mesg(ar, skb);
		break;
	case WMI_UPDATE_STATS_EVENTID:
		ath10k_wmi_event_update_stats(ar, skb);
		break;
	case WMI_VDEV_START_RESP_EVENTID:
		ath10k_wmi_event_vdev_start_resp(ar, skb);
		break;
	case WMI_VDEV_STOPPED_EVENTID:
		ath10k_wmi_event_vdev_stopped(ar, skb);
		break;
	case WMI_PEER_STA_KICKOUT_EVENTID:
		ath10k_wmi_event_peer_sta_kickout(ar, skb);
		break;
	case WMI_HOST_SWBA_EVENTID:
		ath10k_wmi_event_host_swba(ar, skb);
		break;
	case WMI_TBTTOFFSET_UPDATE_EVENTID:
		ath10k_wmi_event_tbttoffset_update(ar, skb);
		break;
	case WMI_PHYERR_EVENTID:
		ath10k_wmi_event_phyerr(ar, skb);
		break;
	case WMI_ROAM_EVENTID:
		ath10k_wmi_event_roam(ar, skb);
		break;
	case WMI_PROFILE_MATCH:
		ath10k_wmi_event_profile_match(ar, skb);
		break;
	case WMI_DEBUG_PRINT_EVENTID:
		ath10k_wmi_event_debug_print(ar, skb);
		break;
	case WMI_PDEV_QVIT_EVENTID:
		ath10k_wmi_event_pdev_qvit(ar, skb);
		break;
	case WMI_WLAN_PROFILE_DATA_EVENTID:
		ath10k_wmi_event_wlan_profile_data(ar, skb);
		break;
	case WMI_RTT_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_rtt_measurement_report(ar, skb);
		break;
	case WMI_TSF_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_tsf_measurement_report(ar, skb);
		break;
	case WMI_RTT_ERROR_REPORT_EVENTID:
		ath10k_wmi_event_rtt_error_report(ar, skb);
		break;
	case WMI_WOW_WAKEUP_HOST_EVENTID:
		ath10k_wmi_event_wow_wakeup_host(ar, skb);
		break;
	case WMI_DCS_INTERFERENCE_EVENTID:
		ath10k_wmi_event_dcs_interference(ar, skb);
		break;
	case WMI_PDEV_TPC_CONFIG_EVENTID:
		ath10k_wmi_event_pdev_tpc_config(ar, skb);
		break;
	case WMI_PDEV_FTM_INTG_EVENTID:
		ath10k_wmi_event_pdev_ftm_intg(ar, skb);
		break;
	case WMI_GTK_OFFLOAD_STATUS_EVENTID:
		ath10k_wmi_event_gtk_offload_status(ar, skb);
		break;
	case WMI_GTK_REKEY_FAIL_EVENTID:
		ath10k_wmi_event_gtk_rekey_fail(ar, skb);
		break;
	case WMI_TX_DELBA_COMPLETE_EVENTID:
		ath10k_wmi_event_delba_complete(ar, skb);
		break;
	case WMI_TX_ADDBA_COMPLETE_EVENTID:
		ath10k_wmi_event_addba_complete(ar, skb);
		break;
	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
		ath10k_wmi_event_vdev_install_key_complete(ar, skb);
		break;
	case WMI_SERVICE_READY_EVENTID:
		ath10k_wmi_service_ready_event_rx(ar, skb);
		break;
	case WMI_READY_EVENTID:
		ath10k_wmi_ready_event_rx(ar, skb);
		break;
	default:
		ath10k_warn("Unknown eventid: %d\n", id);
		break;
	}

	dev_kfree_skb(skb);
}

static void ath10k_wmi_event_work(struct work_struct *work)
{
	struct ath10k *ar = container_of(work, struct ath10k,
					 wmi.wmi_event_work);
	struct sk_buff *skb;

	for (;;) {
		skb = skb_dequeue(&ar->wmi.wmi_event_list);
		if (!skb)
			break;

		ath10k_wmi_event_process(ar, skb);
	}
}

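/* RX completion handler registered with HTC. Management frames are
 * handled inline (see the comment inside); everything else is queued up
 * for ath10k_wmi_event_work().
 */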
static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_cmd_hdr *cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	enum wmi_event_id event_id;

	event_id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);

	/* some events must be handled ASAP and thus can't be
	 * deferred to a worker thread */
	switch (event_id) {
	case WMI_MGMT_RX_EVENTID:
		ath10k_wmi_event_process(ar, skb);
		return;
	default:
		break;
	}

	skb_queue_tail(&ar->wmi.wmi_event_list, skb);
	queue_work(ar->workqueue, &ar->wmi.wmi_event_work);
}

/* WMI Initialization functions */
int ath10k_wmi_attach(struct ath10k *ar)
{
	init_completion(&ar->wmi.service_ready);
	init_completion(&ar->wmi.unified_ready);
	init_waitqueue_head(&ar->wmi.wq);
	init_waitqueue_head(&ar->wmi.tx_credits_wq);

	skb_queue_head_init(&ar->wmi.wmi_event_list);
	INIT_WORK(&ar->wmi.wmi_event_work, ath10k_wmi_event_work);

	return 0;
}

void ath10k_wmi_detach(struct ath10k *ar)
{
	/* HTC should've drained the packets already */
	if (WARN_ON(atomic_read(&ar->wmi.pending_tx_count) > 0))
		ath10k_warn("there are still pending packets\n");

	cancel_work_sync(&ar->wmi.wmi_event_work);
	skb_queue_purge(&ar->wmi.wmi_event_list);
}

int ath10k_wmi_connect_htc_service(struct ath10k *ar)
{
	int status;
	struct ath10k_htc_svc_conn_req conn_req;
	struct ath10k_htc_svc_conn_resp conn_resp;

	memset(&conn_req, 0, sizeof(conn_req));
	memset(&conn_resp, 0, sizeof(conn_resp));

	/* these fields are the same for all service endpoints */
	conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;
	conn_req.ep_ops.ep_tx_credits = ath10k_wmi_op_ep_tx_credits;

	/* connect to control service */
	conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;

	status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
	if (status) {
		ath10k_warn("failed to connect to WMI CONTROL service status: %d\n",
			    status);
		return status;
	}

	ar->wmi.eid = conn_resp.eid;
	return 0;
}

int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
				  u16 rd5g, u16 ctl2g, u16 ctl5g)
{
	struct wmi_pdev_set_regdomain_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
	cmd->reg_domain = __cpu_to_le32(rd);
	cmd->reg_domain_2G = __cpu_to_le32(rd2g);
	cmd->reg_domain_5G = __cpu_to_le32(rd5g);
	cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
	cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
		   rd, rd2g, rd5g, ctl2g, ctl5g);

	return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
}

int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
				const struct wmi_channel_arg *arg)
{
	struct wmi_set_channel_cmd *cmd;
	struct sk_buff *skb;

	if (arg->passive)
		return -EINVAL;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_set_channel_cmd *)skb->data;
	cmd->chan.mhz = __cpu_to_le32(arg->freq);
	cmd->chan.band_center_freq1 = __cpu_to_le32(arg->freq);
	cmd->chan.mode = arg->mode;
	cmd->chan.min_power = arg->min_power;
	cmd->chan.max_power = arg->max_power;
	cmd->chan.reg_power = arg->max_reg_power;
	cmd->chan.reg_classid = arg->reg_class_id;
	cmd->chan.antenna_max = arg->max_antenna_gain;

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi set channel mode %d freq %d\n",
		   arg->mode, arg->freq);

	return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_CHANNEL_CMDID);
}

int ath10k_wmi_pdev_suspend_target(struct ath10k *ar)
{
	struct wmi_pdev_suspend_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
	cmd->suspend_opt = WMI_PDEV_SUSPEND;

	return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SUSPEND_CMDID);
}

int ath10k_wmi_pdev_resume_target(struct ath10k *ar)
{
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(0);
	if (skb == NULL)
		return -ENOMEM;

	return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_RESUME_CMDID);
}

int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id,
			      u32 value)
{
	struct wmi_pdev_set_param_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
	cmd->param_id = __cpu_to_le32(id);
	cmd->param_value = __cpu_to_le32(value);

	ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
		   id, value);
	return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_PARAM_CMDID);
}

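/* Send WMI_INIT_CMDID with a resource configuration built entirely from
 * the TARGET_* compile-time defaults; no host memory chunks are handed
 * to the target (see the num_mem_reqs warning in the service ready
 * handler above).
 */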
int ath10k_wmi_cmd_init(struct ath10k *ar)
{
	struct wmi_init_cmd *cmd;
	struct sk_buff *buf;
	struct wmi_resource_config config = {};
	u32 val;

	config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
	config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS + TARGET_NUM_VDEVS);
	config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS);

	config.num_offload_reorder_bufs =
		__cpu_to_le32(TARGET_NUM_OFFLOAD_REORDER_BUFS);

	config.num_peer_keys = __cpu_to_le32(TARGET_NUM_PEER_KEYS);
	config.num_tids = __cpu_to_le32(TARGET_NUM_TIDS);
	config.ast_skid_limit = __cpu_to_le32(TARGET_AST_SKID_LIMIT);
	config.tx_chain_mask = __cpu_to_le32(TARGET_TX_CHAIN_MASK);
	config.rx_chain_mask = __cpu_to_le32(TARGET_RX_CHAIN_MASK);
	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI);
	config.rx_decap_mode = __cpu_to_le32(TARGET_RX_DECAP_MODE);

	config.scan_max_pending_reqs =
		__cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS);

	config.bmiss_offload_max_vdev =
		__cpu_to_le32(TARGET_BMISS_OFFLOAD_MAX_VDEV);

	config.roam_offload_max_vdev =
		__cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_VDEV);

	config.roam_offload_max_ap_profiles =
		__cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES);

	config.num_mcast_groups = __cpu_to_le32(TARGET_NUM_MCAST_GROUPS);
	config.num_mcast_table_elems =
		__cpu_to_le32(TARGET_NUM_MCAST_TABLE_ELEMS);

	config.mcast2ucast_mode = __cpu_to_le32(TARGET_MCAST2UCAST_MODE);
	config.tx_dbg_log_size = __cpu_to_le32(TARGET_TX_DBG_LOG_SIZE);
	config.num_wds_entries = __cpu_to_le32(TARGET_NUM_WDS_ENTRIES);
	config.dma_burst_size = __cpu_to_le32(TARGET_DMA_BURST_SIZE);
	config.mac_aggr_delim = __cpu_to_le32(TARGET_MAC_AGGR_DELIM);

	val = TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);

	config.vow_config = __cpu_to_le32(TARGET_VOW_CONFIG);

	config.gtk_offload_max_vdev =
		__cpu_to_le32(TARGET_GTK_OFFLOAD_MAX_VDEV);

	config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
	config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);

	buf = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!buf)
		return -ENOMEM;

	cmd = (struct wmi_init_cmd *)buf->data;
	cmd->num_host_mem_chunks = 0;
	memcpy(&cmd->resource_config, &config, sizeof(config));

	ath10k_dbg(ATH10K_DBG_WMI, "wmi init\n");
	return ath10k_wmi_cmd_send(ar, buf, WMI_INIT_CMDID);
}

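/* Compute the exact buffer length for a start-scan command, including
 * the optional TLVs (channel, ssid, bssid and IE lists). This must stay
 * in sync with the offsets written by ath10k_wmi_start_scan(), which
 * sanity-checks the final offset against skb->len.
 */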
static int ath10k_wmi_start_scan_calc_len(const struct wmi_start_scan_arg *arg)
{
	int len;

	len = sizeof(struct wmi_start_scan_cmd);

	if (arg->ie_len) {
		if (!arg->ie)
			return -EINVAL;
		if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
			return -EINVAL;

		len += sizeof(struct wmi_ie_data);
		len += roundup(arg->ie_len, 4);
	}

	if (arg->n_channels) {
		if (!arg->channels)
			return -EINVAL;
		if (arg->n_channels > ARRAY_SIZE(arg->channels))
			return -EINVAL;

		len += sizeof(struct wmi_chan_list);
		len += sizeof(__le32) * arg->n_channels;
	}

	if (arg->n_ssids) {
		if (!arg->ssids)
			return -EINVAL;
		if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
			return -EINVAL;

		len += sizeof(struct wmi_ssid_list);
		len += sizeof(struct wmi_ssid) * arg->n_ssids;
	}

	if (arg->n_bssids) {
		if (!arg->bssids)
			return -EINVAL;
		if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
			return -EINVAL;

		len += sizeof(struct wmi_bssid_list);
		len += sizeof(struct wmi_mac_addr) * arg->n_bssids;
	}

	return len;
}

int ath10k_wmi_start_scan(struct ath10k *ar,
			  const struct wmi_start_scan_arg *arg)
{
	struct wmi_start_scan_cmd *cmd;
	struct sk_buff *skb;
	struct wmi_ie_data *ie;
	struct wmi_chan_list *channels;
	struct wmi_ssid_list *ssids;
	struct wmi_bssid_list *bssids;
	u32 scan_id;
	u32 scan_req_id;
	int off;
	int len = 0;
	int i;

	len = ath10k_wmi_start_scan_calc_len(arg);
	if (len < 0)
		return len; /* len contains error code here */

	skb = ath10k_wmi_alloc_skb(len);
	if (!skb)
		return -ENOMEM;

	scan_id = WMI_HOST_SCAN_REQ_ID_PREFIX;
	scan_id |= arg->scan_id;

	scan_req_id = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
	scan_req_id |= arg->scan_req_id;

	cmd = (struct wmi_start_scan_cmd *)skb->data;
	cmd->scan_id = __cpu_to_le32(scan_id);
	cmd->scan_req_id = __cpu_to_le32(scan_req_id);
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
	cmd->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
	cmd->dwell_time_active = __cpu_to_le32(arg->dwell_time_active);
	cmd->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
	cmd->min_rest_time = __cpu_to_le32(arg->min_rest_time);
	cmd->max_rest_time = __cpu_to_le32(arg->max_rest_time);
	cmd->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time);
	cmd->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
	cmd->idle_time = __cpu_to_le32(arg->idle_time);
	cmd->max_scan_time = __cpu_to_le32(arg->max_scan_time);
	cmd->probe_delay = __cpu_to_le32(arg->probe_delay);
	cmd->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags);

	/* TLV list starts after fields included in the struct */
	off = sizeof(*cmd);

	if (arg->n_channels) {
		channels = (void *)skb->data + off;
		channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG);
		channels->num_chan = __cpu_to_le32(arg->n_channels);

		for (i = 0; i < arg->n_channels; i++)
			channels->channel_list[i] =
				__cpu_to_le32(arg->channels[i]);

		off += sizeof(*channels);
		off += sizeof(__le32) * arg->n_channels;
	}

	if (arg->n_ssids) {
		ssids = (void *)skb->data + off;
		ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG);
		ssids->num_ssids = __cpu_to_le32(arg->n_ssids);

		for (i = 0; i < arg->n_ssids; i++) {
			ssids->ssids[i].ssid_len =
				__cpu_to_le32(arg->ssids[i].len);
			memcpy(&ssids->ssids[i].ssid,
			       arg->ssids[i].ssid,
			       arg->ssids[i].len);
		}

		off += sizeof(*ssids);
		off += sizeof(struct wmi_ssid) * arg->n_ssids;
	}

	if (arg->n_bssids) {
		bssids = (void *)skb->data + off;
		bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG);
		bssids->num_bssid = __cpu_to_le32(arg->n_bssids);

		for (i = 0; i < arg->n_bssids; i++)
			memcpy(&bssids->bssid_list[i],
			       arg->bssids[i].bssid,
			       ETH_ALEN);

		off += sizeof(*bssids);
		off += sizeof(struct wmi_mac_addr) * arg->n_bssids;
	}

	if (arg->ie_len) {
		ie = (void *)skb->data + off;
		ie->tag = __cpu_to_le32(WMI_IE_TAG);
		ie->ie_len = __cpu_to_le32(arg->ie_len);
		memcpy(ie->ie_data, arg->ie, arg->ie_len);

		off += sizeof(*ie);
		off += roundup(arg->ie_len, 4);
	}

	if (off != skb->len) {
		dev_kfree_skb(skb);
		return -EINVAL;
	}

	ath10k_dbg(ATH10K_DBG_WMI, "wmi start scan\n");
	return ath10k_wmi_cmd_send(ar, skb, WMI_START_SCAN_CMDID);
}

void ath10k_wmi_start_scan_init(struct ath10k *ar,
				struct wmi_start_scan_arg *arg)
{
	/* setup commonly used values */
	arg->scan_req_id = 1;
	arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
	arg->dwell_time_active = 50;
	arg->dwell_time_passive = 150;
	arg->min_rest_time = 50;
	arg->max_rest_time = 500;
	arg->repeat_probe_time = 0;
	arg->probe_spacing_time = 0;
	arg->idle_time = 0;
	arg->max_scan_time = 5000;
	arg->probe_delay = 5;
	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED
		| WMI_SCAN_EVENT_COMPLETED
		| WMI_SCAN_EVENT_BSS_CHANNEL
		| WMI_SCAN_EVENT_FOREIGN_CHANNEL
		| WMI_SCAN_EVENT_DEQUEUED;
	arg->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES;
	arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
	arg->n_bssids = 1;
	arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
}

int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
	struct wmi_stop_scan_cmd *cmd;
	struct sk_buff *skb;
	u32 scan_id;
	u32 req_id;

	if (arg->req_id > 0xFFF)
		return -EINVAL;
	if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
		return -EINVAL;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	scan_id = arg->u.scan_id;
	scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;

	req_id = arg->req_id;
	req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;

	cmd = (struct wmi_stop_scan_cmd *)skb->data;
	cmd->req_type = __cpu_to_le32(arg->req_type);
	cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
	cmd->scan_id = __cpu_to_le32(scan_id);
	cmd->scan_req_id = __cpu_to_le32(req_id);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
		   arg->req_id, arg->req_type, arg->u.scan_id);
	return ath10k_wmi_cmd_send(ar, skb, WMI_STOP_SCAN_CMDID);
}

int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
			   enum wmi_vdev_type type,
			   enum wmi_vdev_subtype subtype,
			   const u8 macaddr[ETH_ALEN])
{
	struct wmi_vdev_create_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_create_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->vdev_type = __cpu_to_le32(type);
	cmd->vdev_subtype = __cpu_to_le32(subtype);
	memcpy(cmd->vdev_macaddr.addr, macaddr, ETH_ALEN);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
		   vdev_id, type, subtype, macaddr);

	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_CREATE_CMDID);
}

int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
	struct wmi_vdev_delete_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_delete_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "WMI vdev delete id %d\n", vdev_id);

	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DELETE_CMDID);
}

static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
					 const struct wmi_vdev_start_request_arg *arg,
					 enum wmi_cmd_id cmd_id)
{
	struct wmi_vdev_start_request_cmd *cmd;
	struct sk_buff *skb;
	const char *cmdname;
	u32 flags = 0;

	if (cmd_id != WMI_VDEV_START_REQUEST_CMDID &&
	    cmd_id != WMI_VDEV_RESTART_REQUEST_CMDID)
		return -EINVAL;
	if (WARN_ON(arg->ssid && arg->ssid_len == 0))
		return -EINVAL;
	if (WARN_ON(arg->hidden_ssid && !arg->ssid))
		return -EINVAL;
	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
		return -EINVAL;

	if (cmd_id == WMI_VDEV_START_REQUEST_CMDID)
		cmdname = "start";
	else if (cmd_id == WMI_VDEV_RESTART_REQUEST_CMDID)
		cmdname = "restart";
	else
		return -EINVAL; /* should not happen, we already check cmd_id */

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	if (arg->hidden_ssid)
		flags |= WMI_VDEV_START_HIDDEN_SSID;
	if (arg->pmf_enabled)
		flags |= WMI_VDEV_START_PMF_ENABLED;

	cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack);
	cmd->beacon_interval = __cpu_to_le32(arg->bcn_intval);
	cmd->dtim_period = __cpu_to_le32(arg->dtim_period);
	cmd->flags = __cpu_to_le32(flags);
	cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate);
	cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power);

	if (arg->ssid) {
		cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
		memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
	}

	cmd->chan.mhz = __cpu_to_le32(arg->channel.freq);

	cmd->chan.band_center_freq1 =
		__cpu_to_le32(arg->channel.band_center_freq1);

	cmd->chan.mode = arg->channel.mode;
	cmd->chan.min_power = arg->channel.min_power;
	cmd->chan.max_power = arg->channel.max_power;
	cmd->chan.reg_power = arg->channel.max_reg_power;
	cmd->chan.reg_classid = arg->channel.reg_class_id;
	cmd->chan.antenna_max = arg->channel.max_antenna_gain;

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi vdev %s id 0x%x freq %d, mode %d, ch_flags: 0x%0X, "
		   "max_power: %d\n", cmdname, arg->vdev_id, arg->channel.freq,
		   arg->channel.mode, flags, arg->channel.max_power);

	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

int ath10k_wmi_vdev_start(struct ath10k *ar,
			  const struct wmi_vdev_start_request_arg *arg)
{
	return ath10k_wmi_vdev_start_restart(ar, arg,
					     WMI_VDEV_START_REQUEST_CMDID);
}

int ath10k_wmi_vdev_restart(struct ath10k *ar,
			    const struct wmi_vdev_start_request_arg *arg)
{
	return ath10k_wmi_vdev_start_restart(ar, arg,
					     WMI_VDEV_RESTART_REQUEST_CMDID);
}

int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
	struct wmi_vdev_stop_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_stop_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);

	ath10k_dbg(ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);

	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_STOP_CMDID);
}

int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
	struct wmi_vdev_up_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_up_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->vdev_assoc_id = __cpu_to_le32(aid);
	memcpy(&cmd->vdev_bssid.addr, bssid, ETH_ALEN);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
		   vdev_id, aid, bssid);

	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_UP_CMDID);
}

int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
{
	struct wmi_vdev_down_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_down_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi mgmt vdev down id 0x%x\n", vdev_id);

	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DOWN_CMDID);
}

int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
			      enum wmi_vdev_param param_id, u32 param_value)
{
	struct wmi_vdev_set_param_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->param_id = __cpu_to_le32(param_id);
	cmd->param_value = __cpu_to_le32(param_value);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi vdev id 0x%x set param %d value %d\n",
		   vdev_id, param_id, param_value);

	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_SET_PARAM_CMDID);
}
1840
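/*
 * Install a cipher key for a vdev/peer. key_data may only be NULL for
 * WMI_CIPHER_NONE (and must be NULL in that case); the key material is
 * appended to the command, so the skb is sized for sizeof(*cmd) + key_len.
 */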
int ath10k_wmi_vdev_install_key(struct ath10k *ar,
				const struct wmi_vdev_install_key_arg *arg)
{
	struct wmi_vdev_install_key_cmd *cmd;
	struct sk_buff *skb;

	if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
		return -EINVAL;
	if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
		return -EINVAL;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->key_len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->key_idx = __cpu_to_le32(arg->key_idx);
	cmd->key_flags = __cpu_to_le32(arg->key_flags);
	cmd->key_cipher = __cpu_to_le32(arg->key_cipher);
	cmd->key_len = __cpu_to_le32(arg->key_len);
	cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
	cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);

	if (arg->macaddr)
		memcpy(cmd->peer_macaddr.addr, arg->macaddr, ETH_ALEN);
	if (arg->key_data)
		memcpy(cmd->key_data, arg->key_data, arg->key_len);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi vdev install key idx %d cipher %d len %d\n",
		   arg->key_idx, arg->key_cipher, arg->key_len);
	return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_INSTALL_KEY_CMDID);
}

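/* Create a peer entry on the target for the given MAC address. */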
int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
			   const u8 peer_addr[ETH_ALEN])
{
	struct wmi_peer_create_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_create_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi peer create vdev_id %d peer_addr %pM\n",
		   vdev_id, peer_addr);
	return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_CREATE_CMDID);
}

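/* Remove a previously created peer entry from the target. */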
int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
			   const u8 peer_addr[ETH_ALEN])
{
	struct wmi_peer_delete_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_delete_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi peer delete vdev_id %d peer_addr %pM\n",
		   vdev_id, peer_addr);
	return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_DELETE_CMDID);
}

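/* Flush the target's queued tx frames for the given peer, limited to the
 * TIDs set in tid_bitmap.
 */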
int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
			  const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
{
	struct wmi_peer_flush_tids_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
	memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
		   vdev_id, peer_addr, tid_bitmap);
	return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_FLUSH_TIDS_CMDID);
}

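/* Set a single per-peer firmware parameter to the given value. */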
int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
			      const u8 *peer_addr, enum wmi_peer_param param_id,
			      u32 param_value)
{
	struct wmi_peer_set_param_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_set_param_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->param_id = __cpu_to_le32(param_id);
	cmd->param_value = __cpu_to_le32(param_value);
	memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi vdev %d peer 0x%pM set param %d value %d\n",
		   vdev_id, peer_addr, param_id, param_value);

	return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_SET_PARAM_CMDID);
}

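/* Select the station powersave mode (enabled/disabled) for a vdev. */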
int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
			  enum wmi_sta_ps_mode psmode)
{
	struct wmi_sta_powersave_mode_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->sta_ps_mode = __cpu_to_le32(psmode);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi set powersave id 0x%x mode %d\n",
		   vdev_id, psmode);

	return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_MODE_CMDID);
}

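/* Tune a single station powersave parameter for a vdev. */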
int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
				enum wmi_sta_powersave_param param_id,
				u32 value)
{
	struct wmi_sta_powersave_param_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->param_id = __cpu_to_le32(param_id);
	cmd->param_value = __cpu_to_le32(value);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi sta ps param vdev_id 0x%x param %d value %d\n",
		   vdev_id, param_id, value);
	return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
}

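/* Set an AP-side powersave parameter for a specific associated peer;
 * a peer MAC address is mandatory here, hence the explicit NULL check.
 */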
int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			       enum wmi_ap_ps_peer_param param_id, u32 value)
{
	struct wmi_ap_ps_peer_cmd *cmd;
	struct sk_buff *skb;

	if (!mac)
		return -EINVAL;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->param_id = __cpu_to_le32(param_id);
	cmd->param_value = __cpu_to_le32(value);
	memcpy(&cmd->peer_macaddr, mac, ETH_ALEN);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
		   vdev_id, param_id, value, mac);

	return ath10k_wmi_cmd_send(ar, skb, WMI_AP_PS_PEER_PARAM_CMDID);
}

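/*
 * Push the driver's channel list to the firmware ahead of scanning. The
 * command is variable length: one wmi_channel entry per channel follows
 * the header.
 */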
int ath10k_wmi_scan_chan_list(struct ath10k *ar,
			      const struct wmi_scan_chan_list_arg *arg)
{
	struct wmi_scan_chan_list_cmd *cmd;
	struct sk_buff *skb;
	struct wmi_channel_arg *ch;
	struct wmi_channel *ci;
	int len;
	int i;

	len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel);

	skb = ath10k_wmi_alloc_skb(len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
	cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);

	for (i = 0; i < arg->n_channels; i++) {
		u32 flags = 0;

		ch = &arg->channels[i];
		ci = &cmd->chan_info[i];

		if (ch->passive)
			flags |= WMI_CHAN_FLAG_PASSIVE;
		if (ch->allow_ibss)
			flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
		if (ch->allow_ht)
			flags |= WMI_CHAN_FLAG_ALLOW_HT;
		if (ch->allow_vht)
			flags |= WMI_CHAN_FLAG_ALLOW_VHT;
		if (ch->ht40plus)
			flags |= WMI_CHAN_FLAG_HT40_PLUS;

		ci->mhz = __cpu_to_le32(ch->freq);
		ci->band_center_freq1 = __cpu_to_le32(ch->freq);
		ci->band_center_freq2 = 0;
		ci->min_power = ch->min_power;
		ci->max_power = ch->max_power;
		ci->reg_power = ch->max_reg_power;
		ci->antenna_max = ch->max_antenna_gain;

		/* mode & flags share storage */
		ci->mode = ch->mode;
		ci->flags |= __cpu_to_le32(flags);
	}

	return ath10k_wmi_cmd_send(ar, skb, WMI_SCAN_CHAN_LIST_CMDID);
}

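/*
 * Send the complete association state for a peer (capabilities, listen
 * interval, legacy/HT/VHT rate sets, phy mode) in one command. The rate
 * set sizes are validated against MAX_SUPPORTED_RATES up front since the
 * command's rate arrays are fixed size.
 */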
int ath10k_wmi_peer_assoc(struct ath10k *ar,
			  const struct wmi_peer_assoc_complete_arg *arg)
{
	struct wmi_peer_assoc_complete_cmd *cmd;
	struct sk_buff *skb;

	if (arg->peer_mpdu_density > 16)
		return -EINVAL;
	if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
		return -EINVAL;
	if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
		return -EINVAL;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_assoc_complete_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->peer_new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
	cmd->peer_associd = __cpu_to_le32(arg->peer_aid);
	cmd->peer_flags = __cpu_to_le32(arg->peer_flags);
	cmd->peer_caps = __cpu_to_le32(arg->peer_caps);
	cmd->peer_listen_intval = __cpu_to_le32(arg->peer_listen_intval);
	cmd->peer_ht_caps = __cpu_to_le32(arg->peer_ht_caps);
	cmd->peer_max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
	cmd->peer_mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
	cmd->peer_rate_caps = __cpu_to_le32(arg->peer_rate_caps);
	cmd->peer_nss = __cpu_to_le32(arg->peer_num_spatial_streams);
	cmd->peer_vht_caps = __cpu_to_le32(arg->peer_vht_caps);
	cmd->peer_phymode = __cpu_to_le32(arg->peer_phymode);

	memcpy(cmd->peer_macaddr.addr, arg->addr, ETH_ALEN);

	cmd->peer_legacy_rates.num_rates =
		__cpu_to_le32(arg->peer_legacy_rates.num_rates);
	memcpy(cmd->peer_legacy_rates.rates, arg->peer_legacy_rates.rates,
	       arg->peer_legacy_rates.num_rates);

	cmd->peer_ht_rates.num_rates =
		__cpu_to_le32(arg->peer_ht_rates.num_rates);
	memcpy(cmd->peer_ht_rates.rates, arg->peer_ht_rates.rates,
	       arg->peer_ht_rates.num_rates);

	cmd->peer_vht_rates.rx_max_rate =
		__cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
	cmd->peer_vht_rates.rx_mcs_set =
		__cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
	cmd->peer_vht_rates.tx_max_rate =
		__cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
	cmd->peer_vht_rates.tx_mcs_set =
		__cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);

	ath10k_dbg(ATH10K_DBG_WMI,
		   "wmi peer assoc vdev %d addr %pM\n",
		   arg->vdev_id, arg->addr);
	return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_ASSOC_CMDID);
}

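/* Hand a beacon frame to the firmware for transmission on the given vdev
 * (typically from the host's beacon/SWBA event handling).
 */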
int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg)
{
	struct wmi_bcn_tx_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->bcn_len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_bcn_tx_cmd *)skb->data;
	cmd->hdr.vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->hdr.tx_rate = __cpu_to_le32(arg->tx_rate);
	cmd->hdr.tx_power = __cpu_to_le32(arg->tx_power);
	cmd->hdr.bcn_len = __cpu_to_le32(arg->bcn_len);
	memcpy(cmd->bcn, arg->bcn, arg->bcn_len);

	return ath10k_wmi_cmd_send(ar, skb, WMI_BCN_TX_CMDID);
}

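/* Serialize one WMM access category parameter set into its wire format. */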
static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
					  const struct wmi_wmm_params_arg *arg)
{
	params->cwmin = __cpu_to_le32(arg->cwmin);
	params->cwmax = __cpu_to_le32(arg->cwmax);
	params->aifs = __cpu_to_le32(arg->aifs);
	params->txop = __cpu_to_le32(arg->txop);
	params->acm = __cpu_to_le32(arg->acm);
	params->no_ack = __cpu_to_le32(arg->no_ack);
}

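/* Program WMM parameters for all four access categories (BE/BK/VI/VO). */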
int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
				   const struct wmi_pdev_set_wmm_params_arg *arg)
{
	struct wmi_pdev_set_wmm_params *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_wmm_params *)skb->data;
	ath10k_wmi_pdev_set_wmm_param(&cmd->ac_be, &arg->ac_be);
	ath10k_wmi_pdev_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
	ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
	ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);

	ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
	return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_WMM_PARAMS_CMDID);
}

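/* Request firmware statistics; the data arrives asynchronously in a
 * later stats update event.
 */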
int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
{
	struct wmi_request_stats_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_request_stats_cmd *)skb->data;
	cmd->stats_id = __cpu_to_le32(stats_id);

	ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
	return ath10k_wmi_cmd_send(ar, skb, WMI_REQUEST_STATS_CMDID);
}

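/*
 * Deliberately crash or hang the firmware after delay_ms; intended only
 * for exercising the driver's error recovery paths.
 */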
int ath10k_wmi_force_fw_hang(struct ath10k *ar,
			     enum wmi_force_fw_hang_type type, u32 delay_ms)
{
	struct wmi_force_fw_hang_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
	cmd->type = __cpu_to_le32(type);
	cmd->delay_ms = __cpu_to_le32(delay_ms);

	ath10k_dbg(ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
		   type, delay_ms);
	return ath10k_wmi_cmd_send(ar, skb, WMI_FORCE_FW_HANG_CMDID);
}